| text (string, 6–947k chars) | repo_name (string, 5–100) | path (string, 4–231) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) |
|---|---|---|---|---|---|---|
# -*- coding:utf-8 -*-
import sys
from datetime import datetime
from django.template.loader import get_template
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Post
# Python 2 only: reload() restores setdefaultencoding (removed by site.py)
# so the process-wide default encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
# Create your views here.
def homepage(request):
template = get_template('index.html')
posts = Post.objects.all()
posts_lists = list()
now = datetime.now()
html = template.render(locals())
print sys.getdefaultencoding()
#for count, post in enumerate(posts):
# print post
# print post.pub_date
# print post.slug
# #posts_lists.append("NO.{}:".format(str(count)) + str(post) + "<br />")
# posts_lists.append("NO.{}:".format(str(count)) + str(post) + "<hr />")
# posts_lists.append("<small>" + str(post.body) + "</small><br /><br />")
return HttpResponse(html)
def showpost(request,slug):
template = get_template('post.html')
try:
post = Post.objects.get(slug = slug)
print post
        if post is not None:
html = template.render(locals())
return HttpResponse(html)
    except Post.DoesNotExist:
return redirect('/homepage/')
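A minimal sketch (editor's addition; the URL regexes and names are assumptions, not taken from the repository) of a URLconf that would route to these views:

# urls.py (hypothetical)
from django.conf.urls import url
from mainsite import views

urlpatterns = [
    url(r'^homepage/$', views.homepage, name='homepage'),
    url(r'^post/(?P<slug>[\w-]+)/$', views.showpost, name='showpost'),
]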
| LouisLinY/mblog | mainsite/views.py | Python | apache-2.0 | 1,203 | 0.009975 |
from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
try:
from urllib.parse import quote
except ImportError: # Python 2
from urllib import quote
import warnings
from django.utils.functional import Promise
from django.utils import six
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class that derives __str__ from __unicode__.
On Python 2, __str__ returns the output of __unicode__ encoded as a UTF-8
bytestring. On Python 3, __str__ returns the output of __unicode__.
Useful as a mix-in. If you support Python 2 and 3 with a single code base,
you can inherit this mix-in and just define __unicode__.
"""
def __init__(self, *args, **kwargs):
warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
"and apply the @python_2_unicode_compatible decorator "
"instead.", PendingDeprecationWarning, stacklevel=2)
super(StrAndUnicode, self).__init__(*args, **kwargs)
if six.PY3:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode('utf-8')
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if not six.PY3:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
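For illustration (editor's addition; the Person class is invented), a class that supports both Python versions defines only __str__ and applies the decorator:

@python_2_unicode_compatible
class Person(object):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        # return text; under Python 2 the decorator moves this method to
        # __unicode__ and installs a UTF-8-encoding __str__ in its place
        return self.name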
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, six.integer_types + (type(None), float, Decimal,
datetime.datetime, datetime.date, datetime.time))
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% when s is an instance of
# six.text_type. This function gets called often in that setting.
if isinstance(s, six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, six.string_types):
if hasattr(s, '__unicode__'):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_text(arg, encoding, strings_only,
errors) for arg in s])
return s
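A short illustration (editor's addition; the ugettext_lazy import is an assumption about the caller's code) of how the two helpers differ on lazy translation objects, the usual source of Promise instances:

from django.utils.translation import ugettext_lazy

lazy_msg = ugettext_lazy('hello')
smart_text(lazy_msg)        # returns the lazy Promise unchanged
force_text(lazy_msg)        # evaluates it to real text
force_text(b'caf\xc3\xa9')  # decodes UTF-8 bytes to u'café'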
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and (s is None or isinstance(s, int)):
return s
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b' '.join([force_bytes(arg, encoding, strings_only,
errors) for arg in s])
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
if six.PY3:
smart_str = smart_text
force_str = force_text
else:
smart_str = smart_bytes
force_str = force_bytes
# backwards compatibility for Python 2
smart_unicode = smart_text
force_unicode = force_text
smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
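For example (editor's addition), reserved URI characters pass through unchanged while non-ASCII characters are percent-encoded as UTF-8:

iri_to_uri(u'/myapp/søk/?q=héllo')
# -> '/myapp/s%C3%B8k/?q=h%C3%A9llo'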
def filepath_to_uri(path):
"""Convert an file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(force_bytes(path.replace("\\", "/")), safe=b"/~!*()'")
# The encoding of the default system locale; falls back to the given
# fallback encoding if the encoding is unsupported by Python or could
# not be determined. See tickets #10335 and #5846
try:
DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(DEFAULT_LOCALE_ENCODING)
except Exception:
DEFAULT_LOCALE_ENCODING = 'ascii'
| blaze33/django | django/utils/encoding.py | Python | bsd-3-clause | 9,166 | 0.001855 |
def foo(a_new, b_new):
print(a_new + b_new * 123)
def f():
a = 1
b = 1
foo(a, b)
| IllusionRom-deprecated/android_platform_tools_idea | python/testData/refactoring/extractmethod/Statement.after.py | Python | apache-2.0 | 98 | 0.010204 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from django.conf.urls.defaults import *
from django.conf import settings
INSTANCES = r'^(?P<tenant_id>[^/]+)/instances/(?P<instance_id>[^/]+)/%s$'
IMAGES = r'^(?P<tenant_id>[^/]+)/images/(?P<image_id>[^/]+)/%s$'
KEYPAIRS = r'^(?P<tenant_id>[^/]+)/keypairs/%s$'
urlpatterns = patterns('django_openstack.dash.views.instances',
url(r'^(?P<tenant_id>[^/]+)/$', 'usage', name='dash_usage'),
url(r'^(?P<tenant_id>[^/]+)/instances/$', 'index', name='dash_instances'),
url(INSTANCES % 'console', 'console', name='dash_instances_console'),
url(INSTANCES % 'vnc', 'vnc', name='dash_instances_vnc'),
)
urlpatterns += patterns('django_openstack.dash.views.images',
url(r'^(?P<tenant_id>[^/]+)/images/$', 'index', name='dash_images'),
url(IMAGES % 'launch', 'launch', name='dash_images_launch'),
)
urlpatterns += patterns('django_openstack.dash.views.keypairs',
url(r'^(?P<tenant_id>[^/]+)/keypairs/$', 'index', name='dash_keypairs'),
url(KEYPAIRS % 'create', 'create', name='dash_keypairs_create'),
)
| termie/openstack-dashboard | django-openstack/src/django_openstack/dash/urls.py | Python | apache-2.0 | 1,070 | 0.002804 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import time
from django.db import connection
from django_logutils.conf import settings
logger = logging.getLogger(__name__)
def create_log_dict(request, response):
"""
Create a dictionary with logging data.
"""
remote_addr = request.META.get('REMOTE_ADDR')
if remote_addr in getattr(settings, 'INTERNAL_IPS', []):
remote_addr = request.META.get(
'HTTP_X_FORWARDED_FOR') or remote_addr
user_email = "-"
if hasattr(request, 'user'):
user_email = getattr(request.user, 'email', '-')
if response.streaming:
content_length = 'streaming'
else:
content_length = len(response.content)
return {
# 'event' makes event-based filtering possible in logging backends
# like logstash
'event': settings.LOGUTILS_LOGGING_MIDDLEWARE_EVENT,
'remote_address': remote_addr,
'user_email': user_email,
'method': request.method,
'url': request.get_full_path(),
'status': response.status_code,
'content_length': content_length,
'request_time': -1, # NA value: real value added by LoggingMiddleware
}
def create_log_message(log_dict, use_sql_info=False, fmt=True):
"""
Create the logging message string.
"""
log_msg = (
"%(remote_address)s %(user_email)s %(method)s %(url)s %(status)d "
"%(content_length)d (%(request_time).2f seconds)"
)
if use_sql_info:
sql_time = sum(
float(q['time']) for q in connection.queries) * 1000
extra_log = {
'nr_queries': len(connection.queries),
'sql_time': sql_time}
log_msg += " (%(nr_queries)d SQL queries, %(sql_time)f ms)"
log_dict.update(extra_log)
return log_msg % log_dict if fmt else log_msg
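A small usage sketch (editor's addition; all values are invented) showing the message produced from a dict shaped like the one create_log_dict returns:

sample = {
    'remote_address': '127.0.0.1',
    'user_email': 'user@example.com',
    'method': 'GET',
    'url': '/home/',
    'status': 200,
    'content_length': 1024,
    'request_time': 0.25,
}
print(create_log_message(sample))
# 127.0.0.1 user@example.com GET /home/ 200 1024 (0.25 seconds)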
class LoggingMiddleware(object):
"""
    Captures request info and logs it.
    Logs all requests at log level INFO. If a request takes longer than
    REQUEST_TIME_THRESHOLD, log level WARNING is used.
Logging middleware that captures the following:
* logging event.
* remote address (whether proxied or direct).
* if authenticated, then user email address.
* request method (GET/POST etc).
* request full path.
* response status code (200, 404 etc).
* content length.
* request process time.
    * if DEBUG=True or REQUEST_TIME_THRESHOLD is exceeded, also logs SQL
      query information - number of queries and how long they took.
Based on: https://djangosnippets.org/snippets/2624/
"""
def __init__(self, *args, **kwargs):
"""
Add initial empty start_time.
"""
self.start_time = None
def process_request(self, request):
"""
Add start time to request.
"""
self.start_time = time.time()
def process_response(self, request, response):
"""
        Create the logging message and log it.
"""
try:
log_dict = create_log_dict(request, response)
# add the request time to the log_dict; if no start time is
# available, use -1 as NA value
request_time = (
time.time() - self.start_time if hasattr(self, 'start_time')
and self.start_time else -1)
log_dict.update({'request_time': request_time})
is_request_time_too_high = (
request_time > float(settings.LOGUTILS_REQUEST_TIME_THRESHOLD))
use_sql_info = settings.DEBUG or is_request_time_too_high
log_msg = create_log_message(log_dict, use_sql_info, fmt=False)
if is_request_time_too_high:
logger.warning(log_msg, log_dict, extra=log_dict)
else:
logger.info(log_msg, log_dict, extra=log_dict)
except Exception as e:
logger.exception(e)
return response
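To enable the middleware it has to be listed in the project settings; a minimal sketch (editor's addition), assuming the old-style setting that matches the process_request/process_response API used above:

# settings.py
MIDDLEWARE_CLASSES = (
    'django_logutils.middleware.LoggingMiddleware',
    # ... other middleware ...
)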
| jsmits/django-logutils | django_logutils/middleware.py | Python | bsd-3-clause | 4,098 | 0 |
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import numpy.random
import scipy.stats
import functools
from collections import defaultdict
from nose import SkipTest
from nose.tools import assert_greater
from nose.tools import assert_in
from nose.tools import assert_is_instance
from nose.tools import assert_not_equal
from nose.tools import assert_true
from goftests import density_goodness_of_fit
from goftests import discrete_goodness_of_fit
from goftests import vector_density_goodness_of_fit
from distributions.dbg.random import sample_discrete
from distributions.util import scores_to_probs
from distributions.tests.util import assert_all_close
from distributions.tests.util import assert_close
from distributions.tests.util import assert_hasattr
from distributions.tests.util import import_model
from distributions.tests.util import list_models
from distributions.tests.util import seed_all
try:
import distributions.io.schema_pb2
has_protobuf = True
except ImportError:
has_protobuf = False
DATA_COUNT = 20
SAMPLE_COUNT = 1000
MIN_GOODNESS_OF_FIT = 1e-3
MODULES = {
'{flavor}.models.{name}'.format(**spec): import_model(spec)
for spec in list_models()
}
IS_FAST = {'dbg': False, 'hp': True, 'lp': True}
def model_is_fast(model):
flavor = model.__name__.split('.')[1]
return IS_FAST[flavor]
def iter_examples(module):
assert_hasattr(module, 'EXAMPLES')
EXAMPLES = module.EXAMPLES
assert_is_instance(EXAMPLES, list)
assert_true(EXAMPLES, 'no examples provided')
for i, EXAMPLE in enumerate(EXAMPLES):
print 'example {}/{}'.format(1 + i, len(EXAMPLES))
assert_in('shared', EXAMPLE)
assert_in('values', EXAMPLE)
values = EXAMPLE['values']
assert_is_instance(values, list)
count = len(values)
assert_true(
count >= 7,
'Add more example values (expected >= 7, found {})'.format(count))
yield EXAMPLE
def for_each_model(*filters):
'''
    Run one test per Model, filtering out Models inappropriate for the test.
'''
def filtered(test_fun):
@functools.wraps(test_fun)
def test_one_model(name):
module = MODULES[name]
assert_hasattr(module, 'Shared')
for EXAMPLE in iter_examples(module):
test_fun(module, EXAMPLE)
@functools.wraps(test_fun)
def test_all_models():
for name in MODULES:
module = MODULES[name]
if all(f(module) for f in filters):
yield test_one_model, name
return test_all_models
return filtered
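For orientation (editor's note; the model name shown is invented), the decorated functions below are nose generator tests: test_all_models yields one (test_one_model, name) pair per entry in MODULES, and nose runs each pair as a separate test case:

@for_each_model()
def test_example(module, EXAMPLE):
    # runs once per (module, EXAMPLE) pair, e.g. for 'dbg.models.bb'
    assert_true(EXAMPLE['values'], 'example has no values')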
@for_each_model()
def test_value(module, EXAMPLE):
assert_hasattr(module, 'Value')
assert_is_instance(module.Value, type)
values = EXAMPLE['values']
for value in values:
assert_is_instance(value, module.Value)
@for_each_model()
def test_shared(module, EXAMPLE):
assert_hasattr(module, 'Shared')
assert_is_instance(module.Shared, type)
shared1 = module.Shared.from_dict(EXAMPLE['shared'])
shared2 = module.Shared.from_dict(EXAMPLE['shared'])
assert_close(shared1.dump(), EXAMPLE['shared'])
values = EXAMPLE['values']
seed_all(0)
for value in values:
shared1.add_value(value)
seed_all(0)
for value in values:
shared2.add_value(value)
assert_close(shared1.dump(), shared2.dump())
for value in values:
shared1.remove_value(value)
assert_close(shared1.dump(), EXAMPLE['shared'])
@for_each_model()
def test_group(module, EXAMPLE):
assert_hasattr(module, 'Group')
assert_is_instance(module.Group, type)
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
for value in values:
shared.add_value(value)
group1 = module.Group()
group1.init(shared)
for value in values:
group1.add_value(shared, value)
group2 = module.Group.from_values(shared, values)
assert_close(group1.dump(), group2.dump())
group = module.Group.from_values(shared, values)
dumped = group.dump()
group.init(shared)
group.load(dumped)
assert_close(group.dump(), dumped)
for value in values:
group2.remove_value(shared, value)
assert_not_equal(group1, group2)
group2.merge(shared, group1)
for value in values:
group1.score_value(shared, value)
for _ in xrange(10):
value = group1.sample_value(shared)
group1.score_value(shared, value)
module.sample_group(shared, 10)
group1.score_data(shared)
group2.score_data(shared)
@for_each_model(lambda module: hasattr(module.Shared, 'protobuf_load'))
def test_protobuf(module, EXAMPLE):
if not has_protobuf:
raise SkipTest('protobuf not available')
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
Message = getattr(distributions.io.schema_pb2, module.NAME)
message = Message.Shared()
shared.protobuf_dump(message)
shared2 = module.Shared()
shared2.protobuf_load(message)
assert_close(shared2.dump(), shared.dump())
message.Clear()
dumped = shared.dump()
module.Shared.to_protobuf(dumped, message)
assert_close(module.Shared.from_protobuf(message), dumped)
if hasattr(module.Group, 'protobuf_load'):
for value in values:
shared.add_value(value)
group = module.Group.from_values(shared, values)
message = Message.Group()
group.protobuf_dump(message)
group2 = module.Group()
group2.protobuf_load(message)
assert_close(group2.dump(), group.dump())
message.Clear()
dumped = group.dump()
module.Group.to_protobuf(dumped, message)
assert_close(module.Group.from_protobuf(message), dumped)
@for_each_model()
def test_add_remove(module, EXAMPLE):
# Test group_add_value, group_remove_value, score_data, score_value
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
values = []
group = module.Group.from_values(shared)
score = 0.0
assert_close(group.score_data(shared), score, err_msg='p(empty) != 1')
for _ in range(DATA_COUNT):
value = group.sample_value(shared)
values.append(value)
score += group.score_value(shared, value)
group.add_value(shared, value)
group_all = module.Group.from_dict(group.dump())
assert_close(
score,
group.score_data(shared),
err_msg='p(x1,...,xn) != p(x1) p(x2|x1) p(xn|...)')
numpy.random.shuffle(values)
for value in values:
group.remove_value(shared, value)
group_empty = module.Group.from_values(shared)
assert_close(
group.dump(),
group_empty.dump(),
err_msg='group + values - values != group')
numpy.random.shuffle(values)
for value in values:
group.add_value(shared, value)
assert_close(
group.dump(),
group_all.dump(),
err_msg='group - values + values != group')
@for_each_model()
def test_add_repeated(module, EXAMPLE):
# Test add_repeated value vs n * add
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for value in EXAMPLE['values']:
group = module.Group.from_values(shared)
for _ in range(DATA_COUNT):
group.add_value(shared, value)
group_repeated = module.Group.from_values(shared)
group_repeated.add_repeated_value(shared, value, count=DATA_COUNT)
assert_close(
group.dump(),
group_repeated.dump(),
err_msg='n * add_value != add_repeated_value n')
@for_each_model()
def test_add_merge(module, EXAMPLE):
# Test group_add_value, group_merge
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values'][:]
for value in values:
shared.add_value(value)
numpy.random.shuffle(values)
group = module.Group.from_values(shared, values)
for i in xrange(len(values) + 1):
numpy.random.shuffle(values)
group1 = module.Group.from_values(shared, values[:i])
group2 = module.Group.from_values(shared, values[i:])
group1.merge(shared, group2)
assert_close(group.dump(), group1.dump())
@for_each_model()
def test_group_merge(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
group1 = module.Group.from_values(shared)
group2 = module.Group.from_values(shared)
expected = module.Group.from_values(shared)
actual = module.Group.from_values(shared)
for _ in xrange(100):
value = expected.sample_value(shared)
expected.add_value(shared, value)
group1.add_value(shared, value)
value = expected.sample_value(shared)
expected.add_value(shared, value)
group2.add_value(shared, value)
actual.load(group1.dump())
actual.merge(shared, group2)
assert_close(actual.dump(), expected.dump())
@for_each_model(lambda module: module.Value in [bool, int])
def test_group_allows_debt(module, EXAMPLE):
# Test that group.add_value can safely go into data debt
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
values = []
group1 = module.Group.from_values(shared, values)
for _ in range(DATA_COUNT):
value = group1.sample_value(shared)
values.append(value)
group1.add_value(shared, value)
group2 = module.Group.from_values(shared)
pos_values = [(v, +1) for v in values]
neg_values = [(v, -1) for v in values]
signed_values = pos_values * 3 + neg_values * 2
numpy.random.shuffle(signed_values)
for value, sign in signed_values:
if sign > 0:
group2.add_value(shared, value)
else:
group2.remove_value(shared, value)
assert_close(group1.dump(), group2.dump())
@for_each_model()
def test_sample_seed(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
seed_all(0)
group1 = module.Group.from_values(shared)
values1 = [group1.sample_value(shared) for _ in xrange(DATA_COUNT)]
seed_all(0)
group2 = module.Group.from_values(shared)
values2 = [group2.sample_value(shared) for _ in xrange(DATA_COUNT)]
assert_close(values1, values2, err_msg='values')
@for_each_model()
def test_sample_value(module, EXAMPLE):
seed_all(0)
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for values in [[], EXAMPLE['values']]:
group = module.Group.from_values(shared, values)
sample_count = SAMPLE_COUNT
if module.Value == numpy.ndarray:
sample_count *= 10
samples = [group.sample_value(shared) for _ in xrange(sample_count)]
if module.Value in [bool, int]:
probs_dict = {
value: math.exp(group.score_value(shared, value))
for value in set(samples)
}
gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
elif module.Value == float:
probs = numpy.exp([
group.score_value(shared, value)
for value in samples
])
gof = density_goodness_of_fit(samples, probs, plot=True)
elif module.Value == numpy.ndarray:
if module.__name__ == 'distributions.lp.models.niw':
raise SkipTest('FIXME known sampling bug')
probs = numpy.exp([
group.score_value(shared, value)
for value in samples
])
gof = vector_density_goodness_of_fit(samples, probs, plot=True)
else:
raise SkipTest('Not implemented for {}'.format(module.Value))
print '{} gof = {:0.3g}'.format(module.__name__, gof)
assert_greater(gof, MIN_GOODNESS_OF_FIT)
@for_each_model()
def test_sample_group(module, EXAMPLE):
seed_all(0)
SIZE = 2
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for values in [[], EXAMPLE['values']]:
if module.Value in [bool, int]:
samples = []
probs_dict = {}
for _ in xrange(SAMPLE_COUNT):
values = module.sample_group(shared, SIZE)
sample = tuple(values)
samples.append(sample)
group = module.Group.from_values(shared, values)
probs_dict[sample] = math.exp(group.score_data(shared))
gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
else:
raise SkipTest('Not implemented for {}'.format(module.Value))
print '{} gof = {:0.3g}'.format(module.__name__, gof)
assert_greater(gof, MIN_GOODNESS_OF_FIT)
def _append_ss(group, aggregator):
ss = group.dump()
for key, val in ss.iteritems():
if isinstance(val, list):
for i, v in enumerate(val):
aggregator['{}_{}'.format(key, i)].append(v)
elif isinstance(val, dict):
for k, v in val.iteritems():
aggregator['{}_{}'.format(key, k)].append(v)
else:
aggregator[key].append(val)
def sample_marginal_conditional(module, shared, value_count):
values = module.sample_group(shared, value_count)
group = module.Group.from_values(shared, values)
return group
def sample_successive_conditional(module, shared, group, value_count):
sampler = module.Sampler()
sampler.init(shared, group)
values = [sampler.eval(shared) for _ in xrange(value_count)]
new_group = module.Group.from_values(shared, values)
return new_group
@for_each_model(model_is_fast)
def test_joint(module, EXAMPLE):
# \cite{geweke04getting}
seed_all(0)
SIZE = 10
SKIP = 100
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
marginal_conditional_samples = defaultdict(lambda: [])
successive_conditional_samples = defaultdict(lambda: [])
cond_group = sample_marginal_conditional(module, shared, SIZE)
for _ in xrange(SAMPLE_COUNT):
marg_group = sample_marginal_conditional(module, shared, SIZE)
_append_ss(marg_group, marginal_conditional_samples)
for __ in range(SKIP):
cond_group = sample_successive_conditional(
module,
shared,
cond_group,
SIZE)
_append_ss(cond_group, successive_conditional_samples)
for key in marginal_conditional_samples.keys():
gof = scipy.stats.ttest_ind(
marginal_conditional_samples[key],
successive_conditional_samples[key])[1]
if isinstance(gof, numpy.ndarray):
raise SkipTest('XXX: handle array case, gof = {}'.format(gof))
print '{}:{} gof = {:0.3g}'.format(module.__name__, key, gof)
if not numpy.isfinite(gof):
raise SkipTest('Test fails with gof = {}'.format(gof))
assert_greater(gof, MIN_GOODNESS_OF_FIT)
@for_each_model(lambda module: hasattr(module.Shared, 'scorer_create'))
def test_scorer(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
group = module.Group.from_values(shared)
scorer1 = shared.scorer_create()
scorer2 = shared.scorer_create(group)
for value in values:
score1 = shared.scorer_eval(scorer1, value)
score2 = shared.scorer_eval(scorer2, value)
score3 = group.score_value(shared, value)
assert_all_close([score1, score2, score3])
@for_each_model(lambda module: hasattr(module, 'Mixture'))
def test_mixture_runs(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
mixture = module.Mixture()
for value in values:
shared.add_value(value)
mixture.append(module.Group.from_values(shared, [value]))
mixture.init(shared)
groupids = []
for value in values:
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
mixture.score_value(shared, value, scores)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
mixture.add_value(shared, groupid, value)
groupids.append(groupid)
mixture.add_group(shared)
assert len(mixture) == len(values) + 1
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
for value, groupid in zip(values, groupids):
mixture.remove_value(shared, groupid, value)
mixture.remove_group(shared, 0)
if module.__name__ == 'distributions.lp.models.dpd':
raise SkipTest('FIXME known segfault here')
mixture.remove_group(shared, len(mixture) - 1)
assert len(mixture) == len(values) - 1
for value in values:
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
mixture.score_value(shared, value, scores)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
mixture.add_value(shared, groupid, value)
@for_each_model(lambda module: hasattr(module, 'Mixture'))
def test_mixture_score(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
for value in values:
shared.add_value(value)
groups = [module.Group.from_values(shared, [value]) for value in values]
mixture = module.Mixture()
for group in groups:
mixture.append(group)
mixture.init(shared)
def check_score_value(value):
expected = [group.score_value(shared, value) for group in groups]
actual = numpy.zeros(len(mixture), dtype=numpy.float32)
noise = numpy.random.randn(len(actual))
actual += noise
mixture.score_value(shared, value, actual)
actual -= noise
assert_close(actual, expected, err_msg='score_value {}'.format(value))
another = [
mixture.score_value_group(shared, i, value)
for i in xrange(len(groups))
]
assert_close(
another,
expected,
err_msg='score_value_group {}'.format(value))
return actual
def check_score_data():
expected = sum(group.score_data(shared) for group in groups)
actual = mixture.score_data(shared)
assert_close(actual, expected, err_msg='score_data')
print 'init'
for value in values:
check_score_value(value)
check_score_data()
print 'adding'
groupids = []
for value in values:
scores = check_score_value(value)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
groups[groupid].add_value(shared, value)
mixture.add_value(shared, groupid, value)
groupids.append(groupid)
check_score_data()
print 'removing'
for value, groupid in zip(values, groupids):
groups[groupid].remove_value(shared, value)
mixture.remove_value(shared, groupid, value)
scores = check_score_value(value)
check_score_data()
| fritzo/distributions | distributions/tests/test_models.py | Python | bsd-3-clause | 20,478 | 0 |
"""Tests for the Volumio integration."""
| jawilson/home-assistant | tests/components/volumio/__init__.py | Python | apache-2.0 | 41 | 0 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Sergi Blanch-Torne"
__email__ = "sblanch@cells.es"
__copyright__ = "Copyright 2016 CELLS/Alba synchrotron"
__license__ = "GPLv3+"
__status__ = "development"
__all__ = ["Logger", "Dog", "WatchdogTester", "main"]
__docformat__ = 'restructuredtext'
try:
from fandango import Astor # soft dependency
except Exception:
Astor = None
import email.mime.text
import smtplib
from socket import gethostname
from PyTango import DeviceProxy, DevState, EventType, DevFailed
from time import sleep, time
from threading import Thread, Event
import traceback
DEFAULT_RECHECK_TIME = 90.0 # seconds
DEFAULT_nOVERLAPS_ALERT = 10
DEFAULT_ASTOR_nSTOPS = 2
DEFAULT_ASTOR_STOPWAIT = 3 # seconds
SEPARATOR = "\\"
class Logger(object):
def __init__(self, parent, *args, **kwargs):
super(Logger, self).__init__(*args, **kwargs)
self._parent = parent
# --- tango streams
self.error_stream = parent.error_stream
self.warn_stream = parent.warn_stream
self.info_stream = parent.info_stream
self.debug_stream = parent.debug_stream
# --- tango event retransmission
self.fireEventsList = parent.fireEventsList
# --- running
self.isInRunningLst = parent.isInRunningLst
self.appendToRunning = parent.appendToRunning
self.removeFromRunning = parent.removeFromRunning
# --- fault
self.isInFaultLst = parent.isInFaultLst
self.appendToFault = parent.appendToFault
self.removeFromFault = parent.removeFromFault
# --- hang
self.isInHangLst = parent.isInHangLst
self.appendToHang = parent.appendToHang
self.removeFromHang = parent.removeFromHang
# --- mailto
self.mailto = parent.mailto
    def fireEvent(self, attrName, value, timestamp=None, quality=None):
        # relies on self.devName, which the Dog subclass provides
attrFullName = "%s%s%s"\
% (self.devName.replace("/", SEPARATOR), SEPARATOR, attrName)
try:
if timestamp and quality:
self.fireEventsList([[attrFullName, value, timestamp,
quality]])
else:
self.fireEventsList([[attrFullName, value]])
except Exception as e:
self.error_stream("Cannot fire event for %s/%s: %s"
% (self.devName, attrName, e))
traceback.print_exc()
class Dog(Logger):
def __init__(self, devName, joinerEvent=None, startDelay=None,
extraAttrs=None, *args, **kwargs):
super(Dog, self).__init__(*args, **kwargs)
self._devName = devName
self._devProxy = None
self._eventId = None
self._devState = None
# --- fault vbles
self._tryFaultRecovery = False
self._faultRecoveryCtr = 0
self._devStatus = None
# --- hangVbles
self._tryHangRecovery = False
self._hangRecoveryCtr = 0
# --- Thread for hang monitoring
self._joinerEvent = joinerEvent
self._thread = None
self._recheckPeriod = DEFAULT_RECHECK_TIME
self._overlaps = 0
self._overlapsAlert = DEFAULT_nOVERLAPS_ALERT
# --- extra attributes
self._extraAttributes = []
self._extraEventIds = {}
self._extraAttrValues = {}
        for attrName in (extraAttrs or []):
attrName = attrName.lower()
self._extraAttributes.append(attrName)
self._extraEventIds[attrName] = None
self._extraAttrValues[attrName] = None
# --- build proxy and event subscriptions
self.__buildProxy()
self.__createThread(startDelay)
def __str__(self):
return "Dog(%s, state=%s)" % (self.devName, self.devState)
def __repr__(self):
return "Dog(%s, state=%s, faultRecovery=%s, hangRecovery=%s)"\
% (self.devName, self.devState, self.tryFaultRecovery,
self.tryHangRecovery)
# --- object properties
@property
def devName(self):
return self._devName
@property
def devProxy(self):
return self._devProxy
@property
def devState(self):
return self._devState
def hasExtraAttr(self, attrName):
return self._extraAttributes.count(attrName.lower()) > 0
def getExtraAttr(self, attrName):
try:
value = self._devProxy[attrName].value
timestamp = self._devProxy[attrName].time.totime()
quality = self._devProxy[attrName].quality
if value != self._extraAttrValues[attrName]:
self.debug_stream("%s/%s has changed from %s to %s"
% (self.devName, attrName,
self._extraAttrValues[attrName], value))
self._extraAttrValues[attrName] = value
self.fireEvent(attrName, value, timestamp, quality)
if self.isInHangLst(self.devName):
self.removeFromHang(self.devName)
return value
except DevFailed as e:
if not self.isInHangLst(self.devName):
try:
self.devProxy.State()
                except Exception:
self.appendToHang(self.devName)
if e[0].reason in ['ATTRIBUTE_UNAVAILABLE',
'SOFTWARE_FAILURE']:
return
self.warn_stream("%s/%s read exception: %r %s"
% (self.devName, attrName,
e[0].reason, e[0].desc))
except Exception as e:
self.error_stream("%s/%s read exception: %s"
% (self.devName, attrName, e))
raise Exception("%s/%s cannot be read" % (self.devName, attrName))
def setExtraAttr(self, attrName, value):
try:
self.info_stream("Writing %s/%s with %s"
% (self.devName, attrName, str(value)))
self._devProxy[attrName] = value
if self.isInHangLst(self.devName):
self.removeFromHang(self.devName)
except DevFailed as e:
if not self.isInHangLst(self.devName):
try:
self.devProxy.State()
                except Exception:
self.appendToHang(self.devName)
self.warn_stream("%s/%s write exception: %r %s"
% (self.devName, attrName,
e[0].reason, e[0].desc))
except Exception as e:
self.error_stream("%s/%s write exception: %s"
% (self.devName, attrName, e))
raise Exception("%s/%s cannot be write" % (self.devName, attrName))
@property
def tryFaultRecovery(self):
return self._tryFaultRecovery
@tryFaultRecovery.setter
def tryFaultRecovery(self, value):
if type(value) == bool:
self._tryFaultRecovery = value
else:
self.error_stream("Only boolean assignment")
@property
def tryHangRecovery(self):
return self._tryHangRecovery
    @tryHangRecovery.setter
def tryHangRecovery(self, value):
if type(value) == bool:
if value and not Astor:
self.error_stream("This feature is only available with "
"fandango's Astor present")
return
self._tryHangRecovery = value
else:
self.error_stream("Only boolean assignment")
@property
def recheckPeriod(self):
return self._recheckPeriod
@property
def overlapsAlert(self):
return self._overlapsAlert
# --- Constructor methods
def __buildProxy(self):
try:
self._devProxy = DeviceProxy(self._devName)
self.__subscribe_event()
except Exception as e:
self.error_stream("%s proxy not available: %s"
% (self._devName, e))
self.appendToHang(self.devName)
def __subscribe_event(self):
self._eventId = \
self._devProxy.subscribe_event('State',
EventType.CHANGE_EVENT,
self)
self.debug_stream("Subscribed to %s/State (id=%d)"
% (self.devName, self._eventId))
self.__subscribe_extraAttrs()
def __unsubscribe_event(self):
if self._eventId:
try:
self._devProxy.unsubscribe_event(self._eventId)
except Exception as e:
self.error_stream("%s failed to unsubscribe event: %s"
% (self.devName, e))
self._eventId = None
else:
self.warn_stream("%s no event id to unsubscribe." % (self.devName))
self.__unsubscribe_extraAttrs()
def __subscribe_extraAttrs(self):
for attrName in self._extraAttributes:
try:
self._extraEventIds[attrName] = \
self._devProxy.subscribe_event(attrName,
EventType.CHANGE_EVENT,
self)
self.debug_stream("Subscribed to %s/%s (id=%d)"
% (self.devName, attrName,
self._extraEventIds[attrName]))
except DevFailed as e:
self.warn_stream("%s/%s failed to subscribe event: %s"
% (self.devName, attrName, e[0].desc))
except Exception as e:
self.error_stream("%s/%s failed to subscribe event: %s"
% (self.devName, attrName, e))
def __unsubscribe_extraAttrs(self):
for attrName in self._extraEventIds.keys():
if self._extraEventIds[attrName]:
try:
self._devProxy.\
unsubscribe_event(self._extraEventIds[attrName])
except Exception as e:
self.error_stream("%s/%s failed to unsubscribe event: %s"
% (self.devName, attrName, e))
self._extraEventIds[attrName] = None
else:
self.warn_stream("%s/%s no event id to unsubscribe."
% (self.devName, attrName))
def __createThread(self, startDelay):
try:
self._thread = Thread(target=self.__hangMonitorThread,
args=(startDelay,))
self._thread.setDaemon(True)
if startDelay > 0:
self.info_stream("Monitor %s will wait %g seconds until "
"thread start" % (self.devName, startDelay))
else:
self.info_stream("Monitor %s will start the thread "
"immediately" % (self.devName))
self._thread.start()
except Exception as e:
self.error_stream("%s hang monitor thread creation fail: %s"
% (self.devName, e))
traceback.print_exc()
# --- Events
def push_event(self, event):
try:
if event is None:
return
if not hasattr(event, 'attr_value') or event.attr_value is None \
or event.attr_value.value is None:
# self.debug_stream("%s push_event() %s: value has None type"
# %(self.devName, event.attr_name))
return
try: # ---FIXME: Ugly!! but it comes with a fullname
# nameSplit = event.attr_name.split('/', 3)
# it may start as tango://...
nameSplit = event.attr_name.rsplit('/', 4)[-4:]
domain, family, member, attrName = nameSplit
except Exception as e:
self.error_stream("%s push_event() error splitting the "
"attr_name %s from the event."
% (self.devName, event.attr_name))
return
devName = "%s/%s/%s" % (domain, family, member)
attrName = attrName.lower()
if devName != self.devName:
self.error_stream("Event received doesn't correspond with "
"who the listener expects (%s != %s)"
% (devName, self.devName))
return
# ---
if attrName == 'state':
self.info_stream("%s push_event() value = %s"
% (self.devName, event.attr_value.value))
self.__checkDeviceState(event.attr_value.value)
self.fireEvent('State', event.attr_value.value)
elif attrName in self._extraAttributes:
self.debug_stream("%s/%s push_event() value = %s"
% (self.devName, attrName,
event.attr_value.value))
self.fireEvent(attrName, event.attr_value.value)
self._extraAttrValues[attrName] = event.attr_value.value
else:
self.warn_stream("%s/%s push_event() unmanaged attribute "
"(value = %s)" % (self.devName, attrName,
event.attr_value.value))
except Exception as e:
self.debug_stream("%s push_event() Exception %s"
% (self.devName, e))
traceback.print_exc()
# --- checks
def __checkDeviceState(self, newState=None):
if self.__stateHasChange(newState):
self.info_stream("%s state change from %s to %s"
% (self.devName, self._devState, newState))
oldState = self._devState
self._devState = newState
try:
# state change to one of the lists
if newState is DevState.RUNNING:
self.appendToRunning(self.devName)
elif newState is DevState.FAULT:
self.appendToFault(self.devName)
# state change from one of the lists
elif self.__wasRunning() or self.isInRunningLst(self.devName):
self.removeFromRunning(self.devName)
elif self.__wasInFault() or self.isInFaultLst(self.devName):
self.removeFromFault(self.devName)
self._faultRecoveryCtr = 0
# recover from Hang
if self.devState is None or self.isInHangLst(self.devName):
self.debug_stream("%s received state information after "
"hang, remove from the list."
% (self.devName))
self.removeFromHang(self.devName)
self._hangRecoveryCtr = 0
except Exception as e:
self.error_stream("%s: Exception processing a newer state "
"(restoring the previous %s): %s"
% (self.devName, oldState, e))
self._devState = oldState
self.info_stream("%s store newer state %s"
% (self.devName, self.devState))
# else: nothing change, nothing to do.
def __stateHasChange(self, newState):
return newState != self.devState
def __wasRunning(self):
return self.devState == DevState.RUNNING
def __wasInFault(self):
return self.devState == DevState.FAULT
# --- threading
def __hangMonitorThread(self, startDelay):
if startDelay > 0:
self.info_stream("%s watchdog build, wait %g until start"
% (self.devName, startDelay))
sleep(startDelay)
self.info_stream("%s launch background monitor" % (self.devName))
while not self._joinerEvent.isSet():
t_0 = time()
self.__manualCheck()
self.__waitNextCheck(time()-t_0)
def __manualCheck(self):
for i in range(2):
state = self.__stateRequest()
if state:
self.debug_stream("%s respond state %s"
% (self.devName, state))
break
if not state: # no answer from the device
if not self.isInHangLst(self.devName):
self.debug_stream("%s no state information." % (self.devName))
self.__unsubscribe_event()
self.appendToHang(self.devName)
# review any other list where it can be
if self.isInRunningLst(self.devName):
self.removeFromRunning(self.devName)
if self.isInFaultLst(self.devName):
self.removeFromFault(self.devName)
self._faultRecoveryCtr = 0
if self.devState is None and self.tryHangRecovery:
self.debug_stream("%s not state information by a second try."
% (self.devName))
# force to launch the recover after a second loop
self.__hangRecoveryProcedure()
self._devState = None
else:
if self.devState is None:
self.debug_stream("%s gives state information, back from hang."
% (self.devName))
if self.isInHangLst(self.devName):
self.removeFromHang(self.devName)
self._hangRecoveryCtr = 0
self._devState = state
self.__buildProxy()
if state == DevState.FAULT and self.tryFaultRecovery:
self.__faultRecoveryProcedure()
if self.devState != state:
# state has change but hasn't been cached by events
self._devState = state
def __stateRequest(self):
try:
return self.devProxy.State()
        except Exception:
            self.warn_stream("%s didn't respond to the state request"
                             % (self.devName))
return None
def __waitNextCheck(self, deltaT):
if deltaT < self._recheckPeriod:
toSleep = self._recheckPeriod-deltaT
self.debug_stream("%s monitor's thread required %g seconds"
"(go sleep for %g seconds)"
% (self.devName, deltaT, toSleep))
self._overlaps = 0
sleep(toSleep)
else:
self._overlaps += 1
if self._overlaps % self._overlapsAlert:
self.warn_stream("%s hang check has take more than loop time "
"(%g seconds). No sleep for another check."
% (self.devName, deltaT))
else: # when modulo self._overlapsAlert == 0
self.warn_stream("%s hang check has take more than loop time "
"(%g seconds). But %d consecutive, forcing "
"to sleep some time."
% (self.devName, deltaT, self._overlaps))
self.mailto("Recheck overlaps", "There has been %d "
"consecutive overlaps in the recheck thread"
% (self._recheckLoopOverlaps))
sleep(self._recheckPeriod)
def __faultRecoveryProcedure(self):
statusMsg = None
try:
if self._devProxy:
statusMsg = self._devProxy.status()
self._devProxy.Init()
else:
self.warn_stream("%s no proxy to command Init()"
% (self.devName))
except Exception as exceptionObj:
self.error_stream("%s in Fault recovery procedure Exception: %s"
% (self.devName, exceptionObj))
else:
self.debug_stream("%s Init() completed" % (self.devName))
exceptionObj = None
self._reportFaultProcedure(exceptionObj, statusMsg)
self._faultRecoveryCtr += 1
def __hangRecoveryProcedure(self):
try:
astor = Astor()
instance = astor.get_device_server(self.devName)
if not instance:
raise Exception("Astor didn't solve the "
"device server instance (%s)" % instance)
if not self.__forceRestartInstance(astor, instance):
self.error_stream("%s Astor cannot recover" % (self.devName))
except Exception as exceptionObj:
self.error_stream("%s __hangRecoveryProcedure() Exception %s"
% (self.devName, exceptionObj))
else:
exceptionObj = None
self._reportHangProcedure(instance, exceptionObj)
self._hangRecoveryCtr += 1
def __forceRestartInstance(self, astor, instance):
for i in range(DEFAULT_ASTOR_nSTOPS):
res = astor.stop_servers([instance])
if res:
break
sleep(DEFAULT_ASTOR_STOPWAIT)
self.debug_stream("%s Astor start %s" % (self.devName, instance))
return astor.start_servers([instance])
def _reportFaultProcedure(self, exceptionObj, statusMsg):
if self._faultRecoveryCtr == 0:
            # only report the first time it happens, no reminders
mailBody = "Applied the recovery from Fault procedure.\n"
mailBody = "%s\nAffected camera was: %s" % (mailBody, self.devName)
if exceptionObj:
mailBody = "%s\nEncoutered exceptions during the process:\n%s"\
% (mailBody, exceptionObj)
if statusMsg:
mailBody = "%s\n\nStatus before the Init(): %s"\
% (mailBody, statusMsg)
self._devStatus = statusMsg
mailBody = "%s\n--\nEnd transmission." % (mailBody)
# self.mailto("Device in FAULT state", mailBody)
def _reportHangProcedure(self, instance, exceptionObj):
if self._hangRecoveryCtr == 0:
            # only report the first time it happens, no reminders
mailBody = "Applied the recovery from Hang procedure.\n"
mailBody = "%s\nAffected camera was: %s" % (mailBody, self.devName)
if instance:
mailBody = "%s (instance: %s)" % (mailBody, instance)
if exceptionObj:
mailBody = "%s\nEncoutered exceptions during the process:\n%s"\
% (mailBody, exceptionObj)
mailBody = "%s\n--\nEnd transmission." % (mailBody)
# self.mailto("Device HANG", mailBody)
class WatchdogTester(object):
def __init__(self, deviceLst, joinerEvent, *args, **kwargs):
super(WatchdogTester, self).__init__(*args, **kwargs)
self._monitorsLst = []
self._runningLst = []
self._faultLst = []
self._hangLst = []
for deviceName in deviceLst:
            dog = Dog(deviceName, joinerEvent=joinerEvent, parent=self)
dog.tryFaultRecovery = True
dog.tryHangRecovery = True
self._monitorsLst.append(dog)
def error_stream(self, msg):
print("ERROR:\t%s" % msg)
def warn_stream(self, msg):
print("WARN:\t%s" % msg)
def info_stream(self, msg):
print("INFO:\t%s" % msg)
def debug_stream(self, msg):
print("DEBUG:\t%s" % msg)
def isInRunningLst(self, who):
        return self.isInLst(self._runningLst, who)
def appendToRunning(self, who):
self.appendToLst(self._runningLst, "running", who)
def removeFromRunning(self, who):
self.removeFromLst(self._runningLst, "running", who)
def isInFaultLst(self, who):
        return self.isInLst(self._faultLst, who)
def appendToFault(self, who):
self.appendToLst(self._faultLst, "fault", who)
def removeFromFault(self, who):
self.removeFromLst(self._faultLst, "fault", who)
def isInHangLst(self, who):
        return self.isInLst(self._hangLst, who)
def appendToHang(self, who):
self.appendToLst(self._hangLst, "hang", who)
def removeFromHang(self, who):
self.removeFromLst(self._hangLst, "hang", who)
def isInLst(self, lst, who):
return lst.count(who)
def appendToLst(self, lst, lstName, who):
if not lst.count(who):
lst.append(who)
self.debug_stream("%s append to %s list" % (who, lstName))
else:
self.warn_stream("%s was already in the %s list" % (who, lstName))
def removeFromLst(self, lst, lstName, who):
if lst.count(who):
lst.pop(lst.index(who))
self.debug_stream("%s removed from %s list" % (who, lstName))
else:
self.warn_stream("%s was NOT in the %s list" % (who, lstName))
    def mailto(self, action, msg):
        # MailTo and get_name() are expected from the real Tango device
        # server; the standalone tester defines neither, so default to an
        # empty recipient list.
        if len(getattr(self, 'MailTo', [])) != 0:
name = self.get_name()
mail = email.mime.text.MIMEText(msg)
mail['From'] = "%s@%s" % (name, gethostname())
mail['To'] = ', '.join(self.MailTo)
mail['Subject'] = "[%s] %s" % (self.get_name(), action)
s = smtplib.SMTP('localhost')
s.sendmail(mail['From'], self.MailTo, mail.as_string())
s.quit()
self.debug_stream("Email sent...")
def main():
from optparse import OptionParser
import signal
import sys
def signal_handler(signal, frame):
print('\nYou pressed Ctrl+C!\n')
sys.exit(0)
parser = OptionParser()
parser.add_option('', "--devices",
help="List of device names to provide to the tester")
(options, args) = parser.parse_args()
if options.devices:
signal.signal(signal.SIGINT, signal_handler)
print("\n\tPress Ctrl+C to finish\n")
joinerEvent = Event()
joinerEvent.clear()
tester = WatchdogTester(options.devices.split(','), joinerEvent)
signal.pause()
del tester
if __name__ == '__main__':
main()
| srgblnch/TangoDeviceWatchdog | tango-ds/dog.py | Python | gpl-3.0 | 0.000185 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import shlex
import shutil
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
PY3 = sys.version_info[0] == 3
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
def notice(msg):
syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError:
e = sys.exc_info()[1]
sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
# decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
os.setsid()
os.umask(int('022', 8))
# do second fork
try:
pid = os.fork()
if pid > 0:
# print "Daemon PID %d" % pid
sys.exit(0)
except OSError:
e = sys.exc_info()[1]
sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
dev_null = open('/dev/null', 'w')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
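An illustration (editor's addition; the noisy output is invented) of what the filter returns:

noisy = u'MOTD banner\n{"changed": false}\ntrailing junk'
clean, warnings = _filter_non_json_lines(noisy)
# clean == u'{"changed": false}'
# warnings == ['Module invocation had junk after the JSON data: trailing junk']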
def _get_interpreter(module_path):
    module_fd = open(module_path, 'rb')
    try:
        head = module_fd.read(1024)
        if head[0:2] != b'#!':
            return None
        # decode the shebang so the interpreter argv is text on Python 3 too
        return head[2:head.index(b'\n')].decode('utf-8').strip().split(' ')
    finally:
        module_fd.close()
def _run_module(wrapped_cmd, jid, job_path):
tmp_job_path = job_path + ".tmp"
jobfile = open(tmp_job_path, "w")
jobfile.write(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid}))
jobfile.close()
os.rename(tmp_job_path, job_path)
jobfile = open(tmp_job_path, "w")
result = {}
outdata = ''
filtered_outdata = ''
stderr = ''
try:
cmd = shlex.split(wrapped_cmd)
# call the module interpreter directly (for non-binary modules)
# this permits use of a script for an interpreter on non-Linux platforms
interpreter = _get_interpreter(cmd[0])
if interpreter:
cmd = interpreter + cmd
script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(outdata, stderr) = script.communicate()
if PY3:
outdata = outdata.decode('utf-8', 'surrogateescape')
stderr = stderr.decode('utf-8', 'surrogateescape')
(filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
result = json.loads(filtered_outdata)
if json_warnings:
# merge JSON junk warnings with any existing module warnings
module_warnings = result.get('warnings', [])
if not isinstance(module_warnings, list):
module_warnings = [module_warnings]
module_warnings.extend(json_warnings)
result['warnings'] = module_warnings
if stderr:
result['stderr'] = stderr
jobfile.write(json.dumps(result))
except (OSError, IOError):
e = sys.exc_info()[1]
result = {
"failed": 1,
"cmd": wrapped_cmd,
"msg": str(e),
"outdata": outdata, # temporary notice only
"stderr": stderr
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
    except Exception:  # includes ValueError from json.loads
result = {
"failed": 1,
"cmd": wrapped_cmd,
"data": outdata, # temporary notice only
"stderr": stderr,
"msg": traceback.format_exc()
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
jobfile.close()
os.rename(tmp_job_path, job_path)
if __name__ == '__main__':
if len(sys.argv) < 5:
print(json.dumps({
"failed": True,
"msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
"Humans, do not call directly!"
}))
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
if '-tmp-' not in os.path.dirname(wrapped_module):
preserve_tmp = True
elif len(sys.argv) > 5:
preserve_tmp = sys.argv[5] == '-preserve_tmp'
else:
preserve_tmp = False
# consider underscore as no argsfile so we can support passing of additional positional parameters
if argsfile != '_':
cmd = "%s %s" % (wrapped_module, argsfile)
else:
cmd = wrapped_module
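
    # e.g. (hypothetical paths) wrapped_module = "/home/user/.ansible/tmp/ansible-tmp-1/ping.py"
    # and argsfile = "/home/user/.ansible/tmp/ansible-tmp-1/args" would yield
    # cmd = "/home/user/.ansible/tmp/ansible-tmp-1/ping.py /home/user/.ansible/tmp/ansible-tmp-1/args"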
step = 5
async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
# setup job output directory
jobdir = os.path.expanduser(async_dir)
job_path = os.path.join(jobdir, jid)
    if not os.path.exists(jobdir):
        try:
            os.makedirs(jobdir)
        except Exception:
            print(json.dumps({
                "failed": 1,
                "msg": "could not create: %s" % jobdir
            }))
            sys.exit(1)
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
# we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
notice("Return async_wrapper task started.")
print(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
"_ansible_suppress_tmpdir_delete": not preserve_tmp}))
sys.stdout.flush()
time.sleep(1)
sys.exit(0)
else:
# The actual wrapper process
# Daemonize, so we keep on running
daemonize_self()
# we are now daemonized, create a supervisory process
notice("Starting module and watcher")
sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)
# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)
notice("Start watching %s (%s)" % (sub_pid, remaining))
time.sleep(step)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
notice("%s still running (%s)" % (sub_pid, remaining))
time.sleep(step)
remaining = remaining - step
if remaining <= 0:
notice("Now killing %s" % (sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
notice("Sent kill to group %s " % sub_pid)
time.sleep(1)
if not preserve_tmp:
shutil.rmtree(os.path.dirname(wrapped_module), True)
sys.exit(0)
notice("Done in kid B.")
if not preserve_tmp:
shutil.rmtree(os.path.dirname(wrapped_module), True)
sys.exit(0)
else:
# the child process runs the actual module
notice("Start module (%s)" % os.getpid())
_run_module(cmd, jid, job_path)
notice("Module complete (%s)" % os.getpid())
sys.exit(0)
except SystemExit:
# On python2.4, SystemExit is a subclass of Exception.
# This block makes python2.4 behave the same as python2.5+
raise
except Exception:
e = sys.exc_info()[1]
notice("error: %s" % e)
print(json.dumps({
"failed": True,
"msg": "FATAL ERROR: %s" % e
}))
sys.exit(1)
|
alexlo03/ansible
|
lib/ansible/modules/utilities/logic/async_wrapper.py
|
Python
|
gpl-3.0
| 10,223
| 0.002152
|
# -*- coding: utf-8 -*-
""" Sahana Eden Human Resources Management
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("HRModel",
"HRSiteModel",
"HRSalaryModel",
"HRInsuranceModel",
#"HRJobModel",
"HRContractModel",
"HRSkillModel",
"HRTagModel",
"HRAppraisalModel",
"HRExperienceModel",
"HRAwardModel",
"HRDisciplinaryActionModel",
"HRProgrammeModel",
"HRShiftModel",
"HRDelegationModel",
"hrm_AssignMethod",
"hrm_competency_controller",
"hrm_compose",
"hrm_configure_pr_group_membership",
"hrm_credential_controller",
"hrm_CV",
"hrm_experience_controller",
"hrm_group_controller",
"hrm_human_resource_controller",
"hrm_human_resource_filters",
"hrm_HumanResourceRepresent",
"hrm_human_resource_onaccept",
"hrm_map_popup",
#"hrm_Medical",
"hrm_person_controller",
#"hrm_position_represent",
"hrm_Record",
"hrm_rheader",
"hrm_training_controller",
"hrm_training_event_controller",
"hrm_TrainingEventRepresent",
"hrm_xls_list_fields",
#"hrm_competency_list_layout",
#"hrm_credential_list_layout",
#"hrm_experience_list_layout",
#"hrm_training_list_layout",
)
import datetime
import json
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3PopupLink
# Compact JSON encoding
SEPARATORS = (",", ":")
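
# e.g. json.dumps({"a": 1, "b": 2}, separators=SEPARATORS) == '{"a":1,"b":2}'
# (no spaces after "," and ":", unlike the default separators)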
# =============================================================================
class HRModel(S3Model):
names = ("hrm_department",
"hrm_department_id",
"hrm_job_title",
"hrm_job_title_id",
"hrm_job_title_human_resource",
"hrm_human_resource",
"hrm_human_resource_id",
"hrm_type_opts",
"hrm_human_resource_represent",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
auth = current.auth
settings = current.deployment_settings
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
messages = current.messages
AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP
#ORGANISATION = messages.ORGANISATION
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
organisation_id = self.org_organisation_id
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
mix_staff = settings.get_hrm_mix_staff()
request = current.request
controller = request.controller
group = request.get_vars.get("group", None)
if not group:
if mix_staff:
group = None
elif controller == "vol":
group = "volunteer"
elif controller == "deploy":
group = None
#elif controller in ("hrm", "org", "inv", "cr", "hms", "req"):
else:
group = "staff"
# =====================================================================
# Departments
#
tablename = "hrm_department"
define_table(tablename,
Field("name", notnull=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Only included in order to be able to set
# realm_entity to filter appropriately
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
label_create = T("Create Department")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Department Details"),
title_list = T("Department Catalog"),
title_update = T("Edit Department"),
title_upload = T("Import Departments"),
label_list_button = T("List Departments"),
label_delete_button = T("Delete Department"),
msg_record_created = T("Department added"),
msg_record_modified = T("Department updated"),
msg_record_deleted = T("Department deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup = tablename)
department_id = S3ReusableField("department_id", "reference %s" % tablename,
label = T("Department / Unit"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_department.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "department",
label = label_create,
),
)
configure("hrm_department",
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
)
# =====================================================================
# Job Titles (Mayon: StaffResourceType)
#
STAFF = settings.get_hrm_staff_label()
if settings.has_module("vol"):
hrm_types = True
hrm_type_opts = {1: STAFF,
2: T("Volunteer"),
3: T("Both")
}
if group == "staff":
hrm_type_default = 1
elif group == "volunteer":
hrm_type_default = 2
else:
hrm_type_default = 3
else:
hrm_types = False
hrm_type_opts = {1: STAFF}
hrm_type_default = 1
if settings.get_hrm_job_title_deploy():
hrm_types = True
hrm_type_opts[4] = T("Deployment")
if group == "volunteer":
not_filter_opts = (1, 4)
code_label = T("Volunteer ID")
departments = settings.get_hrm_vol_departments()
job_titles = settings.get_hrm_vol_roles()
elif mix_staff:
not_filter_opts = (4,)
code_label = T("Organization ID")
departments = settings.get_hrm_staff_departments()
job_titles = True
else:
# Staff
not_filter_opts = (2, 4)
code_label = T("Staff ID")
departments = settings.get_hrm_staff_departments()
job_titles = True
org_dependent_job_titles = settings.get_hrm_org_dependent_job_titles()
tablename = "hrm_job_title"
define_table(tablename,
Field("name", notnull=True,
length=64, # Mayon compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Enable in templates as-required
self.org_region_id(readable = False,
writable = False,
),
organisation_id(default = root_org if org_dependent_job_titles else None,
readable = is_admin if org_dependent_job_titles else False,
writable = is_admin if org_dependent_job_titles else False,
),
Field("type", "integer",
default = hrm_type_default,
label = T("Type"),
readable = hrm_types,
writable = hrm_types,
represent = S3Represent(options = hrm_type_opts),
requires = IS_IN_SET(hrm_type_opts),
),
s3_comments(comment = None,
label = T("Description"),
),
*s3_meta_fields())
if group == "volunteer":
label = T("Volunteer Role")
label_create = T("Create Volunteer Role")
tooltip = T("The volunteer's role")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Volunteer Role Details"),
title_list = T("Volunteer Role Catalog"),
title_update = T("Edit Volunteer Role"),
label_list_button = T("List Volunteer Roles"),
label_delete_button = T("Delete Volunteer Role"),
msg_record_created = T("Volunteer Role added"),
msg_record_modified = T("Volunteer Role updated"),
msg_record_deleted = T("Volunteer Role deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
else:
label = T("Job Title")
label_create = T("Create Job Title")
tooltip = T("The staff member's official job title")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Job Title Details"),
title_list = T("Job Title Catalog"),
title_update = T("Edit Job Title"),
label_list_button = T("List Job Titles"),
label_delete_button = T("Delete Job Title"),
msg_record_created = T("Job Title added"),
msg_record_modified = T("Job Title updated"),
msg_record_deleted = T("Job Title deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup = tablename,
translate = True,
)
if org_dependent_job_titles:
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_job_title.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
not_filterby = "type",
not_filter_opts = not_filter_opts,
))
else:
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_job_title.id",
represent,
not_filterby = "type",
not_filter_opts = not_filter_opts,
))
job_title_id = S3ReusableField("job_title_id", "reference %s" % tablename,
label = label,
ondelete = "SET NULL",
represent = represent,
requires = requires,
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "job_title",
                                                  # Add this for use cases where there is no special controller for an options lookup
#vars = {"prefix": "hrm",
# "parent": "human_resource",
# },
label = label_create,
title = label,
tooltip = tooltip,
),
)
configure("hrm_job_title",
deduplicate = self.hrm_job_title_duplicate,
onvalidation = self.hrm_job_title_onvalidation,
)
# =====================================================================
# Human Resource
#
# People who are either Staff or Volunteers
#
# @ToDo: Move Volunteers to a separate resource?: vol_volunteer
#
# @ToDo: Allocation Status for Events (link table)
#
STAFF = settings.get_hrm_staff_label()
# NB These numbers are hardcoded into KML Export stylesheet
hrm_type_opts = {1: STAFF,
2: T("Volunteer"),
}
hrm_status_opts = {1: T("Active"),
2: T("Resigned"), # They left of their own accord
3: T("Terminated"), # Org terminated their contract
4: T("Died"),
}
organisation_label = settings.get_hrm_organisation_label()
multiple_contracts = settings.get_hrm_multiple_contracts()
use_code = settings.get_hrm_use_code()
if group == "volunteer" or s3.bulk or not group:
# Volunteers don't have a Site
# Don't set a Site for Bulk Imports unless set explicitly
default_site = None
else:
default_site = auth.user.site_id if auth.is_logged_in() else None
if settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
org_widget = None
if settings.get_org_site_autocomplete():
site_widget = S3SiteAutocompleteWidget()
site_comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Requested By Facility"),
AUTOCOMPLETE_HELP,
))
else:
site_widget = None
site_comment = None
tablename = "hrm_human_resource"
realms = auth.permission.permitted_realms(tablename, method="create")
define_table(tablename,
# Instances
super_link("track_id", "sit_trackable"),
super_link("doc_id", "doc_entity"),
organisation_id(empty = not settings.get_hrm_org_required(),
label = organisation_label,
requires = self.org_organisation_requires(required = True,
realms = realms,
),
widget = org_widget,
),
super_link("site_id", "org_site",
comment = site_comment,
default = default_site,
instance_types = auth.org_site_types,
#empty = False,
label = settings.get_org_site_label(),
ondelete = "SET NULL",
orderby = "org_site.name",
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
realms = realms,
represent = self.org_site_represent,
widget = site_widget,
),
self.pr_person_id(comment = None,
empty = False,
ondelete = "CASCADE",
widget = S3AddPersonWidget(controller = "hrm"),
),
Field("type", "integer",
default = 1,
label = T("Type"),
represent = S3Represent(options = hrm_type_opts),
requires = IS_IN_SET(hrm_type_opts,
zero = None),
widget = RadioWidget.widget,
# Normally set via the Controller we create from
readable = mix_staff,
writable = mix_staff,
),
Field("code",
label = code_label,
represent = lambda v: v or messages["NONE"],
readable = use_code,
writable = use_code,
),
job_title_id(readable = job_titles,
writable = job_titles,
),
department_id(readable = departments,
writable = departments,
),
Field("essential", "boolean",
label = T("Essential Staff?"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Essential Staff?"),
T("If the person counts as essential staff when evacuating all non-essential staff."),
),
),
),
# Contract
s3_date("start_date",
label = T("Start Date"),
set_min = "#hrm_human_resource_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_human_resource_start_date",
start_field = "hrm_human_resource_start_date",
default_interval = 12,
),
# Current status
Field("status", "integer",
default = 1,
label = T("Status"),
represent = S3Represent(options = hrm_status_opts),
requires = IS_IN_SET(hrm_status_opts,
zero = None),
),
# Base location + Site
self.gis_location_id(label = T("Base Location"),
readable = False,
writable = False,
),
Field("org_contact", "boolean",
label = T("Organization Contact"),
represent = s3_yes_no_represent,
readable = False,
writable = False,
),
Field("site_contact", "boolean",
label = T("Facility Contact"),
represent = s3_yes_no_represent,
),
s3_comments(),
*s3_meta_fields())
# @ToDo: Move this configurability to templates rather than lots of deployment_settings
if STAFF == T("Contacts"):
contacts = True
crud_strings["hrm_staff"] = Storage(
label_create = T("Create Contact"),
title_display = T("Contact Details"),
title_list = STAFF,
title_update = T("Edit Contact Details"),
title_upload = T("Import Contacts"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact Details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
else:
contacts = False
crud_strings["hrm_staff"] = Storage(
label_create = T("Create Staff Member"),
title_display = T("Staff Member Details"),
title_list = STAFF,
title_update = T("Edit Staff Member Details"),
title_upload = T("Import Staff"),
label_list_button = T("List Staff Members"),
label_delete_button = T("Delete Staff Member"),
msg_record_created = T("Staff Member added"),
msg_record_modified = T("Staff Member Details updated"),
msg_record_deleted = T("Staff Member deleted"),
msg_list_empty = T("No Staff currently registered"))
crud_strings["hrm_volunteer"] = Storage(
label_create = T("Create Volunteer"),
title_display = T("Volunteer Details"),
title_list = T("Volunteers"),
title_update = T("Edit Volunteer Details"),
title_upload = T("Import Volunteers"),
label_list_button = T("List Volunteers"),
label_delete_button = T("Delete Volunteer"),
msg_record_created = T("Volunteer added"),
msg_record_modified = T("Volunteer Details updated"),
msg_record_deleted = T("Volunteer deleted"),
msg_list_empty = T("No Volunteers currently registered"))
hrm_human_resource_represent = hrm_HumanResourceRepresent(show_link = True)
if group == "staff":
label = STAFF
crud_strings[tablename] = crud_strings["hrm_staff"]
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
filterby = "type",
filter_opts = (1,),
sort = True,
))
widget = S3HumanResourceAutocompleteWidget(group="staff")
elif group == "volunteer":
label = T("Volunteer")
crud_strings[tablename] = crud_strings["hrm_volunteer"]
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
filterby = "type",
filter_opts = (2,),
sort = True,
))
widget = S3HumanResourceAutocompleteWidget(group="volunteer")
else:
label = T("Human Resource")
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
sort = True
))
widget = S3HumanResourceAutocompleteWidget()
if contacts:
crud_strings[tablename] = crud_strings["hrm_staff"]
else:
crud_strings[tablename] = Storage(
label_create = T("Create Staff or Volunteer"),
title_display = T("Human Resource Details"),
title_list = T("Staff & Volunteers"),
title_update = T("Edit Record"),
title_upload = T("Search Staff & Volunteers"),
label_list_button = T("List Staff & Volunteers"),
label_delete_button = T("Delete Record"),
msg_record_created = T("Human Resource added"),
msg_record_modified = T("Record updated"),
msg_record_deleted = T("Record deleted"),
msg_list_empty = T("No staff or volunteers currently registered"))
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = group or "staff",
vars = {"child": "human_resource_id"},
label = crud_strings["hrm_%s" % group].label_create if group else \
crud_strings[tablename].label_create,
title = label,
tooltip = AUTOCOMPLETE_HELP,
)
human_resource_id = S3ReusableField("human_resource_id", "reference %s" % tablename,
label = label,
ondelete = "RESTRICT",
represent = hrm_human_resource_represent,
requires = requires,
sortby = ["type", "status"],
widget = widget,
comment = comment,
)
# Custom Method for S3HumanResourceAutocompleteWidget and S3AddPersonWidget
set_method = self.set_method
set_method("hrm", "human_resource",
method = "search_ac",
action = self.hrm_search_ac)
set_method("hrm", "human_resource",
method = "lookup",
action = self.hrm_lookup)
# Components
add_components(tablename,
# Contact Data
pr_contact = (# Email
{"name": "email",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"contact_method": "EMAIL",
},
},
# Mobile Phone
{"name": "phone",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"contact_method": "SMS",
},
},
),
pr_contact_emergency = {"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
},
pr_address = ({"name": "home_address",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"type": "1",
},
},
),
# Experience & Skills
hrm_appraisal = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_certification = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_competency = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_contract = {"joinby": "human_resource_id",
"multiple": multiple_contracts,
},
hrm_credential = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
pr_education = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_experience = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_insurance = "human_resource_id",
hrm_salary = "human_resource_id",
hrm_training = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_trainings = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
"multiple": False,
},
# Organisation Groups
org_group_person = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
# Projects
project_project = {"link": "project_human_resource_project",
"joinby": "human_resource_id",
"key": "project_id",
},
# Application(s) for Deployment
deploy_application = "human_resource_id",
# Assignments
deploy_assignment = "human_resource_id",
# Hours
#hrm_hours = "human_resource_id",
# Tags
hrm_human_resource_tag = {"name": "tag",
"joinby": "human_resource_id",
},
)
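
        # The person-linked components above all share one join pattern, e.g. "email":
        #   hrm_human_resource.person_id == pr_person.id    (pkey <-> joinby)
        #   pr_person.pe_id == pr_contact.pe_id             (key <-> fkey)
        # with rows filtered to contact_method == "EMAIL".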
# Optional Components
teams = settings.get_hrm_teams()
if teams:
add_components(tablename,
# Team Memberships
pr_group_membership = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
)
if group in ("volunteer", None) or mix_staff:
add_components(tablename,
# Programmes
hrm_programme_hours = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
# Availability
pr_person_availability = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
                                                  # Will need to change in future
"multiple": False,
},
# Volunteer Details
vol_details = {"joinby": "human_resource_id",
"multiple": False,
},
# Volunteer Cluster
vol_volunteer_cluster = {"joinby": "human_resource_id",
"multiple": False,
},
)
if settings.get_hrm_multiple_job_titles():
add_components(tablename,
# Job Titles
hrm_job_title_human_resource = "human_resource_id",
)
crud_fields = ["organisation_id",
"person_id",
"start_date",
"end_date",
"status",
]
if use_code:
crud_fields.insert(2, "code")
filter_widgets = hrm_human_resource_filters(resource_type = group,
hrm_type_opts = hrm_type_opts)
report_fields = ["organisation_id",
"person_id",
"person_id$gender",
(T("Training"), "training.course_id"),
"location_id$L1",
"location_id$L2",
]
if settings.get_org_branches():
report_fields.insert(1, (settings.get_hrm_root_organisation_label(), "organisation_id$root_organisation"))
if teams:
report_fields.append((T(teams), "group_membership.group_id"))
if mix_staff:
crud_fields.insert(1, "site_id")
crud_fields.insert(2, "type")
posn = 4
if use_code:
posn += 1
crud_fields.insert(posn, "job_title_id")
if settings.get_hrm_staff_departments() or \
settings.get_hrm_vol_departments():
crud_fields.insert(posn, "department_id")
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
crud_fields.insert(posn, S3SQLInlineComponent("programme_hours",
label = "",
fields = ["programme_id"],
link = False,
multiple = False,
))
elif vol_experience == "activity":
report_fields.append("person_id$activity_hours.activity_hours_activity_type.activity_type_id")
crud_fields.append("details.volunteer_type")
if settings.get_hrm_vol_availability_tab() is False and \
settings.get_pr_person_availability_options() is not None:
crud_fields.append("person_availability.options")
crud_fields.append("details.card")
vol_active = settings.get_hrm_vol_active()
if vol_active and not callable(vol_active):
# Set manually
crud_fields.append("details.active")
report_fields.extend(("site_id",
"department_id",
"job_title_id",
(T("Age Group"), "person_id$age_group"),
"person_id$education.level",
))
# Needed for Age Group VirtualField to avoid extra DB calls
report_fields_extra = ["person_id$date_of_birth"]
elif group == "volunteer":
# This gets copied to hrm_human_resource.location_id onaccept, faster to lookup without joins
#location_context = "person_id$address.location_id" # When not using S3Track()
if settings.get_hrm_vol_roles():
crud_fields.insert(2, "job_title_id")
report_fields.append("job_title_id")
if settings.get_hrm_vol_departments():
crud_fields.insert(4, "department_id")
report_fields.append("department_id")
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
crud_fields.insert(2, S3SQLInlineComponent("programme_hours",
label = "",
fields = ["programme_id"],
link = False,
multiple = False,
))
elif vol_experience == "activity":
report_fields.append("person_id$activity_hours.activity_hours_activity_type.activity_type_id")
crud_fields.append("details.volunteer_type")
if settings.get_hrm_vol_availability_tab() is False and \
settings.get_pr_person_availability_options() is not None:
crud_fields.append("person_availability.options")
crud_fields.extend(("details.card",
# @ToDo: Move these to the IFRC Template (PH RC only people to use this)
#"volunteer_cluster.vol_cluster_type_id",
#"volunteer_cluster.vol_cluster_id",
#"volunteer_cluster.vol_cluster_position_id",
))
vol_active = settings.get_hrm_vol_active()
if vol_active and not callable(vol_active):
# Set manually
crud_fields.append("details.active")
report_fields.extend(((T("Age Group"), "person_id$age_group"),
"person_id$education.level",
))
# Needed for Age Group VirtualField to avoid extra DB calls
report_fields_extra = ["person_id$date_of_birth"]
else:
# Staff
# This gets copied to hrm_human_resource.location_id onaccept, faster to lookup without joins
#location_context = "site_id$location_id" # When not using S3Track()
crud_fields.insert(1, "site_id")
posn = 3
if use_code:
posn += 1
crud_fields.insert(posn, "job_title_id")
if settings.get_hrm_staff_departments():
crud_fields.insert(posn, "department_id")
report_fields.extend(("site_id",
"department_id",
"job_title_id",
))
report_fields_extra = []
# Redirect to the Details tabs after creation
if controller in ("hrm", "vol"):
hrm_url = URL(c=controller, f="person",
vars = {"human_resource.id": "[id]"},
)
else:
# Being added as a component to Org, Site or Project
hrm_url = None
# Custom Form
        s3.hrm = Storage(crud_fields = crud_fields) # Store the fields so they can easily be modified later
crud_form = S3SQLCustomForm(*crud_fields)
if settings.get_hrm_org_required():
mark_required = ("organisation_id",)
else:
mark_required = None
configure(tablename,
context = {#"location": location_context,
"organisation": "organisation_id",
"person": "person_id",
"project": "project.id",
"site": "site_id",
},
create_next = hrm_url,
crud_form = crud_form,
                  # This allows only one HR record per person and organisation;
                  # if multiple HR records of the same person with the same org
                  # are desired, then this needs an additional criterion in the
                  # query (e.g. job title, or type):
deduplicate = S3Duplicate(primary = ("person_id",),
secondary = ("organisation_id",),
ignore_deleted = True,
),
deletable = settings.get_hrm_deletable(),
#extra_fields = ["person_id"]
filter_widgets = filter_widgets,
mark_required = mark_required,
onaccept = hrm_human_resource_onaccept,
ondelete = self.hrm_human_resource_ondelete,
realm_components = ("presence",),
report_fields = report_fields_extra,
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = report_fields,
methods = ("count", "list",),
defaults = Storage(
rows = "organisation_id",
cols = "training.course_id",
fact = "count(person_id)",
)
),
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
],
super_entity = ("sit_trackable", "doc_entity"),
#update_next = hrm_url,
update_realm = True,
)
# =====================================================================
# Job Titles <> Human Resources link table
#
tablename = "hrm_job_title_human_resource"
define_table(tablename,
human_resource_id(empty = False,
ondelete = "CASCADE",
),
job_title_id(empty = False,
ondelete = "CASCADE",
),
Field("main", "boolean",
default = True,
label = T("Main?"),
represent = s3_yes_no_represent,
),
s3_date(label = T("Start Date")),
s3_date("end_date",
label = T("End Date"),
),
s3_comments(),
*s3_meta_fields())
configure("hrm_job_title_human_resource",
onaccept = self.hrm_job_title_human_resource_onaccept,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_department_id": department_id,
"hrm_job_title_id": job_title_id,
"hrm_human_resource_id": human_resource_id,
"hrm_status_opts": hrm_status_opts,
"hrm_type_opts": hrm_type_opts,
"hrm_human_resource_represent": hrm_human_resource_represent,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Safe defaults for model-global names in case module is disabled
"""
dummy = S3ReusableField.dummy
return {"hrm_department_id": dummy("department_id"),
"hrm_job_title_id": dummy("job_title_id"),
"hrm_human_resource_id": dummy("human_resource_id"),
}
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_duplicate(item):
"""
Update detection for hrm_job_title
@param item: the S3ImportItem
"""
data_get = item.data.get
name = data_get("name", None)
if current.deployment_settings.get_hrm_org_dependent_job_titles():
org = data_get("organisation_id", None)
else:
org = None
role_type = data_get("type", None)
table = item.table
query = (table.name.lower() == s3_unicode(name).lower())
if org:
query = query & (table.organisation_id == org)
if role_type:
query = query & (table.type == role_type)
duplicate = current.db(query).select(table.id,
limitby = (0, 1),
).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_onvalidation(form):
"""
Ensure Job Titles are not Org-specific unless configured to be so
"""
if not current.deployment_settings.get_hrm_org_dependent_job_titles():
form.vars["organisation_id"] = None
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_human_resource_onaccept(form):
"""
Record creation post-processing
            If the job title is flagged as main, set the
            human_resource.job_title_id accordingly
"""
formvars = form.vars
if formvars.main:
# Read the record
# (safer than relying on vars which might be missing on component tabs)
db = current.db
ltable = db.hrm_job_title_human_resource
record = db(ltable.id == formvars.id).select(ltable.human_resource_id,
ltable.job_title_id,
limitby = (0, 1),
).first()
# Set the HR's job_title_id to the new job title
htable = db.hrm_human_resource
db(htable.id == record.human_resource_id).update(job_title_id = record.job_title_id,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_search_ac(r, **attr):
"""
JSON search method for S3HumanResourceAutocompleteWidget and S3AddPersonWidget
- full name search
- include Organisation & Job Role in the output
"""
resource = r.resource
response = current.response
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = current.request.get_vars
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
value = _vars.term or _vars.value or _vars.q or None
if not value:
r.error(400, "No value provided!")
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
value = s3_unicode(value).lower()
if " " in value:
# Multiple words
# - check for match of first word against first_name
# - & second word against either middle_name or last_name
value1, value2 = value.split(" ", 1)
value2 = value2.strip()
query = ((FS("person_id$first_name").lower().like(value1 + "%")) & \
((FS("person_id$middle_name").lower().like(value2 + "%")) | \
(FS("person_id$last_name").lower().like(value2 + "%"))))
else:
# Single word - check for match against any of the 3 names
value = value.strip()
query = ((FS("person_id$first_name").lower().like(value + "%")) | \
(FS("person_id$middle_name").lower().like(value + "%")) | \
(FS("person_id$last_name").lower().like(value + "%")))
resource.add_filter(query)
settings = current.deployment_settings
limit = int(_vars.limit or 0)
MAX_SEARCH_RESULTS = settings.get_search_max_results()
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = [
{"label": str(current.T("There are more than %(max)s results, please input more characters.") % \
{"max": MAX_SEARCH_RESULTS}),
},
]
else:
fields = ["id",
"person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"job_title_id$name",
]
show_orgs = settings.get_hrm_show_organisation()
if show_orgs:
fields.append("organisation_id$name")
name_format = settings.get_pr_name_format()
test = name_format % {"first_name": 1,
"middle_name": 2,
"last_name": 3,
}
test = "".join(ch for ch in test if ch in ("1", "2", "3"))
if test[:1] == "1":
orderby = "pr_person.first_name"
elif test[:1] == "2":
orderby = "pr_person.middle_name"
else:
orderby = "pr_person.last_name"
rows = resource.select(fields,
start = 0,
limit = limit,
orderby = orderby,
)["rows"]
output = []
iappend = output.append
for row in rows:
name = Storage(first_name=row["pr_person.first_name"],
middle_name=row["pr_person.middle_name"],
last_name=row["pr_person.last_name"],
)
name = s3_fullname(name)
item = {"id" : row["hrm_human_resource.id"],
"name" : name,
}
if show_orgs:
item["org"] = row["org_organisation.name"]
job_title = row.get("hrm_job_title.name", None)
if job_title:
item["job"] = job_title
iappend(item)
response.headers["Content-Type"] = "application/json"
return json.dumps(output, separators=SEPARATORS)
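        # Example response payload (hypothetical record) as consumed by the
        # autocomplete widgets:
        #   [{"id": 3, "name": "Jane Doe", "org": "Example Org", "job": "Nurse"}]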
# -------------------------------------------------------------------------
@staticmethod
def hrm_lookup(r, **attr):
"""
JSON lookup method for S3AddPersonWidget
"""
hrm_id = r.id
if not hrm_id:
r.error(400, "No id provided!")
db = current.db
s3db = current.s3db
settings = current.deployment_settings
request_dob = settings.get_pr_request_dob()
request_gender = settings.get_pr_request_gender()
home_phone = settings.get_pr_request_home_phone()
tags = settings.get_pr_request_tags()
htable = db.hrm_human_resource
ptable = db.pr_person
ctable = s3db.pr_contact
fields = [htable.organisation_id,
ptable.pe_id,
# We have these already from the search_ac
#ptable.first_name,
#ptable.middle_name,
#ptable.last_name,
]
separate_name_fields = settings.get_pr_separate_name_fields()
if separate_name_fields:
middle_name = separate_name_fields == 3
fields += [ptable.first_name,
ptable.middle_name,
ptable.last_name,
]
left = None
if request_dob:
fields.append(ptable.date_of_birth)
if request_gender:
fields.append(ptable.gender)
if current.request.controller == "vol":
dtable = s3db.pr_person_details
fields.append(dtable.occupation)
left = dtable.on(dtable.person_id == ptable.id)
if tags:
fields.append(ptable.id)
query = (htable.id == hrm_id) & \
(ptable.id == htable.person_id)
row = db(query).select(left=left,
*fields).first()
if left:
occupation = row["pr_person_details.occupation"]
else:
occupation = None
organisation_id = row["hrm_human_resource.organisation_id"]
row = row["pr_person"]
#first_name = row.first_name
#middle_name = row.middle_name
#last_name = row.last_name
if request_dob:
date_of_birth = row.date_of_birth
else:
date_of_birth = None
if request_gender:
gender = row.gender
else:
gender = None
if separate_name_fields:
first_name = row.first_name
last_name = row.last_name
if middle_name:
middle_name = row.middle_name
else:
first_name = None
middle_name = None
last_name = None
# Tags
if tags:
tags = [t[1] for t in tags]
ttable = s3db.pr_person_tag
query = (ttable.person_id == row.id) & \
(ttable.deleted == False) & \
(ttable.tag.belongs(tags))
tags = db(query).select(ttable.tag,
ttable.value,
)
# Lookup contacts separately as we can't limitby here
if home_phone:
contact_methods = ("SMS", "EMAIL", "HOME_PHONE")
else:
contact_methods = ("SMS", "EMAIL")
query = (ctable.pe_id == row.pe_id) & \
(ctable.contact_method.belongs(contact_methods))
rows = db(query).select(ctable.contact_method,
ctable.value,
orderby = ctable.priority,
)
email = mobile_phone = None
if home_phone:
home_phone = None
for row in rows:
if not email and row.contact_method == "EMAIL":
email = row.value
elif not mobile_phone and row.contact_method == "SMS":
mobile_phone = row.value
elif not home_phone and row.contact_method == "HOME_PHONE":
home_phone = row.value
if email and mobile_phone and home_phone:
break
else:
for row in rows:
if not email and row.contact_method == "EMAIL":
email = row.value
elif not mobile_phone and row.contact_method == "SMS":
mobile_phone = row.value
if email and mobile_phone:
break
# Minimal flattened structure
item = {}
if first_name:
item["first_name"] = first_name
if middle_name:
item["middle_name"] = middle_name
if last_name:
item["last_name"] = last_name
if email:
item["email"] = email
if mobile_phone:
item["mphone"] = mobile_phone
if home_phone:
item["hphone"] = home_phone
if gender:
item["sex"] = gender
if date_of_birth:
item["dob"] = date_of_birth
if occupation:
item["occupation"] = occupation
if organisation_id:
item["org_id"] = organisation_id
for row in tags:
item[row.tag] = row.value
output = json.dumps(item, separators=SEPARATORS)
current.response.headers["Content-Type"] = "application/json"
return output
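        # Example response (hypothetical person) as consumed by S3AddPersonWidget:
        #   {"first_name": "Jane", "last_name": "Doe",
        #    "email": "jane@example.org", "mphone": "+1234567890", "org_id": 5}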
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_ondelete(row):
""" On-delete routine for HR records """
db = current.db
htable = db.hrm_human_resource
# Update PE hierarchy
person_id = row.person_id
if person_id:
current.s3db.pr_update_affiliations(htable, row)
# =============================================================================
class HRSiteModel(S3Model):
names = ("hrm_human_resource_site",)
def model(self):
T = current.T
# =========================================================================
# Link between Human Resources & Facilities
# - this is used to allow different Site Contacts per Sector
    # - it can be used to provide the right UI when adding HRs to a
    #   Facility via the Staff tab, although we use hrm_Assign for that now.
#
tablename = "hrm_human_resource_site"
self.define_table(tablename,
self.hrm_human_resource_id(ondelete = "CASCADE"),
self.org_site_id(),
self.org_sector_id(),
Field("site_contact", "boolean",
label = T("Facility Contact"),
represent = lambda opt: \
(T("No"), T("Yes"))[opt == True],
),
*s3_meta_fields())
self.configure(tablename,
# Each HR can only be assigned to one site at a time:
deduplicate = S3Duplicate(primary = ("human_resource_id",),
secondary = ("sector_id",),
),
onaccept = self.hrm_human_resource_site_onaccept,
ondelete = self.hrm_human_resource_site_ondelete,
)
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Assign Staff"),
title_display = T("Staff Assignment Details"),
title_list = T("Staff Assignments"),
title_update = T("Edit Staff Assignment"),
label_list_button = T("List Staff Assignments"),
label_delete_button = T("Delete Staff Assignment"),
msg_record_created = T("Staff Assigned"),
msg_record_modified = T("Staff Assignment updated"),
msg_record_deleted = T("Staff Assignment removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no staff assigned"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_site_onaccept(form):
"""
Update the Human Resource record with the site_id
"""
db = current.db
human_resource_id = form.vars.human_resource_id
# Remove any additional records for this HR
# (i.e. staff was assigned elsewhere previously)
# @ToDo: Allow one person to be the Site Contact for multiple sectors
ltable = db.hrm_human_resource_site
rows = db(ltable.human_resource_id == human_resource_id).select(ltable.id,
ltable.site_id,
#ltable.sector_id,
ltable.human_resource_id,
ltable.site_contact,
orderby = ~ltable.id
)
first = True
for row in rows:
if first:
first = False
continue
db(ltable.id == row.id).delete()
record = rows.first()
site_id = record.site_id
table = db.hrm_human_resource
db(table.id == human_resource_id).update(site_id = site_id,
site_contact = record.site_contact
)
# Update realm_entity of HR
entity = current.s3db.pr_get_pe_id("org_site", site_id)
if entity:
current.auth.set_realm_entity(table, human_resource_id,
entity = entity,
force_update = True)
# Fire the normal onaccept
hrform = Storage(id = human_resource_id)
hrm_human_resource_onaccept(hrform)
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_site_ondelete(row):
"""
Update the Human Resource record with the site_id
"""
db = current.db
table = db.hrm_human_resource
human_resource_id = row.human_resource_id
db(table.id == human_resource_id).update(location_id = None,
site_id = None,
site_contact = False,
)
# Update realm_entity of HR
current.auth.set_realm_entity(table,
human_resource_id,
force_update = True,
)
# =============================================================================
class HRSalaryModel(S3Model):
""" Data Model to track salaries of staff """
names = ("hrm_staff_level",
"hrm_salary_grade",
"hrm_salary",
)
def model(self):
db = current.db
T = current.T
define_table = self.define_table
configure = self.configure
organisation_id = self.org_organisation_id
organisation_requires = self.org_organisation_requires
# =====================================================================
# Staff Level
#
tablename = "hrm_staff_level"
define_table(tablename,
organisation_id(
requires = organisation_requires(updateable=True),
),
Field("name",
label = T("Staff Level"),
),
*s3_meta_fields())
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
staff_level_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Salary Grades
#
tablename = "hrm_salary_grade"
define_table(tablename,
organisation_id(
requires = organisation_requires(updateable=True),
),
Field("name",
label = T("Salary Grade"),
),
*s3_meta_fields())
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
salary_grade_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Salary
#
tablename = "hrm_salary"
define_table(tablename,
self.pr_person_id(),
self.hrm_human_resource_id(label = T("Staff Record"),
widget = None,
comment = None,
),
Field("staff_level_id", "reference hrm_staff_level",
label = T("Staff Level"),
represent = staff_level_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_staff_level.id",
staff_level_represent,
)),
comment = S3PopupLink(f = "staff_level",
label = T("Create Staff Level"),
),
),
Field("salary_grade_id", "reference hrm_salary_grade",
label = T("Salary Grade"),
represent = salary_grade_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_salary_grade.id",
salary_grade_represent,
)),
comment = S3PopupLink(f = "salary_grade",
label = T("Create Salary Grade"),
),
),
s3_date("start_date",
default = "now",
label = T("Start Date"),
set_min = "#hrm_salary_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_salary_start_date",
),
Field("monthly_amount", "double",
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v,
precision = 2,
),
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum = 0.0)
),
default = 0.0,
),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Salary"),
title_display = T("Salary Details"),
title_list = T("Salaries"),
title_update = T("Edit Salary"),
label_list_button = T("List Salaries"),
label_delete_button = T("Delete Salary"),
msg_record_created = T("Salary added"),
msg_record_modified = T("Salary updated"),
msg_record_deleted = T("Salary removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no salary registered"))
configure(tablename,
onvalidation = self.hrm_salary_onvalidation,
orderby = "%s.start_date desc" % tablename,
)
# =====================================================================
# Salary Coefficient
#
# @todo: implement
# =====================================================================
# Allowance Level
#
# @todo: implement
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_salary_onvalidation(form):
try:
form_vars = form.vars
start_date = form_vars.get("start_date")
end_date = form_vars.get("end_date")
except AttributeError:
return
if start_date and end_date and start_date > end_date:
form.errors["end_date"] = current.T("End date must be after start date.")
return
# =============================================================================
class hrm_OrgSpecificTypeRepresent(S3Represent):
""" Representation of organisation-specific taxonomic categories """
def __init__(self, lookup=None):
""" Constructor """
if lookup is None:
raise SyntaxError("must specify a lookup table")
fields = ("name", "organisation_id")
super(hrm_OrgSpecificTypeRepresent, self).__init__(lookup = lookup,
fields = fields,
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
table = self.table
otable = s3db.org_organisation
left = otable.on(otable.id == table.organisation_id)
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(table.id,
table.name,
otable.id,
otable.name,
otable.acronym,
left = left,
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
try:
name = row[self.tablename].name
except AttributeError:
return row.name
try:
organisation = row["org_organisation"]
except AttributeError:
return name
if organisation.acronym:
return "%s (%s)" % (name, organisation.acronym)
elif organisation.name:
return "%s (%s)" % (name, organisation.name)
else:
return name
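
        # Example outputs of the represent above (hypothetical rows):
        #   name="Senior Officer", org acronym="XYZ"   -> "Senior Officer (XYZ)"
        #   name="Senior Officer", org name only       -> "Senior Officer (Example Org)"
        #   no organisation linked                     -> "Senior Officer"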
# =============================================================================
class HRInsuranceModel(S3Model):
""" Data Model to track insurance information of staff members """
names = ("hrm_insurance",
)
def model(self):
T = current.T
insurance_types = {"SOCIAL": T("Social Insurance"),
"HEALTH": T("Health Insurance"),
}
insurance_type_represent = S3Represent(options = insurance_types)
# =====================================================================
# Insurance Information
#
tablename = "hrm_insurance"
self.define_table(tablename,
# The original use (IFRC) used human_resource_id instead of the usual person_id in order to put it into the HR form
self.hrm_human_resource_id(),
# RMS uses person_id in order to have on a common Medical Information tab with Physical Description fields
#self.pr_person_id(),
Field("type",
label = T("Type"),
represent = insurance_type_represent,
requires = IS_IN_SET(insurance_types),
),
Field("insurance_number",
length = 128,
label = T("Insurance Number"),
requires = IS_LENGTH(128),
),
Field("insurer",
length = 255,
label = T("Insurer"),
requires = IS_LENGTH(255),
),
Field("provider",
length = 255,
label = T("Provider"),
requires = IS_LENGTH(255),
),
Field("phone",
label = T("Emergency Number"),
requires = IS_EMPTY_OR(
IS_PHONE_NUMBER_MULTI(),
),
),
#Field("beneficiary",
# label = T("Beneficiary"),
# ),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
#context = {"person": "human_resource_id$person_id",
# },
deduplicate = S3Duplicate(primary = ("human_resource_id",
#"person_id",
"type",
),
),
)
return {}
# =============================================================================
class HRContractModel(S3Model):
""" Data model to track employment contract details of staff members """
names = ("hrm_contract",
)
def model(self):
T = current.T
contract_terms = {"SHORT": T("Short-term"),
"LONG": T("Long-term"),
"PERMANENT": T("Permanent")
}
contract_term_represent = S3Represent(options = contract_terms)
hours_models = {"PARTTIME": T("Part-time"),
"FULLTIME": T("Full-time"),
}
hours_model_represent = S3Represent(options = hours_models)
# =====================================================================
# Employment Contract Details
#
tablename = "hrm_contract"
self.define_table(tablename,
self.hrm_human_resource_id(),
Field("name",
label = T("Name"),
),
s3_date(label = T("Start Date"),
),
#s3_date("end_date",
# label = T("End Date"),
# ),
Field("term",
requires = IS_IN_SET(contract_terms),
represent = contract_term_represent,
),
Field("hours",
requires = IS_IN_SET(hours_models),
represent = hours_model_represent,
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("human_resource_id",)),
)
return {}
# =============================================================================
class HRJobModel(S3Model):
"""
Unused
@ToDo: If bringing back into use then Availability better as Person component not HR
"""
names = ("hrm_position",
"hrm_position_id",
)
    def model(self):
        T = current.T
        db = current.db
        s3db = current.s3db
        UNKNOWN_OPT = current.messages.UNKNOWN_OPT
define_table = self.define_table
job_title_id = self.hrm_job_title_id
organisation_id = self.org_organisation_id
site_id = self.org_site_id
group_id = self.pr_group_id
human_resource_id = self.hrm_human_resource_id
hrm_type_opts = self.hrm_type_opts
# =========================================================================
# Positions
#
# @ToDo: Shifts for use in Scenarios & during Exercises & Events
#
# @ToDo: Vacancies
#
tablename = "hrm_position"
table = define_table(tablename,
job_title_id(empty = False),
organisation_id(empty = False),
site_id,
group_id(label = "Team"),
*s3_meta_fields())
table.site_id.readable = table.site_id.writable = True
#crud_strings[tablename] = Storage(
# label_create = T("Add Position"),
# title_display = T("Position Details"),
# title_list = T("Position Catalog"),
# title_update = T("Edit Position"),
# label_list_button = T("List Positions"),
# label_delete_button = T("Delete Position"),
# msg_record_created = T("Position added"),
# msg_record_modified = T("Position updated"),
# msg_record_deleted = T("Position deleted"),
# msg_list_empty = T("Currently no entries in the catalog"))
#label_create = crud_strings[tablename].label_create
position_id = S3ReusableField("position_id", "reference %s" % tablename,
label = T("Position"),
ondelete = "SET NULL",
#represent = hrm_position_represent,
requires = IS_EMPTY_OR(IS_ONE_OF(db,
"hrm_position.id",
#hrm_position_represent,
)),
sortby = "name",
#comment = DIV(A(label_create,
# _class="s3_add_resource_link",
# _href=URL(f="position",
# args="create",
# vars={"format": "popup"}
# ),
# _target="top",
# _title=label_create),
# DIV(_class="tooltip",
# _title="%s|%s" % (label_create,
# T("Add a new job role to the catalog.")))),
)
# =========================================================================
# Availability
#
# unused - see PRAvailabilityModel
#
weekdays = {1: T("Monday"),
2: T("Tuesday"),
3: T("Wednesday"),
4: T("Thursday"),
5: T("Friday"),
6: T("Saturday"),
7: T("Sunday")
}
weekdays_represent = lambda opt: ",".join([str(weekdays[o]) for o in opt])
tablename = "hrm_availability"
define_table(tablename,
human_resource_id(),
Field("date_start", "date"),
Field("date_end", "date"),
Field("day_of_week", "list:integer",
default = [1, 2, 3, 4, 5],
represent = weekdays_represent,
requires = IS_EMPTY_OR(IS_IN_SET(weekdays,
zero=None,
multiple=True)),
widget = CheckboxesWidgetS3.widget,
),
Field("hours_start", "time"),
Field("hours_end", "time"),
#location_id(label=T("Available for Location"),
# requires=IS_ONE_OF(db, "gis_location.id",
# gis_LocationRepresent(),
# filterby="level",
# # @ToDo Should this change per config?
# filter_opts=gis.region_level_keys,
# orderby="gis_location.name"),
# widget=None),
*s3_meta_fields())
# =========================================================================
# Hours registration
#
tablename = "hrm_hours"
define_table(tablename,
human_resource_id(),
Field("timestmp_in", "datetime"),
Field("timestmp_out", "datetime"),
Field("hours", "double"),
*s3_meta_fields())
# =========================================================================
# Vacancy
#
# These are Positions which are not yet Filled
#
tablename = "hrm_vacancy"
define_table(tablename,
organisation_id(),
#Field("code"),
Field("title"),
Field("description", "text"),
self.super_link("site_id", "org_site",
label = T("Facility"),
readable = False,
writable = False,
sort = True,
represent = s3db.org_site_represent,
),
Field("type", "integer",
default = 1,
label = T("Type"),
represent = lambda opt: \
hrm_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(hrm_type_opts, zero=None),
),
Field("number", "integer"),
#location_id(),
Field("from", "date"),
Field("until", "date"),
Field("open", "boolean",
default = False,
),
Field("app_deadline", "date",
#label = T("Application Deadline"),
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_position_id": position_id,
}
# =============================================================================
class HRSkillModel(S3Model):
names = ("hrm_skill_type",
"hrm_skill",
"hrm_competency_rating",
"hrm_competency",
#"hrm_competency_id",
"hrm_credential",
"hrm_training",
"hrm_trainings",
"hrm_event_type",
"hrm_training_event",
"hrm_training_event_id",
"hrm_event_location",
"hrm_event_tag",
"hrm_training_event_report",
"hrm_certificate",
"hrm_certification",
"hrm_certification_onaccept",
"hrm_certificate_skill",
"hrm_course",
"hrm_course_certificate",
"hrm_course_job_title",
"hrm_course_sector",
"hrm_course_id",
"hrm_skill_id",
"hrm_multi_skill_id",
"hrm_multi_skill_represent",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
request = current.request
folder = request.folder
s3 = current.response.s3
settings = current.deployment_settings
job_title_id = self.hrm_job_title_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
person_id = self.pr_person_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP
ORGANISATION = settings.get_hrm_organisation_label()
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
is_float_represent = IS_FLOAT_AMOUNT.represent
float_represent = lambda v: is_float_represent(v, precision=2)
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
c = current.request.controller
if c not in ("hrm", "vol"):
c = "hrm"
if settings.get_org_autocomplete():
widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
widget = None
# ---------------------------------------------------------------------
# Skill Types
# - optional hierarchy of skills
# disabled by default, enable with deployment_settings.hrm.skill_types = True
# if enabled, then each needs their own list of competency levels
#
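# e.g. (illustrative) enable in a template's config.py:
#   settings.hrm.skill_types = True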
tablename = "hrm_skill_type"
define_table(tablename,
Field("name", notnull=True, unique=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Skill Type"),
title_display = T("Details"),
title_list = T("Skill Type Catalog"),
title_update = T("Edit Skill Type"),
label_list_button = T("List Skill Types"),
label_delete_button = T("Delete Skill Type"),
msg_record_created = T("Skill Type added"),
msg_record_modified = T("Skill Type updated"),
msg_record_deleted = T("Skill Type deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
skill_types = settings.get_hrm_skill_types()
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup = tablename)
skill_type_id = S3ReusableField("skill_type_id", "reference %s" % tablename,
default = self.skill_type_default,
label = T("Skill Type"),
ondelete = "RESTRICT",
readable = skill_types,
writable = skill_types,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill_type.id",
represent
)),
sortby = "name",
comment = S3PopupLink(c = c,
f = "skill_type",
label = label_create,
title = label_create,
tooltip = T("Add a new skill type to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
hrm_competency_rating = "skill_type_id",
)
# ---------------------------------------------------------------------
# Skills
# - these can be simple generic skills or can come from certifications
#
tablename = "hrm_skill"
define_table(tablename,
skill_type_id(empty = False),
Field("name", notnull=True, unique=True,
length=64, # Mayon compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Skill"),
title_display = T("Skill Details"),
title_list = T("Skill Catalog"),
title_update = T("Edit Skill"),
label_list_button = T("List Skills"),
label_delete_button = T("Delete Skill"),
msg_record_created = T("Skill added"),
msg_record_modified = T("Skill updated"),
msg_record_deleted = T("Skill deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
autocomplete = False
label_create = crud_strings[tablename].label_create
if autocomplete:
# NB FilterField widget needs fixing for that too
widget = S3AutocompleteWidget(request.controller,
"skill")
tooltip = AUTOCOMPLETE_HELP
else:
widget = None
tooltip = None
skill_help = S3PopupLink(c = c,
f = "skill",
label = label_create,
tooltip = tooltip,
)
represent = S3Represent(lookup=tablename, translate=True)
skill_id = S3ReusableField("skill_id", "reference %s" % tablename,
label = T("Skill"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill.id",
represent,
sort = True
)),
sortby = "name",
comment = skill_help,
widget = widget
)
multi_skill_represent = S3Represent(lookup = tablename,
multiple = True,
)
multi_skill_id = S3ReusableField("skill_id", "list:reference hrm_skill",
label = T("Skills"),
ondelete = "SET NULL",
represent = multi_skill_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill.id",
represent,
sort = True,
multiple = True
)),
sortby = "name",
#comment = skill_help,
widget = S3MultiSelectWidget(header = "",
selectedList = 3,
),
)
configure("hrm_skill",
deduplicate = S3Duplicate(),
)
# =====================================================================
# Competency Ratings
#
# These are the levels of competency. Default is Levels 1-3.
# The levels can vary by skill_type if deployment_settings.hrm.skill_types = True
#
# The textual description can vary a lot, but is important to individuals
# Priority is the numeric value used for preferential role allocation in Mayon
#
# http://docs.oasis-open.org/emergency/edxl-have/cs01/xPIL-types.xsd
#
tablename = "hrm_competency_rating"
define_table(tablename,
skill_type_id(empty = False),
Field("name",
length=64, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("priority", "integer",
default = 1,
label = T("Priority"),
requires = IS_INT_IN_RANGE(1, 10),
widget = S3SliderWidget(),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Priority"),
T("Priority from 1 to 9. 1 is most preferred."),
),
),
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Competency Rating"),
title_display = T("Competency Rating Details"),
title_list = T("Competency Rating Catalog"),
title_update = T("Edit Competency Rating"),
label_list_button = T("List Competency Ratings"),
label_delete_button = T("Delete Competency Rating"),
msg_record_created = T("Competency Rating added"),
msg_record_modified = T("Competency Rating updated"),
msg_record_deleted = T("Competency Rating deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup=tablename, translate=True)
competency_id = S3ReusableField("competency_id", "reference %s" % tablename,
label = T("Competency"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_competency_rating.id",
represent,
orderby = "hrm_competency_rating.priority desc",
sort = True,
)),
sortby = "priority",
comment = self.competency_rating_comment(),
)
configure("hrm_competency_rating",
deduplicate = self.hrm_competency_rating_duplicate,
)
# ---------------------------------------------------------------------
# Competencies
#
# Link table between Persons & Skills
# - with a competency rating & confirmation
#
# Users can add their own but these are confirmed only by specific roles
#
# Component added in the hrm person() controller
#
tablename = "hrm_competency"
define_table(tablename,
person_id(ondelete = "CASCADE"),
skill_id(ondelete = "CASCADE"),
competency_id(),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
organisation_id(label = T("Confirming Organization"),
comment = None,
widget = org_widget,
writable = False,
),
Field("from_certification", "boolean",
default = False,
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Skill"),
title_display = T("Skill Details"),
title_list = T("Skills"),
title_update = T("Edit Skill"),
label_list_button = T("List Skills"),
label_delete_button = T("Remove Skill"),
msg_record_created = T("Skill added"),
msg_record_modified = T("Skill updated"),
msg_record_deleted = T("Skill removed"),
msg_list_empty = T("Currently no Skills registered"))
configure("hrm_competency",
context = {"person": "person_id",
},
deduplicate = S3Duplicate(primary = ("person_id",
"skill_id",
),
),
list_fields = ["id",
# Normally accessed via component
#"person_id",
"skill_id",
"competency_id",
"comments",
],
list_layout = hrm_competency_list_layout,
)
# =====================================================================
# Skill Provisions
#
# The minimum Competency levels in a Skill to be assigned the given Priority
# for allocation to Mayon's shifts for the given Job Role
#
#tablename = "hrm_skill_provision"
#define_table(tablename,
# Field("name", notnull=True, unique=True,
# length=32, # Mayon compatibility
# label = T("Name"),
# requires = [IS_NOT_EMPTY(),
# IS_LENGTH(32),
# ],
# ),
# job_title_id(),
# skill_id(),
# competency_id(),
# Field("priority", "integer",
# default = 1,
# requires = IS_INT_IN_RANGE(1, 10),
# widget = S3SliderWidget(),
# comment = DIV(_class = "tooltip",
# _title = "%s|%s" % (T("Priority"),
# T("Priority from 1 to 9. 1 is most preferred.")))
# ),
# s3_comments(),
# *s3_meta_fields())
#crud_strings[tablename] = Storage(
# label_create = T("Add Skill Provision"),
# title_display = T("Skill Provision Details"),
# title_list = T("Skill Provision Catalog"),
# title_update = T("Edit Skill Provision"),
# label_list_button = T("List Skill Provisions"),
# label_delete_button = T("Delete Skill Provision"),
# msg_record_created = T("Skill Provision added"),
# msg_record_modified = T("Skill Provision updated"),
# msg_record_deleted = T("Skill Provision deleted"),
# msg_list_empty = T("Currently no entries in the catalog"))
#label_create = crud_strings[tablename].label_create
#represent = S3Represent(lookup = tablename)
#skill_group_id = S3ReusableField("skill_provision_id", "reference %s" % tablename,
# label = T("Skill Provision"),
# ondelete = "SET NULL",
# represent = represent,
# requires = IS_EMPTY_OR(
# IS_ONE_OF(db, "hrm_skill_provision.id",
# represent,
# )),
# sortby = "name",
# comment = DIV(A(label_create,
# _class = "s3_add_resource_link",
# _href = URL(f="skill_provision",
# args = "create",
# vars = {"format": "popup"},
# ),
# _target = "top",
#                                 _title = label_create,
# ),
# DIV(_class = "tooltip",
# _title = "%s|%s" % (label_create,
# T("Add a new skill provision to the catalog."),
# ),
# ),
# ),
# )
# =========================================================================
# Courses
#
external_courses = settings.get_hrm_trainings_external()
course_pass_marks = settings.get_hrm_course_pass_marks()
hrm_course_types = settings.get_hrm_course_types()
tablename = "hrm_course"
define_table(tablename,
Field("code", length=64,
label = T("Code"),
requires = IS_LENGTH(64),
),
Field("name", length=128, notnull=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
# Optionally restrict to Staff/Volunteers/Members
Field("type", "integer",
label = T("Type"),
represent = S3Represent(options = hrm_course_types),
requires = IS_EMPTY_OR(IS_IN_SET(hrm_course_types)),
# Enable in Templates as-required
readable = False,
writable = False,
),
# Only included in order to be able to set
# realm_entity to filter appropriately
# @ToDo: Option to see multiple Training Centers even as non_admin
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
Field("external", "boolean",
default = False,
label = T("External"),
represent = s3_yes_no_represent,
readable = external_courses,
writable = external_courses,
),
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("pass_mark", "float",
default = 0.0,
label = T("Pass Mark"),
represent = float_represent,
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum = 0.0)
),
readable = course_pass_marks,
writable = course_pass_marks,
),
Field("url",
label = T("URL"),
requires = IS_EMPTY_OR(
IS_URL()
),
represent = s3_url_represent,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Course"),
title_display = T("Course Details"),
title_list = T("Course Catalog"),
title_update = T("Edit Course"),
title_upload = T("Import Courses"),
label_list_button = T("List Courses"),
label_delete_button = T("Delete Course"),
msg_record_created = T("Course added"),
msg_record_modified = T("Course updated"),
msg_record_deleted = T("Course deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no entries in the catalog"))
if is_admin:
label_create = crud_strings[tablename].label_create
course_help = S3PopupLink(c = c,
f = "course",
label = label_create,
)
else:
course_help = None
#course_help = DIV(_class="tooltip",
# _title="%s|%s" % (T("Course"),
# AUTOCOMPLETE_HELP))
course_represent = S3Represent(lookup = tablename,
translate = True,
)
course_id = S3ReusableField("course_id", "reference %s" % tablename,
label = T("Course"),
ondelete = "RESTRICT",
represent = course_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_course.id",
course_represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = course_help,
# Comment this to use a Dropdown & not an Autocomplete
#widget = S3AutocompleteWidget("hrm", "course")
)
if settings.get_hrm_create_certificates_from_courses():
onaccept = self.hrm_course_onaccept
else:
onaccept = None
configure(tablename,
create_next = URL(f="course",
args = ["[id]", "course_certificate"],
),
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
onaccept = onaccept,
)
# Components
add_components(tablename,
# Certificates
hrm_course_certificate = "course_id",
# Job Titles
hrm_course_job_title = "course_id",
# Sectors
org_sector = {"link": "hrm_course_sector",
"joinby": "course_id",
"key": "sector_id",
"actuate": "hide",
},
# Format for filter_widget
hrm_course_sector = "course_id",
# Trainees
hrm_training = "course_id",
)
# ---------------------------------------------------------------------
# Event Types
# - Trainings, Workshops, Meetings
#
tablename = "hrm_event_type"
define_table(tablename,
Field("name", notnull=True,
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Event Type"),
title_display = T("Event Type Details"),
title_list = T("Event Types"),
title_update = T("Edit Event Type"),
label_list_button = T("List Event Types"),
label_delete_button = T("Delete Event Type"),
msg_record_created = T("Event Type added"),
msg_record_modified = T("Event Type updated"),
msg_record_deleted = T("Event Type deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
event_types = settings.get_hrm_event_types()
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup=tablename)
event_type_id = S3ReusableField("event_type_id", "reference %s" % tablename,
label = T("Event Type"),
ondelete = "RESTRICT",
readable = event_types,
writable = event_types,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_event_type.id",
represent
)),
sortby = "name",
comment = S3PopupLink(c = "hrm",
f = "event_type",
label = label_create,
title = label_create,
tooltip = T("Add a new event type to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(),
)
# =========================================================================
# (Training) Events
# - can include Meetings, Workshops, etc
#
#site_label = settings.get_org_site_label()
site_label = T("Venue")
course_mandatory = settings.get_hrm_event_course_mandatory()
event_site = settings.get_hrm_event_site()
# Instructor settings
INSTRUCTOR = T("Instructor")
instructors = settings.get_hrm_training_instructors()
int_instructor = ext_instructor = False
int_instructor_tooltip = None
ext_instructor_label = INSTRUCTOR
ext_instructor_tooltip = None
if instructors in ("internal", "both"):
int_instructor = True
int_instructor_tooltip = DIV(_class = "tooltip",
_title = "%s|%s" % (INSTRUCTOR,
AUTOCOMPLETE_HELP,
),
)
if instructors == "both":
ext_instructor = True
ext_instructor_label = T("External Instructor")
ext_instructor_tooltip = DIV(_class = "tooltip",
_title = "%s|%s" % (T("External Instructor"),
T("Enter the name of the external instructor"),
),
)
elif instructors == "external":
ext_instructor = True
tablename = "hrm_training_event"
define_table(tablename,
# Instance
super_link("pe_id", "pr_pentity"),
event_type_id(),
Field("name",
label = T("Name"),
readable = event_types,
writable = event_types,
),
course_id(empty = not course_mandatory),
organisation_id(label = T("Organized By")),
location_id(widget = S3LocationSelector(), # show_address = False
readable = not event_site,
writable = not event_site,
),
# Component, not instance
super_link("site_id", "org_site",
label = site_label,
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
default = auth.user.site_id if auth.is_logged_in() else None,
readable = event_site,
writable = event_site,
empty = not event_site,
represent = self.org_site_represent,
),
s3_datetime("start_date",
label = T("Start Date"),
min = datetime.datetime(2000, 1, 1),
set_min = "#hrm_training_event_end_date",
),
s3_datetime("end_date",
label = T("End Date"),
min = datetime.datetime(2000, 1, 1),
set_max = "#hrm_training_event_start_date",
),
# @ToDo: Auto-populate from course
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, None),
),
),
person_id(label = INSTRUCTOR,
comment = int_instructor_tooltip,
readable = int_instructor,
writable = int_instructor,
),
Field("instructor",
label = ext_instructor_label,
comment = ext_instructor_tooltip,
represent = lambda s: s if s else NONE,
readable = ext_instructor,
writable = ext_instructor,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_TRAINING_EVENT = T("Create Training Event")
crud_strings[tablename] = Storage(
label_create = ADD_TRAINING_EVENT,
title_display = T("Training Event Details"),
title_list = T("Training Events"),
title_update = T("Edit Training Event"),
title_upload = T("Import Training Events"),
label_list_button = T("List Training Events"),
label_delete_button = T("Delete Training Event"),
msg_record_created = T("Training Event added"),
msg_record_modified = T("Training Event updated"),
msg_record_deleted = T("Training Event deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no training events registered"))
represent = hrm_TrainingEventRepresent()
training_event_id = S3ReusableField("training_event_id", "reference %s" % tablename,
label = T("Training Event"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_training_event.id",
represent,
#filterby = "organisation_id",
#filter_opts = filter_opts,
)),
sortby = "course_id",
comment = S3PopupLink(c = c,
f = "training_event",
label = ADD_TRAINING_EVENT,
),
# Comment this to use a Dropdown & not an Autocomplete
#widget = S3AutocompleteWidget("hrm", "training_event")
)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
if event_site:
filter_widgets = [S3TextFilter(["name",
"course_id$name",
"site_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events."),
),
S3LocationFilter("site_id$location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter("site_id",
label = site_label,
hidden = True,
),
S3DateFilter("start_date",
label = T("Date"),
hide_time = True,
hidden = True,
)
]
else:
filter_widgets = [S3TextFilter(["name",
"course_id$name",
"location_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events."),
),
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3DateFilter("start_date",
label = T("Date"),
hide_time = True,
hidden = True,
)
]
# Resource Configuration
configure(tablename,
create_next = URL(f="training_event",
args = ["[id]", "participant"],
),
deduplicate = S3Duplicate(primary = ("course_id",
"start_date",
),
secondary = ("site_id",),
),
filter_widgets = filter_widgets,
realm_entity = self.hrm_training_event_realm_entity,
super_entity = "pr_pentity",
)
# Components
add_components(tablename,
gis_location = {"link": "hrm_event_location",
"joinby": "training_event_id",
"key": "location_id",
"actuate": "hide",
},
pr_person = [# Instructors
{"name": "instructor",
#"joinby": "person_id",
"link": "hrm_training_event_instructor",
"joinby": "training_event_id",
"key": "person_id",
"actuate": "hide",
},
# Participants
{"name": "participant",
"link": "hrm_training",
"joinby": "training_event_id",
"key": "person_id",
"actuate": "hide",
},
],
hrm_event_tag = "training_event_id",
# This format is better for permissions on the link table
hrm_training = "training_event_id",
# Format for list_fields
hrm_training_event_instructor = "training_event_id",
hrm_training_event_report = {"joinby": "training_event_id",
"multiple": False,
},
#project_strategy = {"link": "project_strategy_event",
# "joinby": "training_event_id",
# "key": "strategy_id",
# "actuate": "hide",
# },
#project_programme = {"link": "project_programme_event",
# "joinby": "training_event_id",
# "key": "programme_id",
# "actuate": "hide",
# },
#project_project = {"link": "project_project_event",
# "joinby": "training_event_id",
# "key": "project_id",
# "actuate": "hide",
# },
dc_target = {"link": "dc_target_event",
"joinby": "training_event_id",
"key": "target_id",
"actuate": "replace",
},
)
# =====================================================================
# Training Event Locations
# - e.g. used for showing which Locations an Event is relevant for
#
tablename = "hrm_event_location"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
location_id(empty = False,
ondelete = "CASCADE",
widget = S3LocationSelector(#show_address = False,
),
),
#s3_comments(),
*s3_meta_fields())
# =====================================================================
# Training Event Tags
tablename = "hrm_event_tag"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
#s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("training_event_id",
"tag",
),
),
)
# =====================================================================
# Training Event Report
# - this is currently configured for RMS
# (move custom labels there if we need to make this more generic)
#
tablename = "hrm_training_event_report"
define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
training_event_id(empty = False,
ondelete = "CASCADE",
),
person_id(),
self.hrm_job_title_id(label = T("Position"),
),
organisation_id(),
Field("purpose",
label = T("Training Purpose"),
),
Field("code",
label = T("Code"),
),
s3_date(label = T("Report Date")),
Field("objectives",
label = T("Objectives"),
widget = s3_comments_widget,
),
Field("methodology",
label = T("Methodology"),
widget = s3_comments_widget,
),
Field("actions",
label = T("Implemented Actions"),
widget = s3_comments_widget,
),
Field("participants",
label = T("About the participants"),
widget = s3_comments_widget,
),
Field("results",
label = T("Results and Lessons Learned"),
widget = s3_comments_widget,
),
Field("followup",
label = T("Follow-up Required"),
widget = s3_comments_widget,
),
Field("additional",
label = T("Additional relevant information"),
widget = s3_comments_widget,
),
s3_comments(label = T("General Comments")),
*s3_meta_fields())
configure(tablename,
super_entity = "doc_entity",
)
# =====================================================================
# Training Instructors
# - used if there can be multiple per-event
#
tablename = "hrm_training_event_instructor"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
person_id(comment = self.pr_person_comment(INSTRUCTOR,
AUTOCOMPLETE_HELP,
child = "person_id"),
empty = False,
label = INSTRUCTOR,
ondelete = "CASCADE",
),
#s3_comments(),
*s3_meta_fields())
# =====================================================================
# (Training) Participations (Trainees)
#
# These are an element of credentials:
# - a minimum number of hours of training need to be done each year
#
# Users can add their own but these are confirmed only by specific roles
#
course_grade_opts = settings.get_hrm_course_grades()
# @ToDo: configuration setting once-required
role_opts = {1: T("Participant"),
2: T("Facilitator"),
3: T("Observer"),
}
# @ToDo: configuration setting once-required
status_opts = {1: T("Applied"),
2: T("Approved"),
3: T("Rejected"),
4: T("Invited"),
5: T("Accepted"),
6: T("Declined"),
}
tablename = "hrm_training"
define_table(tablename,
# @ToDo: Create a way to add new people to training as staff/volunteers
person_id(comment = self.pr_person_comment(
T("Participant"),
T("Type the first few characters of one of the Participant's names."),
child="person_id"),
empty = False,
ondelete = "CASCADE",
),
# Just used when created from participation in an Event
training_event_id(ondelete = "SET NULL",
readable = False,
writable = False,
),
course_id(empty = not course_mandatory,
),
Field("role", "integer",
default = 1,
label = T("Role"),
represent = S3Represent(options = role_opts),
requires = IS_EMPTY_OR(
IS_IN_SET(role_opts,
zero = None)),
# Enable in templates as-required
readable = False,
writable = False,
),
s3_datetime(),
s3_datetime("end_date",
label = T("End Date"),
),
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("status", "integer",
default = 4, # Invited
label = T("Status"),
represent = S3Represent(options = status_opts),
requires = IS_EMPTY_OR(IS_IN_SET(status_opts)),
# Enable in templates as-required
readable = False,
writable = False,
),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
Field("grade", "integer",
label = T("Grade"),
represent = S3Represent(options = course_grade_opts),
requires = IS_EMPTY_OR(
IS_IN_SET(course_grade_opts,
zero = None)),
readable = False,
writable = False,
),
# Can store specific test result here & then auto-calculate the Pass/Fail
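# (the auto-calculation happens in hrm_training_onaccept when
#  course pass marks are enabled)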
Field("grade_details", "float",
default = 0.0,
label = T("Grade Details"),
represent = float_represent,
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum = 0.0)
),
readable = course_pass_marks,
writable = course_pass_marks,
),
Field("qualitative_feedback",
label = T("Qualitative Feedback"),
widget = s3_comments_widget,
# Enable in templates as-required
readable = False,
writable = False,
),
Field("file", "upload",
autodelete = True,
length = current.MAX_FILENAME_LENGTH,
represent = self.hrm_training_file_represent,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
# Enable (& label) in templates as-required
readable = False,
writable = False,
),
Field.Method("job_title", hrm_training_job_title),
Field.Method("organisation", hrm_training_organisation),
s3_comments(),
*s3_meta_fields())
# Suitable for use when adding a Training to a Person
# The ones when adding a Participant to an Event are done in the Controller
crud_strings[tablename] = Storage(
label_create = T("Add Training"),
title_display = T("Training Details"),
title_list = T("Trainings"),
title_update = T("Edit Training"),
title_report = T("Training Report"),
title_upload = T("Import Training Participants"),
label_list_button = T("List Trainings"),
label_delete_button = T("Delete Training"),
msg_record_created = T("Training added"),
msg_record_modified = T("Training updated"),
msg_record_deleted = T("Training deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("No entries currently registered"))
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$last_name",
"course_id$name",
"training_event_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees."),
_class = "filter-search",
),
S3OptionsFilter("person_id$human_resource.organisation_id",
# Doesn't support translations
#represent = "%(name)s",
),
S3LocationFilter("person_id$location_id",
levels = levels,
),
S3OptionsFilter("course_id",
# Doesn't support translations
#represent="%(name)s",
),
S3OptionsFilter("training_event_id$site_id",
label = T("Training Facility"),
represent = self.org_site_represent,
),
S3OptionsFilter("grade",
),
S3DateFilter("date",
hide_time = True,
),
]
# NB training_event_controller overrides these for Participants
list_fields = ["course_id",
"person_id",
#(T("Job Title"), "job_title"),
(ORGANISATION, "organisation"),
"grade",
]
if course_pass_marks:
list_fields.append("grade_details")
list_fields.append("date")
report_fields = [(T("Training Event"), "training_event_id"),
"person_id",
"course_id",
"grade",
(ORGANISATION, "organisation"),
(T("Facility"), "training_event_id$site_id"),
(T("Month"), "month"),
(T("Year"), "year"),
]
rappend = report_fields.append
for level in levels:
rappend("person_id$location_id$%s" % level)
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
methods = ["count", "list"],
defaults = Storage(
rows = "training.course_id",
cols = "training.month",
fact = "count(training.person_id)",
totals = True,
)
)
# Resource Configuration
configure(tablename,
context = {"person": "person_id",
},
deduplicate = S3Duplicate(primary = ("person_id",
"course_id",
),
secondary = ("date",),
),
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = hrm_training_list_layout,
onaccept = hrm_training_onaccept,
ondelete = hrm_training_onaccept,
# Only used in Imports
#onvalidation = hrm_training_onvalidation,
orderby = "hrm_training.date desc",
report_options = report_options,
)
# Components
add_components(tablename,
hrm_certification = {"name": "certification_from_training", # Distinguish from that linked to the Person
"joinby": "training_id",
"multiple": False,
},
)
# =====================================================================
# Trainings
#
# A list:reference table to support Contains queries:
# - people who have attended both Course A & Course B
#
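# e.g. (illustrative) a Contains query via the S3 field selector API,
# with hypothetical course record IDs:
#   query = FS("trainings.course_id").contains([course_a_id, course_b_id])
#   resource = current.s3db.resource("pr_person", filter = query)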
tablename = "hrm_trainings"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
Field("course_id", "list:reference hrm_course",
label = T("Courses Attended"),
ondelete = "SET NULL",
represent = S3Represent(lookup = "hrm_course",
multiple = True,
translate = True,
),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_course.id",
course_represent,
sort = True,
multiple = True,
)),
widget = S3MultiSelectWidget(header = "",
selectedList = 3,
),
),
*s3_meta_fields())
# =====================================================================
# Certificates
#
# NB Some Orgs will only trust the certificates of some Orgs
# - we currently make no attempt to manage this trust chain
#
filter_certs = settings.get_hrm_filter_certificates()
if filter_certs:
label = ORGANISATION
else:
label = T("Certifying Organization")
tablename = "hrm_certificate"
define_table(tablename,
Field("name", notnull=True,
length=128, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
organisation_id(default = root_org if filter_certs else None,
label = label,
readable = is_admin or not filter_certs,
writable = is_admin or not filter_certs,
widget = org_widget,
),
Field("expiry", "integer",
label = T("Expiry (months)"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, None)
),
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Certificate"),
title_display = T("Certificate Details"),
title_list = T("Certificate Catalog"),
title_update = T("Edit Certificate"),
title_upload = T("Import Certificates"),
label_list_button = T("List Certificates"),
label_delete_button = T("Delete Certificate"),
msg_record_created = T("Certificate added"),
msg_record_modified = T("Certificate updated"),
msg_record_deleted = T("Certificate deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no entries in the catalog"))
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup=tablename)
certificate_id = S3ReusableField("certificate_id", "reference %s" % tablename,
label = T("Certificate"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_certificate.id",
represent,
filterby = "organisation_id" if filter_certs else None,
filter_opts = filter_opts
)),
sortby = "name",
comment = S3PopupLink(c = c,
f = "certificate",
label = label_create,
title = label_create,
tooltip = T("Add a new certificate to the catalog."),
),
)
if settings.get_hrm_use_skills():
create_next = URL(f="certificate",
args=["[id]", "certificate_skill"])
else:
create_next = None
configure(tablename,
create_next = create_next,
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
hrm_certificate_skill = "certificate_id",
)
# =====================================================================
# Certifications
#
# Link table between Persons & Certificates
#
# These are an element of credentials
#
tablename = "hrm_certification"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
certificate_id(empty = False,
),
# @ToDo: Option to auto-generate (like Waybills: SiteCode-CourseCode-UniqueNumber)
Field("number",
label = T("License Number"),
),
#Field("status", label = T("Status")),
s3_date(label = T("Expiry Date")),
Field("image", "upload",
autodelete = True,
label = T("Scanned Copy"),
length = current.MAX_FILENAME_LENGTH,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
organisation_id(comment = None,
label = T("Confirming Organization"),
widget = org_widget,
writable = False,
),
# Optional: When certification comes from a training
Field("training_id", "reference hrm_training",
readable = False,
writable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_training.id",
)),
),
s3_comments(),
*s3_meta_fields())
configure(tablename,
context = {"person": "person_id",
},
list_fields = ["certificate_id",
"number",
"date",
#"comments",
],
onaccept = self.hrm_certification_onaccept,
ondelete = self.hrm_certification_onaccept,
)
crud_strings[tablename] = Storage(
label_create = T("Add Certification"),
title_display = T("Certification Details"),
title_list = T("Certifications"),
title_update = T("Edit Certification"),
label_list_button = T("List Certifications"),
label_delete_button = T("Delete Certification"),
msg_record_created = T("Certification added"),
msg_record_modified = T("Certification updated"),
msg_record_deleted = T("Certification deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("No entries currently registered"))
# =====================================================================
# Credentials
#
# This determines whether an Organisation believes a person is suitable
# to fulfil a role. It is determined based on a combination of
# experience, training & a performance rating (medical fitness to come).
# @ToDo: Workflow to make this easy for the person doing the credentialling
#
# http://www.dhs.gov/xlibrary/assets/st-credentialing-interoperability.pdf
#
# Component added in the hrm person() controller
#
# Used by Courses
# & 6-monthly rating (Portuguese Bombeiros)
hrm_pass_fail_opts = {8: T("Pass"),
9: T("Fail"),
}
# 12-monthly rating (Portuguese Bombeiros)
# - this is used to determine rank progression (need 4-5 for 5 years)
#hrm_five_rating_opts = {1: T("Poor"),
# 2: T("Fair"),
# 3: T("Good"),
# 4: T("Very Good"),
# 5: T("Excellent"),
# }
# Lookup to represent both sorts of ratings
hrm_performance_opts = {1: T("Poor"),
2: T("Fair"),
3: T("Good"),
4: T("Very Good"),
5: T("Excellent"),
8: T("Pass"),
9: T("Fail"),
}
tablename = "hrm_credential"
define_table(tablename,
person_id(ondelete = "CASCADE"),
job_title_id(),
organisation_id(label = T("Credentialling Organization"),
widget = org_widget,
),
Field("performance_rating", "integer",
label = T("Performance Rating"),
represent = S3Represent(options = hrm_performance_opts),
# Default to pass/fail (can override to 5-levels in Controller)
# @ToDo: Build this onaccept of hrm_appraisal
requires = IS_EMPTY_OR(IS_IN_SET(hrm_pass_fail_opts)),
),
s3_date("start_date",
default = "now",
label = T("Date Received"),
set_min = "#hrm_credential_end_date",
),
s3_date("end_date",
label = T("Expiry Date"),
set_max = "#hrm_credential_start_date",
start_field = "hrm_credential_start_date",
default_interval = 12,
default_explicit = True,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Credential"),
title_display = T("Credential Details"),
title_list = T("Credentials"),
title_update = T("Edit Credential"),
label_list_button = T("List Credentials"),
label_delete_button = T("Delete Credential"),
msg_record_created = T("Credential added"),
msg_record_modified = T("Credential updated"),
msg_record_deleted = T("Credential deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Credentials registered"))
configure(tablename,
context = {"person": "person_id",
},
list_fields = ["job_title_id",
"start_date",
"end_date",
],
list_layout = hrm_credential_list_layout,
)
# =====================================================================
# Skill Equivalence
#
# Link table between Certificates & Skills
#
# Used to auto-populate the relevant skills
# - faster than runtime joins at a cost of data integrity
#
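# (hrm_certification_onaccept below maintains hrm_competency records
#  from these links)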
tablename = "hrm_certificate_skill"
define_table(tablename,
certificate_id(empty = False,
ondelete = "CASCADE",
),
skill_id(empty = False,
ondelete = "CASCADE",
),
competency_id(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Skill Equivalence"),
title_display = T("Skill Equivalence Details"),
title_list = T("Skill Equivalences"),
title_update = T("Edit Skill Equivalence"),
label_list_button = T("List Skill Equivalences"),
label_delete_button = T("Delete Skill Equivalence"),
msg_record_created = T("Skill Equivalence added"),
msg_record_modified = T("Skill Equivalence updated"),
msg_record_deleted = T("Skill Equivalence deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Skill Equivalences registered"))
# =====================================================================
# Course Certificates
#
# Link table between Courses & Certificates
#
# Used to auto-populate the relevant certificates
# - faster than runtime joins at a cost of data integrity
#
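# (hrm_training_onaccept below adds hrm_certification records from
#  these links)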
tablename = "hrm_course_certificate"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
certificate_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Certificate for Course"),
title_display = T("Course Certificate Details"),
title_list = T("Course Certificates"),
title_update = T("Edit Course Certificate"),
label_list_button = T("List Course Certificates"),
label_delete_button = T("Delete Course Certificate"),
msg_record_created = T("Course Certificate added"),
msg_record_modified = T("Course Certificate updated"),
msg_record_deleted = T("Course Certificate deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Course Certificates registered"))
# =====================================================================
# Course <> Job Titles link table
#
# Show which courses a person has done that are relevant to specific job roles
#
tablename = "hrm_course_job_title"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
job_title_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# =====================================================================
# Course <> Sectors link table
#
# Show which courses a person has done that are relevant to specific sectors
#
tablename = "hrm_course_sector"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
self.org_sector_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {#"hrm_competency_id": competency_id,
"hrm_course_id": course_id,
"hrm_skill_id": skill_id,
"hrm_multi_skill_id": multi_skill_id,
"hrm_multi_skill_represent": multi_skill_represent,
"hrm_training_event_id": training_event_id,
"hrm_certification_onaccept": self.hrm_certification_onaccept,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField.dummy
return {#"hrm_competency_id": dummy("competency_id"),
"hrm_course_id": dummy("course_id"),
"hrm_skill_id": dummy("skill_id"),
"hrm_multi_skill_id": dummy("skill_id", "list:reference"),
}
# -------------------------------------------------------------------------
@staticmethod
def skill_type_default():
""" Lookup the default skill_type """
if current.deployment_settings.get_hrm_skill_types():
# We have many - don't set a default
default = None
else:
# We don't use skill_types so find the default
db = current.db
table = db.hrm_skill_type
skill_type = db(table.deleted == False).select(table.id,
limitby = (0, 1),
).first()
try:
default = skill_type.id
except AttributeError:
# Create a default skill_type
default = table.insert(name = "Default")
return default
# -------------------------------------------------------------------------
@staticmethod
def competency_rating_comment():
""" Define the comment for the HRM Competency Rating widget """
T = current.T
s3 = current.response.s3
if current.request.controller == "vol":
controller = "vol"
else:
controller = "hrm"
if current.auth.s3_has_role(current.session.s3.system_roles.ADMIN):
label_create = s3.crud_strings["hrm_competency_rating"].label_create
comment = S3PopupLink(c = controller,
f = "competency_rating",
vars = {"child":"competency_id"},
label = label_create,
tooltip = T("Add a new competency rating to the catalog."),
)
else:
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Competency Rating"),
T("Level of competency this person has with this skill."),
),
)
if current.deployment_settings.get_hrm_skill_types():
script = \
'''$.filterOptionsS3({
'trigger':'skill_id',
'target':'competency_id',
'lookupResource':'competency',
'lookupURL':S3.Ap.concat('/%s/skill_competencies/'),
'msgNoRecords':'%s'
})''' % (controller, T("No Ratings for Skill Type"))
comment = TAG[""](comment,
S3ScriptItem(script = script))
return comment
# -------------------------------------------------------------------------
@staticmethod
def hrm_course_onaccept(form):
"""
Ensure that there is a Certificate created for each Course
- only called when create_certificates_from_courses in (True, "organisation_id")
"""
form_vars = form.vars
course_id = form_vars.id
db = current.db
s3db = current.s3db
ltable = s3db.hrm_course_certificate
exists = db(ltable.course_id == course_id).select(ltable.id,
limitby = (0, 1),
)
if not exists:
name = form_vars.get("name")
organisation_id = form_vars.get("organisation_id")
if not name or not organisation_id:
table = s3db.hrm_course
course = db(table.id == course_id).select(table.name,
table.organisation_id,
limitby = (0, 1),
).first()
name = course.name
organisation_id = course.organisation_id
ctable = s3db.hrm_certificate
certificate = db(ctable.name == name).select(ctable.id,
limitby = (0, 1),
).first()
if certificate:
certificate_id = certificate.id
else:
if current.deployment_settings.get_hrm_create_certificates_from_courses() is True:
# Don't limit to Org
organisation_id = None
certificate_id = ctable.insert(name = name,
organisation_id = organisation_id,
)
ltable.insert(course_id = course_id,
certificate_id = certificate_id,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_certification_onaccept(form):
"""
Ensure that Skills are Populated from Certifications
- called both onaccept & ondelete
"""
# Deletion and update have a different format
delete = False
try:
record_id = form.vars.id
except AttributeError:
# Delete
record_id = form.id
delete = True
# Read the full record
db = current.db
table = db.hrm_certification
record = db(table.id == record_id).select(table.person_id,
table.training_id,
table.number,
limitby = (0, 1),
).first()
if delete:
person_id = form.person_id
training_id = form.training_id
else:
person_id = record.person_id
training_id = record.training_id
if not person_id:
# This record is being created as a direct component of the Training,
# in order to set the Number (RMS usecase).
# Find the other record (created onaccept of training)
query = (table.training_id == training_id) & \
(table.id != record_id)
original = db(query).select(table.id,
limitby = (0, 1),
).first()
if original:
# Update it with the number
number = record.number
original.update_record(number = number)
# Delete this extraneous record
db(table.id == record_id).delete()
# Don't update any competencies
return
ctable = db.hrm_competency
cstable = db.hrm_certificate_skill
# Drop all existing competencies which came from certification
# - this is a lot easier than selective deletion
# @ToDo: Avoid this method as it will break Inline Component Updates
# if we ever use those (see hrm_training_onaccept)
query = (ctable.person_id == person_id) & \
(ctable.from_certification == True)
db(query).delete()
# Figure out which competencies we're _supposed_ to have.
# FIXME unlimited select
query = (table.person_id == person_id) & \
(table.certificate_id == cstable.certificate_id) & \
(cstable.skill_id == db.hrm_skill.id)
certifications = db(query).select()
# Add these competencies back in.
# FIXME unlimited select inside loop
# FIXME multiple implicit db queries inside nested loop
# FIXME db.delete inside nested loop
# FIXME unnecessary select (sub-select in Python loop)
for certification in certifications:
skill = certification["hrm_skill"]
cert = certification["hrm_certificate_skill"]
query = (ctable.person_id == person_id) & \
(ctable.skill_id == skill.id)
existing = db(query).select()
better = True
for e in existing:
if e.competency_id.priority > cert.competency_id.priority:
db(ctable.id == e.id).delete()
else:
better = False
break
if better:
ctable.update_or_insert(person_id = person_id,
competency_id = cert.competency_id,
skill_id = skill.id,
comments = "Added by certification",
from_certification = True,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_competency_rating_duplicate(item):
"""
This callback will be called when importing records; it checks
whether the record being imported is a duplicate.

@param item: An S3ImportItem object which includes all the details
             of the record being imported

If the record is a duplicate then the item method is set to update.

Rule for finding a duplicate:
- Look for a record with the same name (case-insensitive) and the
  same skill_type
"""
name = item.data.get("name")
skill = False
for citem in item.components:
if citem.tablename == "hrm_skill_type":
cdata = citem.data
if "name" in cdata:
skill = cdata.name
if skill is False:
return
table = item.table
stable = current.s3db.hrm_skill_type
query = (table.name.lower() == s3_unicode(name).lower()) & \
(table.skill_type_id == stable.id) & \
(stable.name.lower() == s3_unicode(skill).lower())
duplicate = current.db(query).select(table.id,
limitby = (0, 1),
).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hrm_training_file_represent(value):
""" File representation """
if value:
try:
# Read the filename from the field value
filename = current.db.hrm_training.file.retrieve(value)[0]
except IOError:
return current.T("File not found")
else:
return A(filename,
_href = URL(c="default", f="download",
args = [value],
))
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def hrm_training_event_realm_entity(table, record):
"""
Set the training_event realm entity
- to the root Org of the Site
"""
db = current.db
stable = db.org_site
query = (stable.site_id == record.site_id)
if current.deployment_settings.get_org_branches():
site = db(query).select(stable.organisation_id,
limitby = (0, 1),
).first()
if site:
org_id = site.organisation_id
root_org = current.cache.ram(
# Common key for all users of this org & vol_service_record()
"root_org_%s" % org_id,
lambda: current.s3db.org_root_organisation(org_id),
time_expire = 120
)
otable = db.org_organisation
org = db(otable.id == root_org).select(otable.realm_entity,
limitby = (0, 1),
).first()
if org:
return org.realm_entity
else:
otable = db.org_organisation
query &= (stable.organisation_id == otable.id)
org = db(query).select(otable.realm_entity,
limitby = (0, 1),
).first()
if org:
return org.realm_entity
return None
# =============================================================================
def hrm_training_onvalidation(form):
"""
If the Training is created from a Training Event (e.g. during Import),
then auto-populate the fields from that
"""
form_vars = form.vars
training_event_id = form_vars.get("training_event_id", None)
if not training_event_id:
# Nothing to do
return
db = current.db
table = db.hrm_training_event
record = db(table.id == training_event_id).select(table.course_id,
table.start_date,
table.end_date,
table.hours,
cache = current.s3db.cache,
limitby = (0, 1),
).first()
try:
form_vars.course_id = record.course_id
form_vars.date = record.start_date
form_vars.end_date = record.end_date
form_vars.hours = record.hours
except AttributeError:
# Record not found
return
# =============================================================================
def hrm_training_onaccept(form):
"""
Ensure that Certifications, Hours & list:Trainings are Populated from Trainings
Provide a Pass/Fail rating based on the Course's Pass Mark
- called both onaccept & ondelete
"""
# Deletion and update have a different format
delete = False
try:
training_id = form.vars.id
except AttributeError:
training_id = form.id
delete = True
# Get the full record
db = current.db
table = db.hrm_training
record = db(table.id == training_id).select(table.id,
table.person_id,
table.course_id,
table.date,
table.hours,
table.grade,
table.grade_details,
limitby = (0, 1),
).first()
if delete:
course_id = form.course_id
person_id = form.person_id
else:
course_id = record.course_id
person_id = record.person_id
s3db = current.s3db
course_table = db.hrm_course
settings = current.deployment_settings
if course_id:
course_pass_marks = settings.get_hrm_course_pass_marks()
if course_pass_marks and not record.grade and record.grade_details:
# Provide a Pass/Fail rating based on the Course's Pass Mark
course = db(course_table.id == course_id).select(course_table.pass_mark,
limitby = (0, 1),
).first()
if course:
if record.grade_details >= course.pass_mark:
# Pass
record.update_record(grade = 8)
else:
# Fail
record.update_record(grade = 9)
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
# Check if this person is a volunteer
hrtable = db.hrm_human_resource
query = (hrtable.person_id == person_id) & \
(hrtable.deleted == False)
vol = db(query).select(hrtable.type,
limitby = (0, 1),
).first()
if vol and vol.type == 2:
# Update Hours
ptable = s3db.hrm_programme_hours
query = (ptable.training_id == training_id)
if delete:
resource = s3db.resource("hrm_programme_hours", filter=query)
# Automatically propagates to Active Status
resource.delete()
else:
date = record.date
hours = record.hours
# Update or Insert?
exists = db(query).select(ptable.id,
ptable.date,
ptable.hours,
limitby = (0, 1),
).first()
if exists:
if date != exists.date or \
hours != exists.hours:
db(query).update(date = date,
hours = hours,
)
ph_id = exists.id
else:
# Nothing to propagate
ph_id = None
else:
ph_id = ptable.insert(training_id = training_id,
person_id = person_id,
date = date,
hours = hours,
training = True,
)
if ph_id:
# Propagate to Active Status
form = Storage()
form.vars = Storage()
form.vars.id = ph_id
hrm_programme_hours_onaccept(form)
# Update Trainings list:reference for Contains filter
ltable = db.hrm_trainings
query = (table.person_id == person_id) & \
(table.deleted == False)
courses = db(query).select(table.course_id,
distinct = True,
)
courses = [c.course_id for c in courses if c.course_id is not None]
exists = db(ltable.person_id == person_id).select(ltable.id,
limitby = (0, 1),
).first()
if exists:
exists.update_record(course_id = courses)
else:
ltable.insert(person_id = person_id,
course_id = courses,
)
# Update Certifications
ctable = db.hrm_certification
ltable = db.hrm_course_certificate
# NB The old approach (drop all certifications which came from
# trainings, then re-add - much simpler than selective deletion)
# was removed since it breaks Inline Component Updates: the
# record_id changes on every save
if delete:
# Remove certifications if provided by this training and no other
# training led to it
query = (ctable.training_id == training_id) & \
(ctable.deleted == False)
certifications = db(query).select(ctable.id,
ctable.certificate_id)
for certification in certifications:
query = (ltable.certificate_id == certification.certificate_id) & \
(ltable.deleted == False) & \
(ltable.course_id == table.course_id) & \
(table.deleted == False)
trainings = db(query).select(table.id,
table.date,
limitby = (0, 1),
orderby = "date desc",
)
if trainings:
# Update the training_id
certification.update_record(training_id = trainings.first().id)
else:
# Remove the certification
query = (ctable.id == certification.id)
resource = s3db.resource("hrm_certification", filter=query)
# Automatically propagates to Skills
resource.delete()
else:
if course_id:
# Which certificates does this course give?
query = (ltable.course_id == course_id) & \
(ltable.deleted == False)
certificates = db(query).select(ltable.certificate_id)
# Lookup user_id to allow the user to see their certifications
ptable = db.pr_person
putable = s3db.pr_person_user
query = (ptable.id == person_id) & \
(putable.pe_id == ptable.pe_id)
user = db(query).select(putable.user_id,
limitby = (0, 1),
).first()
if user:
user_id = user.user_id
else:
# Record has no special ownership
user_id = None
# Add any missing certifications
hrm_certification_onaccept = s3db.hrm_certification_onaccept
for certificate in certificates:
certification_id = ctable.update_or_insert(person_id = person_id,
certificate_id = certificate.certificate_id,
training_id = training_id,
comments = "Added by training",
owned_by_user = user_id,
)
# Propagate to Skills
form = Storage()
form.vars = Storage()
form.vars.id = certification_id
hrm_certification_onaccept(form)
# =============================================================================
class HRAppraisalModel(S3Model):
"""
Appraisal for an HR
- can be for a specific Mission or routine annual appraisal
"""
names = ("hrm_appraisal",
"hrm_appraisal_document",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
person_id = self.pr_person_id
if current.deployment_settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile = True)
else:
org_widget = None
# =====================================================================
# Appraisal
#
tablename = "hrm_appraisal"
define_table(tablename,
person_id(),
# For Mission or Event
Field("code",
label = T("Code"),
readable = False,
writable = False,
),
self.org_organisation_id(widget = org_widget),
self.hrm_job_title_id(),
s3_date(),
Field("rating", "float",
label = T("Rating"),
# @ToDo: make this configurable
# 1 to 4
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, 5)
),
widget = S3SliderWidget(step = 0.1,
type = "float",
),
),
person_id("supervisor_id",
label = T("Supervisor"),
widget = S3AddPersonWidget(),
),
s3_comments(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Appraisal"),
title_display = T("Appraisal Details"),
title_list = T("Appraisals"),
title_update = T("Edit Appraisal"),
label_list_button = T("List of Appraisals"),
label_delete_button = T("Delete Appraisal"),
msg_record_created = T("Appraisal added"),
msg_record_modified = T("Appraisal updated"),
msg_record_deleted = T("Appraisal deleted"),
msg_no_match = T("No Appraisals found"),
msg_list_empty = T("Currently no Appraisals entered"))
crud_form = S3SQLCustomForm("organisation_id",
"job_title_id",
"date",
"rating",
"supervisor_id",
S3SQLInlineComponent("document",
label = T("Files"),
link = False,
fields = ["file"],
),
"comments",
)
configure(tablename,
context = {"person": "person_id",
#"organisation": "organisation_id",
},
create_onaccept = self.hrm_appraisal_create_onaccept,
crud_form = crud_form,
list_fields = [# Normally accessed via component
#"person_id",
"date",
"organisation_id",
"job_title_id",
"supervisor_id",
"comments",
"document.file",
],
#list_layout = hrm_render_appraisal,
orderby = "hrm_appraisal.date desc",
)
# Components
self.add_components(tablename,
# Appraisal Documents
doc_document={"link": "hrm_appraisal_document",
"joinby": "appraisal_id",
"key": "document_id",
"autodelete": False,
},
)
# =====================================================================
# Appraisal Documents
#
tablename = "hrm_appraisal_document"
define_table(tablename,
Field("appraisal_id", "reference hrm_appraisal"),
self.doc_document_id(empty = False),
*s3_meta_fields())
configure(tablename,
onaccept = self.hrm_appraisal_document_onaccept,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_appraisal_create_onaccept(form):
"""
Link Appraisal to Assignment
"""
mission_id = current.request.get_vars.get("mission_id", None)
if not mission_id:
return
record_id = form.vars.id
db = current.db
s3db = current.s3db
atable = s3db.deploy_assignment
hatable = db.hrm_appraisal
hrtable = db.hrm_human_resource
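        # Find the person's assignment to this mission via their HR record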
query = (hatable.id == record_id) & \
(hrtable.person_id == hatable.person_id) & \
(atable.human_resource_id == hrtable.id) & \
(atable.mission_id == mission_id)
assignment = db(query).select(atable.id,
limitby = (0, 1),
).first()
if not assignment:
return
db.deploy_assignment_appraisal.insert(assignment_id = assignment.id,
appraisal_id = record_id,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_appraisal_document_onaccept(form):
"""
Set the doc_id to that of the HRM, so that it also appears there
"""
db = current.db
s3db = current.s3db
atable = db.hrm_appraisal
ltable = db.hrm_appraisal_document
htable = s3db.hrm_human_resource
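        # Look up the document and the doc_id of the person's HR record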
query = (ltable.id == form.vars.id) & \
(ltable.appraisal_id == atable.id) & \
(atable.person_id == htable.person_id) & \
                (htable.deleted != True)
row = db(query).select(htable.doc_id,
ltable.document_id,
limitby = (0, 1),
).first()
if row:
document_id = row["hrm_appraisal_document.document_id"]
doc_id = row["hrm_human_resource.doc_id"]
db(db.doc_document.id == document_id).update(doc_id = doc_id)
# =============================================================================
class HRExperienceModel(S3Model):
"""
Record a person's work experience
"""
names = ("hrm_experience",)
def model(self):
T = current.T
person_id = self.pr_person_id
settings = current.deployment_settings
if settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile = True)
else:
org_widget = None
site_label = settings.get_org_site_label()
if settings.get_org_site_autocomplete():
site_widget = S3SiteAutocompleteWidget()
site_comment = DIV(_class = "tooltip",
_title = "%s|%s" % (site_label,
current.messages.AUTOCOMPLETE_HELP,
),
)
else:
site_widget = None
site_comment = None
# =====================================================================
# Professional Experience (Mission Record)
#
# These are an element of credentials:
# - a minimum number of hours of active duty need to be done
# (e.g. every 6 months for Portuguese Bombeiros)
#
# This should be auto-populated out of Events
# - as well as being updateable manually for off-system Events
#
hr_type = self.hrm_human_resource.type
activity_types = settings.get_hrm_activity_types()
if not isinstance(activity_types, dict):
activity_type_requires = None
activity_type_represent = None
use_activity_types = False
else:
activity_type_opts = {} #{"other": T("Other")}
for k, v in activity_types.items():
activity_type_opts[k] = T(v)
activity_type_requires = IS_EMPTY_OR(IS_IN_SET(activity_type_opts))
activity_type_represent = S3Represent(options = activity_type_opts)
use_activity_types = True
tablename = "hrm_experience"
self.define_table(tablename,
person_id(ondelete = "CASCADE",
),
# Employment type (staff or volunteer)
Field("employment_type", "integer",
default = hr_type.default,
represent = hr_type.represent,
requires = hr_type.requires,
),
# Activity type (e.g. "RDRT Mission")
Field("activity_type",
represent = activity_type_represent,
requires = activity_type_requires,
# Expose only when there are options defined
readable = use_activity_types,
writable = use_activity_types,
),
# For Events
Field("code",
label = T("Code"),
readable = False,
writable = False,
),
self.org_organisation_id(widget = org_widget),
self.hrm_department_id(readable = False,
writable = False,
),
# Alternate free-text form especially suitable for volunteers
Field("organisation",
label = T("Organization"),
readable = False,
writable = False,
),
# Component, not instance
self.super_link("site_id", "org_site",
comment = site_comment,
label = site_label,
orderby = "org_site.name",
#readable = True,
represent = self.org_site_represent,
widget = site_widget,
#writable = True,
),
self.hrm_job_title_id(),
# Alternate free-text form especially suitable for volunteers
Field("job_title",
label = T("Position"),
readable = False,
writable = False,
),
Field("responsibilities",
label = T("Key Responsibilities"),
),
s3_date("start_date",
label = T("Start Date"),
set_min = "#hrm_experience_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_experience_start_date",
start_field = "hrm_experience_start_date",
default_interval = 12,
),
Field("hours", "float",
label = T("Hours"),
),
#Field("place",
# label = T("Place"),
# ),
self.gis_location_id(),
person_id("supervisor_id",
label = T("Supervisor"),
widget = S3AddPersonWidget(),
),
s3_comments(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Professional Experience"),
title_display = T("Professional Experience Details"),
title_list = T("Professional Experience"),
title_update = T("Edit Professional Experience"),
label_list_button = T("List of Professional Experience"),
label_delete_button = T("Delete Professional Experience"),
msg_record_created = T("Professional Experience added"),
msg_record_modified = T("Professional Experience updated"),
msg_record_deleted = T("Professional Experience deleted"),
msg_no_match = T("No Professional Experience found"),
msg_list_empty = T("Currently no Professional Experience entered"))
self.configure(tablename,
context = {"person": "person_id",
"organisation": "organisation_id",
},
list_fields = [# Normally accessed via component
#"person_id",
"start_date",
"end_date",
"organisation_id",
"employment_type",
"job_title_id",
"location_id",
"comments",
],
list_layout = hrm_experience_list_layout,
orderby = "hrm_experience.start_date desc",
)
# Components
self.add_components(tablename,
# Assignments
deploy_assignment = {"name": "assignment",
"link": "deploy_assignment_experience",
"joinby": "experience_id",
"key": "assignment_id",
"autodelete": False,
},
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class HRAwardModel(S3Model):
""" Data model for staff awards """
names = ("hrm_award_type",
"hrm_award",
)
def model(self):
T = current.T
db = current.db
define_table = self.define_table
# =====================================================================
# Award types
#
tablename = "hrm_award_type"
define_table(tablename,
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
Field("name",
label = T("Award Type"),
),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
ADD_AWARD_TYPE = T("Create Award Type")
award_type_represent = hrm_OrgSpecificTypeRepresent(lookup = tablename)
# =====================================================================
# Awards
#
tablename = "hrm_award"
define_table(tablename,
self.pr_person_id(),
s3_date(),
Field("awarding_body",
label = T("Awarding Body"),
),
Field("award_type_id", "reference hrm_award_type",
label = T("Award Type"),
represent = award_type_represent,
requires = IS_ONE_OF(db, "hrm_award_type.id",
award_type_represent,
),
comment = S3PopupLink(f = "award_type",
label = ADD_AWARD_TYPE,
),
),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Award"),
title_display = T("Award Details"),
title_list = T("Awards"),
title_update = T("Edit Award"),
label_list_button = T("List Awards"),
label_delete_button = T("Delete Award"),
msg_record_created = T("Award added"),
msg_record_modified = T("Award updated"),
msg_record_deleted = T("Award removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no awards registered"))
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class HRDisciplinaryActionModel(S3Model):
""" Data model for staff disciplinary record """
names = ("hrm_disciplinary_type",
"hrm_disciplinary_action",
)
def model(self):
T = current.T
define_table = self.define_table
# =====================================================================
# Types of disciplinary action
#
tablename = "hrm_disciplinary_type"
define_table(tablename,
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
Field("name",
label = T("Disciplinary Action Type"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
disciplinary_type_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Disciplinary record
tablename = "hrm_disciplinary_action"
define_table(tablename,
self.pr_person_id(),
s3_date(),
Field("disciplinary_body"),
Field("disciplinary_type_id", "reference hrm_disciplinary_type",
label = T("Disciplinary Action Type"),
represent = disciplinary_type_represent,
requires = IS_ONE_OF(current.db,
"hrm_disciplinary_type.id",
disciplinary_type_represent,
),
comment = S3PopupLink(f = "disciplinary_type",
label = T("Add Disciplinary Action Type"),
),
),
s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class HRTagModel(S3Model):
""" Arbitrary Key:Value Tags for Human Resources """
names = ("hrm_human_resource_tag",
)
def model(self):
T = current.T
# =====================================================================
# Human Resource Tags
#
tablename = "hrm_human_resource_tag"
self.define_table(tablename,
self.hrm_human_resource_id(empty = False,
ondelete = "CASCADE",
),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("human_resource_id",
"tag",
),
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class HRProgrammeModel(S3Model):
"""
Programmes
- record Volunteer Hours
- categorise (Training) Events
These are separate to the Project module's Programmes
- @ToDo: setting to make them the same?
"""
names = ("hrm_programme",
"hrm_programme_hours",
"hrm_programme_id",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
root_org = auth.root_org()
# =====================================================================
        # Programmes
#
tablename = "hrm_programme"
define_table(tablename,
Field("name", notnull=True, length=64,
label = T("Name"),
represent = T,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("name_long",
label = T("Long Name"),
),
# Only included in order to be able to set
# realm_entity to filter appropriately
self.org_organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(comment = None,
label = T("Description"),
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Program"),
title_display = T("Program Details"),
title_list = T("Programs"),
title_update = T("Edit Program"),
label_list_button = T("List Programs"),
label_delete_button = T("Delete Program"),
msg_record_created = T("Program added"),
msg_record_modified = T("Program updated"),
msg_record_deleted = T("Program deleted"),
msg_list_empty = T("Currently no programs registered"),
)
label_create = crud_strings[tablename].label_create
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
represent = S3Represent(lookup = tablename,
translate = True,
)
programme_id = S3ReusableField("programme_id", "reference %s" % tablename,
label = T("Program"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_programme.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = S3PopupLink(f = "programme",
label = label_create,
title = label_create,
tooltip = T("Add a new program to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
# Components
self.add_components(tablename,
hrm_programme_hours = {"name": "person",
"joinby": "programme_id",
},
# Uncomment if-required for reporting
#hrm_training_event = {"link": "hrm_event_programme",
# "joinby": "programme_id",
# "key": "training_event_id",
# "actuate": "hide",
# },
)
# =====================================================================
# Programmes <> Persons Link Table
#
vol_roles = current.deployment_settings.get_hrm_vol_roles()
tablename = "hrm_programme_hours"
define_table(tablename,
self.pr_person_id(ondelete = "CASCADE",
represent = self.pr_PersonRepresent(show_link = True)
),
programme_id(),
self.hrm_job_title_id(readable = vol_roles,
writable = vol_roles,
),
Field("contract",
label = T("Contract Number"),
# Enable in templates as-required
readable = False,
writable = False,
),
Field("event",
label = T("Event Name"),
# Enable in templates as-required
readable = False,
writable = False,
),
Field("place",
label = T("Place"),
# Enable in templates as-required
readable = False,
writable = False,
),
s3_date(default = "now",
future = 0,
),
s3_date("end_date",
label = T("End Date"),
),
Field("hours", "float",
label = T("Hours"),
),
# Training records are auto-populated
Field("training", "boolean",
default = False,
label = T("Type"),
represent = lambda opt: \
T("Training") if opt else T("Work"),
writable = False,
),
Field("training_id", self.hrm_training,
label = T("Course"),
represent = hrm_TrainingRepresent(),
writable = False,
),
Field.Method("month", hrm_programme_hours_month),
s3_comments(comment = None),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Hours"),
title_display = T("Hours Details"),
title_list = T("Hours"),
title_update = T("Edit Hours"),
title_upload = T("Import Hours"),
label_list_button = T("List Hours"),
label_delete_button = T("Delete Hours"),
msg_record_created = T("Hours added"),
msg_record_modified = T("Hours updated"),
msg_record_deleted = T("Hours deleted"),
msg_list_empty = T("Currently no hours recorded for this volunteer"),
)
filter_widgets = [
S3OptionsFilter("person_id$human_resource.organisation_id",
# Doesn't support translations
#represent="%(name)s",
),
S3OptionsFilter("programme_id",
# Doesn't support translation
#represent = "%(name)s",
),
S3OptionsFilter("job_title_id",
#label = T("Volunteer Role"),
# Doesn't support translation
#represent = "%(name)s",
),
S3DateFilter("date",
hide_time = True,
),
]
report_fields = ["training",
"programme_id",
"job_title_id",
"training_id",
(T("Month"), "month"),
"hours",
"person_id$gender",
]
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
defaults = Storage(rows = "programme_id",
cols = "month",
fact = "sum(hours)",
totals = True,
)
)
configure(tablename,
context = {"person": "person_id",
},
extra_fields = ["date"],
filter_widgets = filter_widgets,
list_fields = ["training",
"programme_id",
"job_title_id",
"training_id",
"date",
"hours",
],
onaccept = hrm_programme_hours_onaccept,
ondelete = hrm_programme_hours_onaccept,
orderby = "hrm_programme_hours.date desc",
report_options = report_options,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_programme_id": programme_id,
}
# =============================================================================
class HRShiftModel(S3Model):
"""
Shifts
"""
names = ("hrm_shift_template",
"hrm_shift",
"hrm_shift_id",
"hrm_human_resource_shift",
)
def model(self):
T = current.T
#configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
job_title_id = self.hrm_job_title_id
skill_id = self.hrm_skill_id
db = current.db
DAYS_OF_WEEK = {1: T("Monday"),
2: T("Tuesday"),
3: T("Wednesday"),
4: T("Thursday"),
5: T("Friday"),
6: T("Saturday"),
7: T("Sunday"),
}
# ---------------------------------------------------------------------
# Shift Templates
#
tablename = "hrm_shift_template"
define_table(tablename,
job_title_id(),
skill_id(),
Field("day_of_week", "integer",
represent = S3Represent(options = DAYS_OF_WEEK),
requires = IS_IN_SET(DAYS_OF_WEEK),
),
s3_time("start_time",
empty = False,
label = T("Start Time"),
# Could be the next day
#set_min = "#hrm_shift_template_end_time",
),
s3_time("end_time",
empty = False,
label = T("End Time"),
# Could be the next day
#set_max = "#hrm_shift_template_start_time",
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Shift"),
title_display = T("Shift Details"),
title_list = T("Shifts"),
title_update = T("Edit Shift"),
#title_upload = T("Import Shift data"),
label_list_button = T("List Shifts"),
msg_record_created = T("Shift added"),
msg_record_modified = T("Shift updated"),
msg_record_deleted = T("Shift deleted"),
msg_list_empty = T("No Shifts defined"),
)
# ---------------------------------------------------------------------
# Shifts
#
tablename = "hrm_shift"
define_table(tablename,
job_title_id(),
skill_id(),
s3_datetime("start_date",
label = T("Start Date"),
set_min = "#hrm_shift_end_date",
),
s3_datetime("end_date",
label = T("End Date"),
set_max = "#hrm_shift_start_date",
),
s3_comments(),
*s3_meta_fields())
represent = S3Represent(lookup = tablename,
fields = ["start_date", "end_date"])
shift_id = S3ReusableField("shift_id", "reference %s" % tablename,
label = T("Shift"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_shift.id",
represent,
)),
comment = S3PopupLink(c = "hrm",
f = "shift",
label = T("Create Shift"),
),
)
self.add_components(tablename,
hrm_human_resource_shift = {"joinby": "shift_id",
"multiple": False,
}
)
crud_form = S3SQLCustomForm("job_title_id",
"skill_id",
"start_date",
"end_date",
"comments",
(T("Assigned"), "human_resource_shift.human_resource_id"),
)
list_fields = ["job_title_id",
"skill_id",
"start_date",
"end_date",
"comments",
(T("Assigned"), "human_resource_shift.human_resource_id"),
]
self.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
# Custom Method to Assign HRs
STAFF = current.deployment_settings.get_hrm_staff_label()
filter_widgets = [S3DateFilter("available",
label = T("Available"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = False,
),
#if settings.get_hrm_use_skills():
S3OptionsFilter("competency.skill_id",
# Better to default (easier to customise/consistency)
#label = T("Skill"),
),
S3OptionsFilter("job_title_id",
),
S3OptionsFilter("type",
label = T("Type"),
options = {1: STAFF,
2: T("Volunteer"),
},
cols = 2,
hidden = True,
),
]
#if settings.get_hrm_multiple_orgs():
# if settings.get_org_branches():
# append_filter(S3HierarchyFilter("organisation_id",
# leafonly = False,
# ))
# else:
# append_filter(S3OptionsFilter("organisation_id",
# search = True,
# header = "",
# #hidden = True,
# ))
list_fields = ["person_id",
"job_title_id",
"start_date",
(T("Skills"), "person_id$competency.skill_id"),
]
set_method("hrm", "shift",
method = "assign",
action = self.hrm_AssignMethod(component = "human_resource_shift",
next_tab = "facility",
filter_widgets = filter_widgets,
list_fields = list_fields,
rheader = hrm_rheader,
))
def facility_redirect(r, **attr):
"""
Redirect to the Facility's Shifts tab
"""
s3db = current.s3db
# Find the Facility
ltable = s3db.org_site_shift
ftable = s3db.org_facility
query = (ltable.shift_id == r.id) & \
(ltable.site_id == ftable.site_id)
facility = current.db(query).select(ftable.id,
limitby = (0, 1),
).first()
redirect(URL(c = "org",
f = "facility",
args = [facility.id, "shift"],
))
set_method("hrm", "shift",
method = "facility",
action = facility_redirect)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Shift"),
title_display = T("Shift Details"),
title_list = T("Shifts"),
title_update = T("Edit Shift"),
#title_upload = T("Import Shift data"),
label_list_button = T("List Shifts"),
msg_record_created = T("Shift added"),
msg_record_modified = T("Shift updated"),
msg_record_deleted = T("Shift deleted"),
msg_list_empty = T("No Shifts defined"),
)
# ---------------------------------------------------------------------
# Shifts <> Human Resources
#
# @ToDo: Replace with hrm_shift_person as it's the Person who should be
# busy, not just the HR
#
tablename = "hrm_human_resource_shift"
define_table(tablename,
shift_id(),
self.hrm_human_resource_id(writable = False),
#s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_shift_id": shift_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
return {"hrm_shift_id": S3ReusableField.dummy("shift_id"),
}
# =============================================================================
class HRDelegationModel(S3Model):
"""
Model to manage delegations of staff/volunteers to other
organisations.
"""
names = ("hrm_delegation",
"hrm_delegation_status_opts",
"hrm_delegation_message",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
define_table = self.define_table
crud_strings = s3.crud_strings
# ---------------------------------------------------------------------
# Delegation Statuses
#
workflow = current.deployment_settings.get_hrm_delegation_workflow()
if isinstance(workflow, (tuple, list)) and len(workflow):
# Custom workflow
delegation_status = workflow
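            # (expected to be a sequence of (code, label) tuples)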
else:
if workflow == "Invitation":
# Invitation workflow:
# Other organisation invites the delegate, who then accepts
delegation_status = (("INVT", T("Invited")),
("ACPT", T("Accepted")),
("RJCT", T("Rejected")),
)
elif workflow == "Application":
# Application workflow:
# Person applies for the delegation, which is then accepted
delegation_status = (("APPL", T("Applied")),
("ACPT", T("Accepted")),
("RJCT", T("Rejected")),
)
else:
# Request workflow:
# Other organisation requests the delegate, which is then
# approved by the managing organisation
delegation_status = (("REQ", T("Requested")),
("APPR", T("Approved")),
("DECL", T("Declined")),
)
# Final statuses
delegation_status += (("CANC", T("Cancelled")),
("IMPL", T("Implemented")),
("NVLD", T("Invalid")),
)
# ---------------------------------------------------------------------
# Delegation
#
tablename = "hrm_delegation"
define_table(tablename,
self.org_organisation_id(
empty = False,
comment = DIV(_class = "tooltip",
# TODO tooltip depends on workflow
_title = "%s|%s" % (T("Requesting Organisation"),
T("The organisation requesting the delegation"),
),
),
),
self.super_link("site_id", "org_site",
orderby = "org_site.name",
represent = self.org_site_represent,
),
self.pr_person_id(
empty = False,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Person"),
T("The person to be delegated"),
),
),
),
s3_date(label = T("Start Date"),
set_min = "#hrm_delegation_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_delegation_date",
),
s3_datetime("requested_on",
label = T("Requested on"),
default = "now",
writable = False,
),
Field("status",
                           default = delegation_status[0][0],
requires = IS_IN_SET(delegation_status,
zero = None,
sort = False,
),
represent = S3Represent(options = dict(delegation_status)),
),
# Enable in template if/as required:
Field("hours_per_week", "integer",
label = T("Hours per week"),
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Components
self.add_components(tablename,
hrm_delegation_message = "delegation_id",
hrm_delegation_note = "delegation_id",
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Delegation"),
title_display = T("Delegation Details"),
title_list = T("Delegations"),
title_update = T("Edit Delegation"),
label_list_button = T("List Delegations"),
label_delete_button = T("Delete Delegation"),
msg_record_created = T("Delegation created"),
msg_record_modified = T("Delegation updated"),
msg_record_deleted = T("Delegation deleted"),
msg_list_empty = T("No Delegations currently registered"),
)
# ---------------------------------------------------------------------
# Messages exchanged in connection with a delegation
#
message_status = {"SENT": T("Sent"),
"FAILED": T("Failed"),
}
tablename = "hrm_delegation_message"
define_table(tablename,
Field("delegation_id", "reference hrm_delegation",
ondelete = "CASCADE",
readable = False,
writable = False,
),
s3_date(default="now"),
Field("recipient",
label = T("Recipient"),
),
Field("subject",
label = T("Subject"),
),
Field("message", "text",
label = T("Message"),
represent = s3_text_represent,
),
Field("status",
default = "SENT",
label = T("Status"),
requires = IS_IN_SET(message_status,
zero = None,
),
represent = S3Represent(options = message_status),
writable = False,
),
s3_comments(),
*s3_meta_fields())
# List fields
list_fields = ["date",
"recipient",
"subject",
"message",
"status",
]
# Table configuration
self.configure(tablename,
list_fields = list_fields,
insertable = False,
deletable = False,
editable = False,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Message"),
title_display = T("Message Details"),
title_list = T("Messages"),
title_update = T("Edit Message"),
label_list_button = T("List Messages"),
label_delete_button = T("Delete Message"),
msg_record_created = T("Message created"),
msg_record_modified = T("Message updated"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently registered"),
)
# ---------------------------------------------------------------------
# Simple notes journal for delegations
#
tablename = "hrm_delegation_note"
define_table(tablename,
Field("delegation_id", "reference hrm_delegation",
ondelete = "CASCADE",
readable = False,
writable = False,
),
s3_date(default="now"),
Field("note", "text",
label = T("Note"),
represent = s3_text_represent,
),
*s3_meta_fields())
# List fields
list_fields = ["date",
(T("Author"), "modified_by"),
"note",
]
# Table configuration
self.configure(tablename,
list_fields = list_fields,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Note"),
title_display = T("Note Details"),
title_list = T("Notes"),
title_update = T("Edit Note"),
label_list_button = T("List Notes"),
label_delete_button = T("Delete Note"),
msg_record_created = T("Note added"),
msg_record_modified = T("Note updated"),
msg_record_deleted = T("Note deleted"),
msg_list_empty = T("No Notes currently registered"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_delegation_status_opts": delegation_status,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
#dummy = S3ReusableField.dummy
return {"hrm_delegation_status_opts": {}}
# =============================================================================
def hrm_programme_hours_month(row):
"""
        Virtual field for hrm_programme_hours - returns the month of this
        entry formatted as "yy-mm" (i.e. the first day of the month), used
        for the programme hours report.
        Requires "date" to be in the extra_fields
@param row: the Row
"""
try:
thisdate = row["hrm_programme_hours.date"]
except AttributeError:
return current.messages["NONE"]
if not thisdate:
return current.messages["NONE"]
#thisdate = thisdate.date()
month = thisdate.month
year = thisdate.year
first = datetime.date(year, month, 1)
return first.strftime("%y-%m")
# =============================================================================
def hrm_programme_hours_onaccept(form):
"""
Update the Active Status for the volunteer
- called both onaccept & ondelete
"""
vol_active = current.deployment_settings.get_hrm_vol_active()
if not callable(vol_active):
# Nothing to do (either field is disabled or else set manually)
return
# Deletion and update have a different format
delete = False
try:
record_id = form.vars.id
except AttributeError:
record_id = form.id
delete = True
db = current.db
if delete:
person_id = form.person_id
else:
# Get the full record
table = db.hrm_programme_hours
record = db(table.id == record_id).select(table.person_id,
limitby = (0, 1),
).first()
person_id = record.person_id
# Recalculate the Active Status for this Volunteer
active = vol_active(person_id)
# Read the current value
s3db = current.s3db
dtable = s3db.vol_details
htable = s3db.hrm_human_resource
query = (htable.person_id == person_id) & \
(dtable.human_resource_id == htable.id)
row = db(query).select(dtable.id,
dtable.active,
limitby = (0, 1),
).first()
if row:
if row.active != active:
# Update
db(dtable.id == row.id).update(active = active)
else:
# Create record
row = db(htable.person_id == person_id).select(htable.id,
limitby = (0, 1),
).first()
if row:
dtable.insert(human_resource_id = row.id,
active = active,
)
# =============================================================================
class hrm_AssignMethod(S3Method):
"""
Custom Method to allow human resources to be assigned to something
e.g. Incident, Project, Site, Vehicle
@ToDo: be able to filter by deployable status for the role
"""
# -------------------------------------------------------------------------
def __init__(self,
component,
next_tab = "human_resource",
types = None,
filter_widgets = None,
list_fields = None,
rheader = None,
):
"""
@param component: the Component in which to create records
@param next_tab: the component/method to redirect to after assigning
@param types: a list of types to pick from: Staff, Volunteers, Deployables
@param filter_widgets: a custom list of FilterWidgets to show
@param list_fields: a custom list of Fields to show
@param rheader: an rheader to show
"""
self.component = component
self.next_tab = next_tab
self.types = types
self.filter_widgets = filter_widgets
self.list_fields = list_fields
self.rheader = rheader
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
try:
component = r.resource.components[self.component]
except KeyError:
current.log.error("Invalid Component!")
raise
if component.link:
component = component.link
tablename = component.tablename
# Requires permission to create component
authorised = current.auth.s3_has_permission("create", tablename)
if not authorised:
r.unauthorised()
settings = current.deployment_settings
types = self.types
if not types:
if settings.has_module("vol"):
types = (1, 2)
else:
# Staff
types = (1,)
if types == (2,):
controller = "vol"
else:
controller = "hrm"
T = current.T
db = current.db
s3db = current.s3db
table = s3db[tablename]
fkey = component.fkey
record = r.record
if fkey in record:
# SuperKey
record_id = record[fkey]
else:
record_id = r.id
get_vars = r.get_vars
response = current.response
output = None
if r.http == "POST":
added = 0
post_vars = r.post_vars
if all([n in post_vars for n in ("assign", "selected", "mode")]):
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
if post_vars.mode == "Exclusive":
# 'Select All' ticked or all rows selected manually
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.filterURL)
else:
filters = None
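                    # In Exclusive mode "selected" holds the de-selected
                    # rows, so invert it to get the actual selection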
query = ~(FS("id").belongs(selected))
resource = s3db.resource("hrm_human_resource",
alias = self.component,
filter = query,
vars = filters)
rows = resource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
if component.multiple:
# Prevent multiple entries in the link table
query = (table.human_resource_id.belongs(selected)) & \
(table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.id)
rows = dict((row.id, row) for row in rows)
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept", None))
for human_resource_id in selected:
try:
hr_id = int(human_resource_id.strip())
except ValueError:
continue
if hr_id not in rows:
link = Storage(human_resource_id = human_resource_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars = link)
onaccept(form)
added += 1
else:
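                    # Single-occurrence component: update the existing
                    # link (if any) rather than inserting a second one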
human_resource_id = selected[0]
exists = db(table[fkey] == record_id).select(table.id,
limitby = (0, 1),
).first()
if exists:
onaccept = component.get_config("update_onaccept",
component.get_config("onaccept", None))
exists.update_record(human_resource_id = human_resource_id)
if onaccept:
link = Storage(id = exists.id,
human_resource_id = human_resource_id)
link[fkey] = record_id
form = Storage(vars = link)
onaccept(form)
else:
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept", None))
link = Storage(human_resource_id = human_resource_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars = link)
onaccept(form)
added += 1
if r.representation == "popup":
# Don't redirect, so we retain popup extension & so close popup
response.confirmation = T("%(number)s assigned") % \
{"number": added}
output = {}
else:
current.session.confirmation = T("%(number)s assigned") % \
{"number": added}
if added > 0:
redirect(URL(args = [r.id, self.next_tab],
vars = {},
))
else:
redirect(URL(args = r.args,
vars = {},
))
elif r.http == "GET":
representation = r.representation
# Filter widgets
if self.filter_widgets is not None:
filter_widgets = self.filter_widgets
else:
if controller == "vol":
resource_type = "volunteer"
elif len(types) == 1:
resource_type = "staff"
else:
# Both
resource_type = None
if r.controller == "req":
module = "req"
else:
module = controller
filter_widgets = hrm_human_resource_filters(resource_type = resource_type,
module = module)
# List fields
if self.list_fields is not None:
list_fields = self.list_fields
else:
list_fields = ["person_id",
"organisation_id",
]
if len(types) == 2:
list_fields.append((T("Type"), "type"))
list_fields.append("job_title_id")
if settings.get_hrm_use_certificates():
list_fields.append((T("Certificates"), "person_id$certification.certificate_id"))
if settings.get_hrm_use_skills():
list_fields.append((T("Skills"), "person_id$competency.skill_id"))
if settings.get_hrm_use_trainings():
list_fields.append((T("Trainings"), "person_id$training.course_id"))
# Data table
resource = s3db.resource("hrm_human_resource",
alias = r.component.alias if r.component else None,
vars = get_vars)
totalrows = resource.count()
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
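            # Retrieve up to 4 pages of rows at once
            # (further rows are fetched via Ajax)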
if display_length:
limit = 4 * display_length
else:
limit = None
filter_, orderby, left = resource.datatable_filter(list_fields,
get_vars)
resource.add_filter(filter_)
# Hide people already in the link table
query = (table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.human_resource_id)
already = [row.human_resource_id for row in rows]
filter_ = (~db.hrm_human_resource.id.belongs(already))
resource.add_filter(filter_)
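            # Copy the GET vars so that the datatable Ajax URL
            # retains the currently active filters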
ajax_vars = dict(get_vars)
if settings.get_hrm_unavailability():
apply_availability_filter = False
if get_vars.get("available__ge") or \
get_vars.get("available__le"):
apply_availability_filter = True
elif representation != "aadata":
available_defaults = response.s3.filter_defaults["hrm_human_resource"]["available"]
if available_defaults:
apply_availability_filter = True
ge = available_defaults.get("ge")
if ge is not None:
ajax_vars["available__ge"] = s3_format_datetime(ge) # Used by dt_ajax_url
get_vars["available__ge"] = s3_format_datetime(ge) # Popped in pr_availability_filter
le = available_defaults.get("le")
if le is not None:
ajax_vars["available__le"] = s3_format_datetime(le) # Used by dt_ajax_url
get_vars["available__le"] = s3_format_datetime(le) # Popped in pr_availability_filter
if apply_availability_filter:
# Apply availability filter
request = Storage(get_vars = get_vars,
resource = resource,
tablename = "hrm_human_resource",
)
s3db.pr_availability_filter(request)
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Assign"), "assign")]
if representation in ("html", "popup"):
# Page load
resource.configure(deletable = False)
profile_url = URL(c = controller,
f = "human_resource",
args = ["[id]", "profile"],
)
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url,
)
response.s3.no_formats = True
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
submit_url_vars = resource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars = submit_url_vars)
# Default Filters (before selecting data!)
resource.configure(filter_widgets = filter_widgets)
S3FilterForm.apply_filter_defaults(r, resource)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f = "human_resource",
args = ["filter.options"],
vars = {},
)
get_config = resource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear = filter_clear,
formstyle = filter_formstyle,
submit = filter_submit,
ajax = True,
url = filter_submit_url,
ajaxurl = filter_ajax_url,
_class = "filter-form",
_id = "datatable-filter-form",
)
fresource = current.s3db.resource(resource.tablename)
alias = r.component.alias if r.component else None
ff = filter_form.html(fresource,
r.get_vars,
target = "datatable",
alias = alias)
else:
ff = ""
# Data table (items)
data = resource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url = r.url(representation = "aadata",
vars = ajax_vars),
dt_bulk_actions = dt_bulk_actions,
dt_bulk_single = not component.multiple,
dt_pageLength = display_length,
dt_pagination = "true",
dt_searching = "false",
)
STAFF = settings.get_hrm_staff_label()
response.view = "list_filter.html"
rheader = self.rheader
if callable(rheader):
rheader = rheader(r)
output = {"items": items,
"title": T("Assign %(staff)s") % {"staff": STAFF},
"list_filter_form": ff,
"rheader": rheader,
}
elif representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
data = resource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions = dt_bulk_actions)
response.headers["Content-Type"] = "application/json"
output = items
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# =============================================================================
class hrm_HumanResourceRepresent(S3Represent):
""" Representation of human resource IDs """
def __init__(self, show_link=False):
"""
Constructor
@param show_link: whether to add a URL to representations
"""
super(hrm_HumanResourceRepresent, self).__init__(lookup = "hrm_human_resource",
show_link = show_link)
self.job_title_represent = S3Represent(lookup = "hrm_job_title")
self.types = {}
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link
@param k: the key (hrm_human_resource.id)
@param v: the representation of the key
@param row: the row with this key (unused here)
"""
# Link to specific controller for type
types = self.types
if types.get(k) == 1:
url = URL(c="hrm", f="staff", args=[k])
else:
url = URL(c="vol", f="volunteer", args=[k])
return A(v, _href = url)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
htable = s3db.hrm_human_resource
ptable = s3db.pr_person
left = ptable.on(ptable.id == htable.person_id)
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(htable.id,
htable.job_title_id,
htable.organisation_id,
htable.type,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
limitby = (0, count),
left = left,
)
self.queries += 1
# Remember HR types
types = self.types
for row in rows:
types[row["hrm_human_resource.id"]] = row["hrm_human_resource.type"]
# Bulk-represent job_title_ids
job_title_id = str(htable.job_title_id)
job_title_ids = [row[job_title_id] for row in rows]
if job_title_ids:
self.job_title_represent.bulk(job_title_ids)
# Bulk-represent organisation_ids
if current.deployment_settings.get_hrm_show_organisation():
organisation_id = str(htable.organisation_id)
organisation_ids = [row[organisation_id] for row in rows]
if organisation_ids:
htable.organisation_id.represent.bulk(organisation_ids)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
# Start with the person name
representation = [s3_str(s3_fullname(row.pr_person))]
append = representation.append
hr = row.hrm_human_resource
# Append the job title if present
if hr.job_title_id:
append(self.job_title_represent(hr.job_title_id, show_link=False))
# Append the organisation if present (and configured)
if hr.organisation_id and \
current.deployment_settings.get_hrm_show_organisation():
htable = current.s3db.hrm_human_resource
append(htable.organisation_id.represent(hr.organisation_id,
show_link = False))
return ", ".join(representation)
# =============================================================================
class hrm_TrainingRepresent(S3Represent):
"""
Represent a Training by its Course
- used from within hrm_programme_hours
"""
def __init__(self):
"""
Constructor
"""
super(hrm_TrainingRepresent, self).__init__(lookup = "hrm_training")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
ttable = self.table
ctable = current.s3db.hrm_course
left = [ctable.on(ctable.id == ttable.course_id)]
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(ttable.id,
ctable.name,
left = left,
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
name = row["hrm_course.name"]
if not name:
name = current.messages.UNKNOWN_OPT
return name
# =============================================================================
class hrm_TrainingEventRepresent(S3Represent):
""" Representation of training_event_id """
def __init__(self):
"""
Constructor
"""
super(hrm_TrainingEventRepresent, self).__init__(lookup = "hrm_training_event")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None, pe_id=False):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
@param pe_id: whether to include pe_id in the output rows
(True when called from pr_PersonEntityRepresent)
"""
s3db = current.s3db
etable = self.table
ctable = s3db.hrm_course
stable = s3db.org_site
left = [ctable.on(ctable.id == etable.course_id),
stable.on(stable.site_id == etable.site_id),
]
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
fields = [etable.id,
etable.name,
etable.start_date,
etable.instructor,
etable.person_id,
ctable.name,
ctable.code,
stable.name,
]
if pe_id:
fields.insert(0, etable.pe_id)
rows = current.db(query).select(*fields,
left = left,
)
instructors = current.deployment_settings.get_hrm_training_instructors()
if instructors in ("internal", "both"):
# Bulk-represent internal instructors to suppress
# per-row DB lookups in represent_row:
key = str(etable.person_id)
etable.person_id.represent.bulk([row[key] for row in rows])
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
NB This needs to be machine-parseable by training.xsl
@param row: the Row
"""
# Do we have a Name?
name = row.get("hrm_training_event.name")
if name:
return name
# Course Details
course = row.get("hrm_course")
if not course:
return current.messages.UNKNOWN_OPT
name = course.get("name")
if not name:
name = current.messages.UNKNOWN_OPT
representation = ["%s --" % name]
append = representation.append
code = course.get("code")
if code:
append("(%s)" % code)
# Venue and instructor
event = row.hrm_training_event
try:
site = row.org_site.name
except AttributeError:
site = None
instructors = current.deployment_settings.get_hrm_training_instructors()
instructor = None
if instructors in ("internal", "both"):
person_id = event.get("person_id")
if person_id:
instructor = self.table.person_id.represent(person_id)
if instructor is None and instructors in ("external", "both"):
instructor = event.get("instructor")
if instructor and site:
append("%s - {%s}" % (instructor, site))
elif instructor:
append("%s" % instructor)
elif site:
append("{%s}" % site)
# Start date
start_date = event.start_date
if start_date:
# Easier for users & machines
start_date = S3DateTime.date_represent(start_date, format="%Y-%m-%d")
append("[%s]" % start_date)
return " ".join(representation)
# =============================================================================
#def hrm_position_represent(id, row=None):
# """
# """
# if row:
# id = row.id
# elif not id:
# return current.messages["NONE"]
# db = current.db
# s3db = current.s3db
# table = s3db.hrm_position
# jtable = s3db.hrm_job_title
# otable = s3db.org_organisation
# query = (table.id == id) & \
# (table.job_title_id == jtable.id)
# (table.organisation_id == otable.id)
# position = db(query).select(jtable.name,
# otable.name,
# limitby = (0, 1),
# ).first()
# try:
# represent = position.hrm_job_title.name
# if position.org_organisation:
# represent = "%s (%s)" % (represent,
# position.org_organisation.name)
# except:
# return current.messages["NONE"]
# return represent
#
# =============================================================================
def hrm_human_resource_onaccept(form):
""" On-accept for HR records """
if "vars" in form:
# e.g. coming from staff/create
form_vars = form.vars
elif "id" in form:
# e.g. coming from user/create or from hrm_site_onaccept or req_onaccept
form_vars = form
elif hasattr(form, "vars"):
# SQLFORM e.g. ?
form_vars = form.vars
else:
# e.g. Coming from s3_register callback
form_vars = form
record_id = form_vars.get("id")
if not record_id:
return
db = current.db
s3db = current.s3db
auth = current.auth
request = current.request
settings = current.deployment_settings
# Get the 'full' record
htable = db.hrm_human_resource
record = db(htable.id == record_id).select(htable.id, # needed for update_record
htable.type,
htable.person_id,
htable.organisation_id,
htable.location_id,
htable.job_title_id,
htable.site_id,
htable.site_contact,
htable.status,
htable.deleted,
htable.deleted_fk,
limitby = (0, 1),
).first()
job_title_id = record.job_title_id
if job_title_id and settings.get_hrm_multiple_job_titles():
# Update the link table
ltable = db.hrm_job_title_human_resource
query = (ltable.human_resource_id == record_id) & \
(ltable.job_title_id == job_title_id)
exists = db(query).select(ltable.id, # needed for update_record
ltable.main,
limitby = (0, 1),
).first()
if exists:
if not exists.main:
exists.update_record(main = True)
else:
# Insert record
ltable.insert(human_resource_id = record_id,
job_title_id = job_title_id,
main = True,
start_date = request.utcnow,
)
data = Storage()
site_id = record.site_id
organisation_id = record.organisation_id
# Affiliation, record ownership and component ownership
s3db.pr_update_affiliations(htable, record)
# Realm_entity for the pr_person record
ptable = s3db.pr_person
person_id = record.person_id
person = Storage(id = person_id)
if settings.get_auth_person_realm_human_resource_site_then_org():
# Set pr_person.realm_entity to the human_resource's site pe_id or organisation_pe_id
entity = s3db.pr_get_pe_id("org_site", site_id) or \
s3db.pr_get_pe_id("org_organisation", organisation_id)
if entity:
auth.set_realm_entity(ptable, person,
entity = entity,
force_update = True)
tracker = S3Tracker()
if person_id:
# Set person record to follow HR record
# (Person base location remains untouched)
pr_tracker = tracker(ptable, person_id)
pr_tracker.check_in(htable, record_id, timestmp = request.utcnow)
if record.type == 1:
# Staff
vol = False
location_lookup = settings.get_hrm_location_staff()
elif record.type == 2:
# Volunteer
vol = True
location_lookup = settings.get_hrm_location_vol()
# Add deploy_application when creating inside deploy module
if request.controller == "deploy":
user_organisation_id = auth.user.organisation_id
ltable = s3db.deploy_application
if user_organisation_id:
query = (ltable.human_resource_id == record_id) & \
((ltable.organisation_id == None) |
(ltable.organisation_id == user_organisation_id))
else:
query = (ltable.human_resource_id == record_id)
exists = db(query).select(ltable.id,
limitby = (0, 1),
).first()
if not exists:
# Is there a Deployable Team for this user_org?
dotable = s3db.deploy_organisation
                exists = db(dotable.organisation_id == user_organisation_id).select(dotable.id,
                                                                                    limitby = (0, 1),
                                                                                    ).first()
if exists:
# Insert record in this Deployable Team
ltable.insert(human_resource_id = record_id,
organisation_id = user_organisation_id,
)
else:
# Insert record in the global Deployable Team
ltable.insert(human_resource_id = record_id,
)
# Determine how the HR is positioned
address = None
update_location_from_site = False
site_contact = record.site_contact
hstable = s3db.hrm_human_resource_site
query = (hstable.human_resource_id == record_id)
if site_id:
# Add/update the record in the link table
this = db(query).select(hstable.id,
limitby = (0, 1),
).first()
if this:
db(query).update(site_id = site_id,
human_resource_id = record_id,
site_contact = site_contact,
)
else:
hstable.insert(site_id = site_id,
human_resource_id = record_id,
site_contact = site_contact,
)
if location_lookup == "site_id" or location_lookup[0] == "site_id":
# Use site location as HR base location
update_location_from_site = True
elif location_lookup[0] == "person_id":
# Only use site location as HR base location if the Person
# has no Home Address
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type == 1) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby = (0, 1),
).first()
if not address:
update_location_from_site = True
else:
# location_lookup == "person_id"
# Use home address to determine HR base location
# Current Address preferred, otherwise Permanent if present
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type.belongs(1, 2)) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby = (0, 1),
orderby = atable.type,
).first()
else:
# Delete any links in the link table
db(query).delete()
if "person_id" in location_lookup:
# Use home address to determine HR base location
# Current Address preferred, otherwise Permanent if present
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type.belongs(1, 2)) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby = (0, 1),
orderby = atable.type,
).first()
if update_location_from_site:
# Use the site location as base location of the HR
stable = db.org_site
site = db(stable.site_id == site_id).select(stable.location_id,
limitby = (0, 1),
).first()
try:
data.location_id = location_id = site.location_id
except AttributeError:
current.log.error("Can't find site with site_id ", site_id)
data.location_id = location_id = None
elif address:
# Use the address as base location of the HR
data.location_id = location_id = address.location_id
elif vol:
# No known address and not updating location from site
# => fall back to the HR's location_id if known
if record.location_id:
# Add a new Address for the person from the HR location
location_id = record.location_id
pe = db(ptable.id == person_id).select(ptable.pe_id,
limitby = (0, 1),
).first()
try:
pe_id = pe.pe_id
except AttributeError:
current.log.error("Can't find person with id ", person_id)
else:
                    # pr_address may not have been looked up on this path
                    atable = s3db.pr_address
                    atable.insert(type = 1,
pe_id = pe_id,
location_id = location_id,
)
else:
data.location_id = location_id = None
else:
data.location_id = location_id = None
# Update HR base location
hrm_tracker = tracker(htable, record_id)
if location_id:
# Set Base Location
hrm_tracker.set_base_location(location_id)
else:
# Unset Base Location
hrm_tracker.set_base_location(None)
if settings.get_hrm_site_contact_unique():
# Ensure only one Site Contact per Site
if site_contact and site_id:
# Set all others in this Facility to not be the Site Contact
# @ToDo: deployment_setting to allow multiple site contacts
query = (htable.site_id == site_id) & \
(htable.site_contact == True) & \
(htable.id != record_id)
# Prevent overwriting the person_id field!
htable.person_id.update = None
db(query).update(site_contact = False)
if vol:
request_vars = request.vars
programme_id = request_vars.get("programme_id", None)
if programme_id:
# Have we already got a record for this programme?
table = s3db.hrm_programme_hours
query = (table.deleted == False) & \
(table.person_id == person_id)
existing = db(query).select(table.programme_id,
orderby = table.date,
).last()
if existing and existing.programme_id == programme_id:
# No action required
pass
else:
# Insert new record
table.insert(person_id = person_id,
date = request.utcnow,
programme_id = programme_id,
)
# Add record owner (user)
ltable = s3db.pr_person_user
utable = auth.settings.table_user
query = (ptable.id == person_id) & \
(ltable.pe_id == ptable.pe_id) & \
(utable.id == ltable.user_id)
user = db(query).select(utable.id,
utable.organisation_id,
utable.site_id,
limitby = (0, 1),
).first()
if user:
user_id = user.id
data.owned_by_user = user_id
if data:
record.update_record(**data)
if user and organisation_id:
profile = {}
if not user.organisation_id:
# Set the Organisation in the Profile, if not already set
profile["organisation_id"] = organisation_id
if not user.site_id:
# Set the Site in the Profile, if not already set
profile["site_id"] = site_id
else:
# How many active HR records does the user have?
query = (htable.deleted == False) & \
(htable.status == 1) & \
(htable.person_id == person_id)
rows = db(query).select(htable.id,
limitby = (0, 2),
)
if len(rows) == 1:
# We can safely update
profile["organisation_id"] = organisation_id
profile["site_id"] = site_id
if profile:
db(utable.id == user_id).update(**profile)
# =============================================================================
def hrm_compose():
"""
Send message to people/teams/participants
@ToDo: Better rewritten as an S3Method
"""
s3db = current.s3db
get_vars = current.request.get_vars
pe_id = None
if "human_resource.id" in get_vars:
fieldname = "human_resource.id"
record_id = get_vars.get(fieldname)
table = s3db.pr_person
htable = s3db.hrm_human_resource
query = (htable.id == record_id) & \
(htable.person_id == table.id)
title = current.T("Send a message to this person")
# URL to redirect to after message sent
url = URL(f="compose",
vars={fieldname: record_id})
elif "group_id" in get_vars:
fieldname = "group_id"
record_id = get_vars.group_id
table = s3db.pr_group
query = (table.id == record_id)
title = current.T("Send a message to this team")
# URL to redirect to after message sent
url = URL(f="compose",
vars={fieldname: record_id})
elif "training_event.id" in get_vars:
fieldname = "training_event.id"
record_id = get_vars.get(fieldname)
pe_id = get_vars.pe_id
title = current.T("Message Participants")
# URL to redirect to after message sent
url = URL(f="training_event",
args = record_id,
)
else:
current.session.error = current.T("Record not found")
redirect(URL(f="index"))
if not pe_id:
pe = db(query).select(table.pe_id,
limitby = (0, 1),
).first()
if not pe:
current.session.error = current.T("Record not found")
redirect(URL(f="index"))
pe_id = pe.pe_id
if "hrm_id" in get_vars:
# Get the individual's communications options & preference
ctable = s3db.pr_contact
contact = db(ctable.pe_id == pe_id).select(ctable.contact_method,
limitby = (0, 1),
orderby = "priority",
).first()
if contact:
s3db.msg_outbox.contact_method.default = contact.contact_method
else:
current.session.error = current.T("No contact method found")
redirect(URL(f="index"))
# Create the form
output = current.msg.compose(recipient = pe_id,
url = url)
output["title"] = title
response = current.response
representation = s3_get_extension()
response.headers["Content-Type"] = \
response.s3.content_type.get(representation, "text/html")
response.view = "msg/compose.html"
return output
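# A sketch of how this is typically wired up (hypothetical
# controller code):
#   def compose():
#       return s3db.hrm_compose()
# with links passing ?human_resource.id=<id>, ?group_id=<id> or
# ?training_event.id=<id>&pe_id=<pe_id>, as handled above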
# =============================================================================
def hrm_map_popup(r):
"""
Custom output to place inside a Map Popup
- called from postp of human_resource controller
"""
T = current.T
db = current.db
s3db = current.s3db
CONTACT_OPTS = current.msg.CONTACT_OPTS
record = r.record
if not record:
return ""
person_id = record.person_id
output = TABLE()
append = output.append
# Edit button
append(TR(TD(A(T("Edit"),
_target = "_blank",
_id = "edit-btn",
_href = URL(args = [r.id, "update"])
))))
# First name, last name
append(TR(TD(B("%s:" % T("Name"))),
TD(s3_fullname(person_id))))
# Job Title
if record.job_title_id:
field = r.table.job_title_id
append(TR(TD(B("%s:" % field.label)),
TD(field.represent(record.job_title_id))))
# Organization (better with just name rather than Represent)
# @ToDo: Make this configurable - some deployments will only see
# their staff so this is a meaningless field
#table = s3db.org_organisation
#query = (table.id == record.organisation_id)
#name = db(query).select(table.name,
# limitby = (0, 1),
# ).first().name
#append(TR(TD(B("%s:" % r.table.organisation_id.label)),
# TD(name)))
# Components link to the Person record
# Skills
table = s3db.hrm_competency
stable = s3db.hrm_skill
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.skill_id == stable.id)
skills = db(query).select(stable.name)
if skills:
vals = [skill.name for skill in skills]
if len(skills) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Skills"))),
TD(represent)))
# Certificates
table = s3db.hrm_certification
ctable = s3db.hrm_certificate
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.certificate_id == ctable.id)
certificates = db(query).select(ctable.name)
if certificates:
vals = [cert.name for cert in certificates]
if len(certificates) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Certificates"))),
TD(represent)))
# Trainings
table = s3db.hrm_training
etable = s3db.hrm_training_event
ctable = s3db.hrm_course
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.training_event_id == etable.id) & \
(etable.course_id == ctable.id)
trainings = db(query).select(ctable.name)
if trainings:
vals = [train.name for train in trainings]
if len(trainings) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Trainings"))),
TD(represent)))
if record.location_id:
table = s3db.gis_location
query = (table.id == record.location_id)
location = db(query).select(table.path,
table.addr_street,
limitby = (0, 1),
).first()
# City
# Street address
if location and location.addr_street:
append(TR(TD(B("%s:" % table.addr_street.label)),
TD(location.addr_street)))
# Mobile phone number & Email address
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (ptable.id == person_id) & \
(ctable.pe_id == ptable.pe_id) & \
(ctable.deleted == False)
contacts = db(query).select(ctable.contact_method,
ctable.value,
)
email = mobile_phone = ""
for contact in contacts:
if contact.contact_method == "EMAIL":
email = contact.value
elif contact.contact_method == "SMS":
mobile_phone = contact.value
if mobile_phone:
append(TR(TD(B("%s:" % CONTACT_OPTS.get("SMS"))),
TD(mobile_phone)))
# Office number
if record.site_id:
table = s3db.org_office
query = (table.site_id == record.site_id)
office = db(query).select(table.phone1,
limitby = (0, 1),
).first()
if office and office.phone1:
append(TR(TD(B("%s:" % T("Office Phone"))),
TD(office.phone1)))
else:
# @ToDo: Support other Facility Types (Hospitals & Shelters)
pass
# Email address (as hyperlink)
if email:
append(TR(TD(B("%s:" % CONTACT_OPTS.get("EMAIL"))),
TD(A(email,
_href = "mailto:%s" % email,
))))
return output
# =============================================================================
def hrm_training_month(row):
""" Year/Month of the start date of the training event """
if hasattr(row, "hrm_training"):
row = row.hrm_training
try:
date = row.date
except AttributeError:
# not available
date = None
if date:
return "%s/%02d" % (date.year, date.month)
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
def hrm_training_year(row):
""" The Year of the training event """
if hasattr(row, "hrm_training"):
row = row.hrm_training
try:
date = row.date
except AttributeError:
# not available
date = None
if date:
return date.year
else:
return current.messages["NONE"]
# =============================================================================
def hrm_training_job_title(row):
"""
Which Job Title(s) the person is active with
"""
try:
person_id = row.hrm_training.person_id
except AttributeError:
# not available
person_id = None
if person_id:
s3db = current.s3db
table = s3db.hrm_human_resource
jtable = s3db.hrm_job_title
query = (table.person_id == person_id) & \
(table.status != 2) & \
(table.job_title_id == jtable.id)
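# (status 2 = "obsolete" in hrm_human_resource, so this restricts
# the lookup to currently active positions)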
jobs = current.db(query).select(jtable.name,
distinct = True,
orderby = jtable.name,
)
if jobs:
output = ""
for job in jobs:
jobtitle = job.name
if output:
output = "%s, %s" % (output, jobtitle)
else:
output = jobtitle
return output
return current.messages["NONE"]
# =============================================================================
def hrm_training_organisation(row):
"""
Which Organisation(s)/Branch(es) the person is actively affiliated with
"""
try:
person_id = row.hrm_training.person_id
except AttributeError:
# not available
person_id = None
if person_id:
s3db = current.s3db
table = s3db.hrm_human_resource
query = (table.person_id == person_id) & \
(table.status != 2)
orgs = current.db(query).select(table.organisation_id,
distinct = True,
)
if orgs:
output = ""
represent = s3db.org_OrganisationRepresent()
for org in orgs:
org_repr = represent(org.organisation_id)
if output:
output = "%s, %s" % (output, org_repr)
else:
output = org_repr
return output
return current.messages["NONE"]
# =============================================================================
def hrm_rheader(r, tabs=None, profile=False):
""" Resource headers for component views """
if r.representation != "html":
# RHeaders only used in interactive views
return None
record = r.record
if record is None:
# List or Create form: rheader makes no sense here
return None
T = current.T
table = r.table
resourcename = r.name
if resourcename == "person":
record_id = r.id
db = current.db
s3db = current.s3db
htable = s3db.hrm_human_resource
settings = current.deployment_settings
get_vars = r.get_vars
hr = get_vars.get("human_resource.id", None)
if hr:
name = s3db.hrm_human_resource_represent(int(hr))
else:
# Look up HR record ID (required for link URL construction)
# @ToDo: handle multiple HR records (which one are we looking at?)
query = (htable.person_id == record_id) & \
(htable.deleted == False)
hr = db(query).select(htable.id,
limitby = (0, 1),
).first()
if hr:
hr = hr.id
name = s3_fullname(record)
group = get_vars.get("group", None)
if group is None:
controller = r.controller
if controller == "vol":
group = "volunteer"
else:
group = "staff"
use_cv = settings.get_hrm_cv_tab()
record_tab = settings.get_hrm_record_tab()
experience_tab = None
service_record = ""
tbl = TABLE(TR(TH(name,
# @ToDo: Move to CSS
_style = "padding-top:15px",
),
),
)
experience_tab2 = None
if group == "volunteer":
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both", "activity"):
# Integrated into Record tab
#experience_tab = (T("Hours"), "hours")
# Show all Hours spent on both Programmes/Activities & Trainings
# - last month & last year
now = r.utcnow
last_year = now - datetime.timedelta(days=365)
if vol_experience == "activity":
ahtable = db.vol_activity_hours
attable = db.vol_activity_hours_activity_type
bquery = (ahtable.deleted == False) & \
(ahtable.person_id == record_id)
bleft = [attable.on(ahtable.id == attable.activity_hours_id),
]
dfield = ahtable.date
fields = [dfield,
ahtable.hours,
ahtable.id,
#ahtable.training,
attable.activity_type_id,
]
else:
ptable = s3db.hrm_programme
phtable = db.hrm_programme_hours
bquery = (phtable.deleted == False) & \
(phtable.person_id == record_id)
bleft = None
query = (phtable.programme_id == ptable.id)
query &= bquery
row = db(query).select(ptable.name,
phtable.date,
orderby = phtable.date,
).last()
if row:
programme = row.hrm_programme.name
else:
programme = ""
dfield = phtable.date
fields = [dfield,
phtable.hours,
phtable.training,
]
training_hours_year = 0
training_hours_month = 0
query = bquery & \
(dfield > last_year.date())
rows = db(query).select(*fields,
left = bleft)
programme_hours_year = 0
programme_hours_month = 0
last_month = now - datetime.timedelta(days=30)
last_month = last_month.date()
if vol_experience == "activity":
activity_hour_ids = []
ahappend = activity_hour_ids.append
activity_type_ids = []
atappend = activity_type_ids.append
for row in rows:
atappend(row["vol_activity_hours_activity_type.activity_type_id"])
ah_id = row["vol_activity_hours.id"]
if ah_id in activity_hour_ids:
# Don't double-count when more than 1 Activity Type
continue
ahappend(ah_id)
hours = row["vol_activity_hours.hours"]
if hours:
programme_hours_year += hours
if row["vol_activity_hours.date"] > last_month:
programme_hours_month += hours
# Uniquify
activity_type_ids = list(set(activity_type_ids))
# Represent
activity_types = s3db.vol_activity_activity_type.activity_type_id.represent.bulk(activity_type_ids)
# bulk() returns a dict {id: repr} which always includes a
# None-entry => filter that out rather than comparing the
# dict against a list (which could never match)
NONE = current.messages["NONE"]
names = [s3_str(v) for v in activity_types.values() if v != NONE]
activity_types = ", ".join(names) if names else NONE
else:
for row in rows:
hours = row.hours
if hours:
training = row.training
if training:
training_hours_year += hours
if row.date > last_month:
training_hours_month += hours
else:
programme_hours_year += hours
if row.date > last_month:
programme_hours_month += hours
vol_active = settings.get_hrm_vol_active()
if vol_active:
if hr:
dtable = s3db.vol_details
row = db(dtable.human_resource_id == hr).select(dtable.active,
limitby = (0, 1),
).first()
if row and row.active:
active = TD(DIV(T("Yes"),
# @ToDo: Move to CSS
_style = "color:green",
))
else:
active = TD(DIV(T("No"),
# @ToDo: Move to CSS
_style = "color:red",
))
else:
active = TD(DIV(T("No"),
# @ToDo: Move to CSS
_style = "color:red",
))
vol_active_tooltip = settings.get_hrm_vol_active_tooltip()
if vol_active_tooltip:
tooltip = SPAN(_class = "tooltip",
_title = "%s|%s" % (T("Active"),
T(vol_active_tooltip)),
_style = "display:inline-block",
)
else:
tooltip = ""
active_cells = [TH("%s:" % T("Active?"), tooltip),
active]
else:
active_cells = []
if vol_experience == "activity":
row1 = TR(*active_cells
)
row2 = TR(TH("%s:" % T("Activity Types")),
str(activity_types),
)
row3 = TR(TH("%s:" % T("Activity Hours (Month)")),
str(programme_hours_month),
)
row4 = TR(TH("%s:" % T("Activity Hours (Year)")),
str(programme_hours_year),
)
else:
if programme:
row1 = TR(TH("%s:" % T("Program")),
programme,
*active_cells
)
else:
row1 = TR(*active_cells
)
row2 = TR(TH("%s:" % T("Program Hours (Month)")),
str(programme_hours_month),
TH("%s:" % T("Training Hours (Month)")),
str(training_hours_month)
)
row3 = TR(TH("%s:" % T("Program Hours (Year)")),
str(programme_hours_year),
TH("%s:" % T("Training Hours (Year)")),
str(training_hours_year)
)
row4 = ""
tbl = TABLE(TR(TH(name,
_colspan = 4,
),
),
row1,
row2,
row3,
row4,
)
service_record = A(T("Service Record"),
_href = URL(c = "vol",
f = "human_resource",
args = [hr, "form"]
),
_id = "service_record",
_class = "action-btn"
)
if vol_experience == "both" and not use_cv:
experience_tab2 = (T("Experience"), "experience")
elif vol_experience == "experience" and not use_cv:
experience_tab = (T("Experience"), "experience")
elif settings.get_hrm_staff_experience() == "experience" and not use_cv:
experience_tab = (T("Experience"), "experience")
if settings.get_hrm_id_cards():
card_button = A(T("ID Card"),
data = {"url": URL(f = "human_resource",
args = ["%s.card" % hr]
),
},
_class = "action-btn s3-download-button",
_script = "alert('here')",
)
else:
card_button = ""
if settings.get_hrm_use_certificates() and not use_cv:
certificates_tab = (T("Certificates"), "certification")
else:
certificates_tab = None
if settings.get_hrm_use_credentials():
credentials_tab = (T("Credentials"), "credential")
else:
credentials_tab = None
if settings.get_hrm_vol_availability_tab():
availability_tab = (T("Availability"), "availability")
else:
availability_tab = None
if settings.get_hrm_unavailability():
unavailability_tab = (T("Availability"), "unavailability", {}, "organize")
else:
unavailability_tab = None
medical_tab = settings.get_hrm_use_medical() or None
if medical_tab:
medical_tab = (T(medical_tab), "medical")
description_tab = settings.get_hrm_use_description() or None
if description_tab:
description_tab = (T(description_tab), "physical_description")
if settings.get_hrm_use_education() and not use_cv:
education_tab = (T("Education"), "education")
else:
education_tab = None
if settings.get_hrm_use_id():
id_tab = (T("ID"), "identity")
else:
id_tab = None
if settings.get_hrm_use_address():
address_tab = (T("Address"), "address")
else:
address_tab = None
if settings.get_hrm_salary():
salary_tab = (T("Salary"), "salary")
else:
salary_tab = None
if settings.get_hrm_use_skills() and not use_cv:
skills_tab = (T("Skills"), "competency")
else:
skills_tab = None
if record_tab != "record":
teams = settings.get_hrm_teams()
if teams:
teams_tab = (T(teams), "group_membership")
else:
teams_tab = None
else:
teams_tab = None
trainings_tab = instructor_tab = None
if settings.get_hrm_use_trainings():
if not use_cv:
trainings_tab = (T("Trainings"), "training")
if settings.get_hrm_training_instructors() in ("internal", "both"):
instructor_tab = (T("Instructor"), "training_event")
if use_cv:
trainings_tab = (T("CV"), "cv")
hr_tab = None
duplicates_tab = None
if not record_tab:
record_method = None
elif record_tab == "record":
record_method = "record"
if not profile and current.auth.s3_has_role("ADMIN"):
query = (htable.person_id == record_id) & \
(htable.deleted == False)
hr_records = db(query).count()
if hr_records > 1:
duplicates_tab = (T("Duplicates"),
"human_resource",
{"hr": "all"}, # Ensure no &human_resource.id=XXXX
)
else:
# Default
record_method = "human_resource"
record_label = settings.get_hrm_record_label()
if profile:
# Configure for personal mode
if record_method:
hr_tab = (T(record_label), record_method)
tabs = [(T("Person Details"), None),
(T("User Account"), "user"),
hr_tab,
id_tab,
medical_tab,
description_tab,
address_tab,
]
contacts_tabs = settings.get_pr_contacts_tabs()
if "all" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("all"),
"contacts",
))
if "public" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
"public_contacts",
))
if "private" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
"private_contacts",
))
tabs += [availability_tab,
education_tab,
trainings_tab,
certificates_tab,
skills_tab,
credentials_tab,
experience_tab,
experience_tab2,
instructor_tab,
teams_tab,
unavailability_tab,
#(T("Assets"), "asset"),
]
#elif current.session.s3.hrm.mode is not None:
# # Configure for personal mode
# tabs = [(T("Person Details"), None),
# id_tab,
# description_tab,
# address_tab,
# ]
# contacts_tabs = settings.get_pr_contacts_tabs()
# if "all" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("all"),
# "contacts",
# ))
# if "public" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
# "public_contacts",
# ))
# if "private" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
# "private_contacts",
# ))
# if record_method is not None:
# hr_tab = (T("Positions"), "human_resource")
# tabs += [availability_tab,
# trainings_tab,
# certificates_tab,
# skills_tab,
# credentials_tab,
# experience_tab,
# experience_tab2,
# hr_tab,
# teams_tab,
# (T("Assets"), "asset"),
# ]
else:
# Configure for HR manager mode
hr_record = record_label
if group == "staff":
awards_tab = None
elif group == "volunteer":
if settings.get_hrm_use_awards() and not use_cv:
awards_tab = (T("Awards"), "award")
else:
awards_tab = None
if record_method:
hr_tab = (T(hr_record), record_method)
tabs = [(T("Person Details"), None, {"native": True}),
hr_tab,
duplicates_tab,
id_tab,
medical_tab,
description_tab,
address_tab,
]
contacts_tabs = settings.get_pr_contacts_tabs()
if "all" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("all"),
"contacts",
))
if "public" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
"public_contacts",
))
if "private" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
"private_contacts",
))
tabs += [availability_tab,
salary_tab,
education_tab,
trainings_tab,
certificates_tab,
skills_tab,
credentials_tab,
experience_tab,
experience_tab2,
instructor_tab,
awards_tab,
teams_tab,
unavailability_tab,
(T("Assets"), "asset"),
]
# Add role manager tab if a user record exists
user_id = current.auth.s3_get_user_id(record_id)
if user_id:
tabs.append((T("Roles"), "roles"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader_btns = DIV(service_record, card_button,
# @ToDo: Move to CSS
_style = "margin-bottom:10px",
_class = "rheader-btns",
)
rheader = DIV(rheader_btns,
A(s3_avatar_represent(record_id,
"pr_person",
_class = "rheader-avatar",
),
_href = URL(f="person",
args = [record_id, "image", "create"],
vars = get_vars,
),
),
tbl,
rheader_tabs,
)
elif resourcename == "activity":
# Tabs
tabs = [(T("Activity Details"), None),
(T("Hours"), "hours"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
TR(TH("%s: " % table.sector_id.label),
table.sector_id.represent(record.sector_id)),
# @ToDo: (ltable)
#TR(TH("%s: " % table.activity_type_id.label),
# table.activity_type_id.represent(record.activity_type_id)),
TR(TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id)),
TR(TH("%s: " % table.date.label),
table.date.represent(record.date)),
),
rheader_tabs,
)
elif resourcename == "training_event":
settings = current.deployment_settings
# Tabs
if not tabs:
tabs = [(T("Training Event Details"), None),
(T("Participants"), "participant"),
]
if settings.has_module("dc"):
label = settings.get_dc_response_label()
if label == "Survey":
label = T("Surveys")
else:
label = T("Assessments")
tabs.append((label, "target"))
rheader_tabs = s3_rheader_tabs(r, tabs)
action = ""
if settings.has_module("msg"):
permit = current.auth.permission.has_permission
if permit("update", c="hrm", f="compose") and permit("update", c="msg"):
# @ToDo: Be able to see who has been messaged, whether messages bounced, receive confirmation responses, etc
action = A(T("Message Participants"),
_href = URL(f = "compose",
vars = {"training_event.id": record.id,
"pe_id": record.pe_id,
},
),
_class = "action-btn send"
)
if settings.get_hrm_event_types():
event_type = TR(TH("%s: " % table.event_type_id.label),
table.event_type_id.represent(record.event_type_id))
event_name = TR(TH("%s: " % table.name.label),
record.name)
else:
event_type = ""
event_name = ""
instructors = settings.get_hrm_training_instructors()
if instructors == "internal":
instructors = TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id))
elif instructors == "external":
instructors = TR(TH("%s: " % table.instructor.label),
table.instructor.represent(record.instructor))
elif instructors == "both":
instructors = TAG[""](TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id)),
TR(TH("%s: " % table.instructor.label),
table.instructor.represent(record.instructor)))
elif instructors == "multiple":
itable = current.s3db.hrm_training_event_instructor
pfield = itable.person_id
instructors = current.db(itable.training_event_id == r.id).select(pfield)
represent = pfield.represent
instructors = ",".join([represent(i.person_id) for i in instructors])
instructors = TR(TH("%s: " % T("Instructors")),
instructors)
else:
instructors = ""
rheader = DIV(TABLE(event_type,
event_name,
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(record.organisation_id)),
TR(TH("%s: " % table.course_id.label),
table.course_id.represent(record.course_id)),
TR(TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id)),
TR(TH("%s: " % table.start_date.label),
table.start_date.represent(record.start_date)),
instructors,
TR(TH(action,
_colspan = 2,
)),
),
rheader_tabs,
)
elif resourcename == "certificate":
# Tabs
tabs = [(T("Certificate Details"), None),
]
settings = current.deployment_settings
if settings.get_hrm_use_skills() and settings.get_hrm_certificate_skill():
tabs.append((T("Skill Equivalence"), "certificate_skill"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs,
)
elif resourcename == "certification":
# Tabs
tabs = [(T("Certification Details"), None),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id)),
TR(TH("%s: " % table.certificate_id.label),
table.certificate_id.represent(record.certificate_id)),
),
rheader_tabs,
)
elif resourcename == "course":
# Tabs
tabs = [(T("Course Details"), None),
(T("Course Certificates"), "course_certificate"),
(T("Trainees"), "training"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs,
)
elif resourcename == "programme":
# Tabs
tabs = [(T("Program Details"), None),
(T("Volunteer Hours"), "person"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs,
)
elif resourcename == "shift":
db = current.db
s3db = current.s3db
record_id = r.id
# Look up Site
stable = s3db.org_site_shift
link = db(stable.shift_id == record_id).select(stable.site_id,
limitby = (0, 1),
).first()
if link:
site_id = link.site_id
else:
site_id = None
# Look up Assigned
htable = s3db.hrm_human_resource_shift
link = db(htable.shift_id == record_id).select(htable.human_resource_id,
limitby = (0, 1),
).first()
if link:
human_resource_id = link.human_resource_id
else:
human_resource_id = None
rheader = DIV(TABLE(TR(TH("%s: " % stable.site_id.label),
stable.site_id.represent(site_id),
),
TR(TH("%s: " % table.skill_id.label),
table.skill_id.represent(record.skill_id),
TH("%s: " % table.job_title_id.label),
table.job_title_id.represent(record.job_title_id),
),
TR(TH("%s: " % table.start_date.label),
table.start_date.represent(record.start_date),
TH("%s: " % table.end_date.label),
table.end_date.represent(record.end_date),
),
TR(TH("%s: " % htable.human_resource_id.label),
htable.human_resource_id.represent(human_resource_id),
),
),
)
else:
rheader = None
return rheader
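# A minimal usage sketch (hypothetical controller code): pass this
# function as the rheader argument of a REST controller, e.g.
#   return current.rest_controller("hrm", "training_event",
#                                  rheader = hrm_rheader,
#                                  )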
# =============================================================================
def hrm_competency_controller():
"""
RESTful CRUD controller
- used for Searching for people by Skill
- used for Adding/Editing on Profile page
"""
T = current.T
s3db = current.s3db
s3 = current.response.s3
def prep(r):
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
table = r.table
get_vars = r.get_vars
person_id = get_vars.get("~.person_id", None)
if person_id:
try:
person_id = int(person_id)
except ValueError:
pass
else:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
# Additional filtering of the profile section by skill type
skill_type_name = get_vars.get("~.skill_id$skill_type_id$name")
if skill_type_name:
ttable = s3db.hrm_skill_type
query = (ttable.name == skill_type_name)
rows = current.db(query).select(ttable.id)
skill_type_ids = [row.id for row in rows]
if skill_type_ids:
field = table.skill_id
requires = field.requires
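# IS_EMPTY_OR wraps the real validator => unwrap to reach the
# inner validator (typically an IS_ONE_OF) which supports
# set_filter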
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if hasattr(requires, "set_filter"):
requires.set_filter(filterby = "skill_type_id",
filter_opts = skill_type_ids,
)
elif not r.id:
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$hrm_human_resource.job_title_id$name",
],
label = T("Search"),
comment = T("You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons."),
),
S3OptionsFilter("skill_id",
label = T("Skills"),
options = lambda: \
s3_get_filter_opts("hrm_skill", translate=True),
),
S3OptionsFilter("competency_id",
label = T("Competency"),
options = lambda: \
s3_get_filter_opts("hrm_competency_rating", translate=True),
),
]
s3db.configure("hrm_competency",
filter_widgets = filter_widgets,
list_fields = ["person_id",
"skill_id",
"competency_id",
"comments",
],
)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Custom action button to add the member to a team
S3CRUD.action_buttons(r)
args = ["[id]", "group_membership"]
s3.actions.append({"label": str(T("Add to a Team")),
"_class": "action-btn",
"url": URL(f = "person",
args = args),
}
)
return output
s3.postp = postp
return current.rest_controller("hrm", "competency",
# @ToDo: Create these if-required
#csv_stylesheet = ("hrm", "competency.xsl"),
#csv_template = ("hrm", "competency"),
)
# =============================================================================
def hrm_credential_controller():
"""
RESTful CRUD controller
- could be used for Searching for people by Skill
- used for Adding/Editing on Profile page
"""
s3 = current.response.s3
def prep(r):
table = r.table
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
person_id = r.get_vars.get("~.person_id", None)
if person_id:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
if r.record:
table.person_id.comment = None
table.person_id.writable = False
return True
s3.prep = prep
return current.rest_controller("hrm", "credential",
# @ToDo: Create these if-required
#csv_stylesheet = ("hrm", "credential.xsl"),
#csv_template = ("hrm", "credential"),
)
# =============================================================================
def hrm_experience_controller():
"""
Experience Controller, defined in the model for use from
multiple controllers for unified menus
- used for Adding/Editing on Profile page
"""
def prep(r):
if r.method in ("create", "update"):
# Coming from Profile page?
field = current.s3db.hrm_experience.person_id
person_id = current.request.get_vars.get("~.person_id", None)
if person_id:
field.default = person_id
field.readable = field.writable = False
elif r.method == "update":
# Workaround until generic solution available:
refresh = r.get_vars.get("refresh")
if refresh and refresh.startswith("profile-list-hrm_experience"):
field.readable = field.writable = False
return True
current.response.s3.prep = prep
return current.rest_controller("hrm", "experience",
# @ToDo: Create these if-required
#csv_stylesheet = ("hrm", "experience.xsl"),
#csv_template = ("hrm", "experience"),
)
# =============================================================================
def hrm_group_controller():
"""
Team controller
- uses the group table from PR
"""
T = current.T
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
team_name = settings.get_hrm_teams()
tablename = "pr_group"
table = s3db[tablename]
_group_type = table.group_type
if team_name == "Teams":
_group_type.label = T("Team Type")
table.description.label = T("Team Description")
table.name.label = T("Team Name")
# Default anyway
#elif team_name == "Groups":
# _group_type.label = T("Group Type")
# table.description.label = T("Group Description")
# table.name.label = T("Group Name")
# Set Defaults
_group_type.default = 3 # 'Relief Team'
# We use crud_form
#_group_type.readable = _group_type.writable = False
# Only show Relief Teams
# Do not show system groups
s3.filter = (table.system == False) & \
(_group_type == 3)
if team_name == "Teams":
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Add Team"),
title_display = T("Team Details"),
title_list = T("Teams"),
title_update = T("Edit Team"),
label_list_button = T("List Teams"),
label_search_button = T("Search Teams"),
msg_record_created = T("Team added"),
msg_record_modified = T("Team updated"),
msg_record_deleted = T("Team deleted"),
msg_list_empty = T("No Teams currently registered"))
# Format for filter_widgets & imports
s3db.add_components("pr_group",
org_organisation_team = "group_id")
# Pre-process
def prep(r):
# Redirect to member list when a new group has been created
create_next = URL(f="group",
args=["[id]", "group_membership"])
teams_orgs = settings.get_hrm_teams_orgs()
if teams_orgs:
if teams_orgs == 1:
multiple = False
else:
multiple = True
ottable = s3db.org_organisation_team
label = ottable.organisation_id.label
ottable.organisation_id.label = ""
crud_form = S3SQLCustomForm("name",
"description",
S3SQLInlineComponent("organisation_team",
label = label,
fields = ["organisation_id"],
multiple = multiple,
),
"comments",
)
filter_widgets = [
S3TextFilter(["name",
"description",
"comments",
"organisation_team.organisation_id$name",
"organisation_team.organisation_id$acronym",
],
label = T("Search"),
comment = T("You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all."),
#_class="filter-search",
),
S3OptionsFilter("organisation_team.organisation_id",
label = T("Organization"),
#hidden=True,
),
]
list_fields = ["organisation_team.organisation_id",
"name",
"description",
"comments",
]
s3db.configure("pr_group",
create_next = create_next,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
else:
s3db.configure("pr_group",
create_next = create_next,
)
if r.interactive or r.representation in ("aadata", "xls", "pdf"):
if r.component_name == "group_membership":
hrm_configure_pr_group_membership()
if r.representation == "xls":
# Modify Title of Report to show Team Name
s3.crud_strings.pr_group_membership.title_list = r.record.name
# Make it match Import sheets
tablename = "pr_group_membership"
list_fields = s3db.get_config(tablename, "list_fields")
# Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
try:
list_fields.remove("id")
except ValueError:
pass
# Separate Facility Type from Facility Name
s3db.hrm_human_resource.site_id.represent = s3db.org_SiteRepresent(show_type = False)
# Insert the type directly after the facility name
try:
i = list_fields.index("site_id") + 1
except ValueError:
i = len(list_fields)
list_fields.insert(i,
(T("Facility Type"),
"person_id$human_resource.site_id$instance_type"))
# Split person_id into first/middle/last
try:
list_fields.remove("person_id")
except ValueError:
pass
list_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
] + list_fields
s3db.configure(tablename,
list_fields = list_fields,
)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
update_url = URL(args=["[id]", "group_membership"])
S3CRUD.action_buttons(r, update_url=update_url)
if current.deployment_settings.has_module("msg") and \
current.auth.permission.has_permission("update", c="hrm",
f="compose"):
s3.actions.append({
"url": URL(f="compose",
vars = {"group_id": "[id]"}),
"_class": "action-btn send",
"label": s3_str(T("Send Message"))})
return output
s3.postp = postp
if team_name == "Team":
label = T("Team Details")
elif team_name == "Group":
label = T("Group Details")
else:
label = T("Basic Details")
tabs = [(label, None),
# Team should be contacted either via the Leader or
# simply by sending a message to the group as a whole.
#(T("Contact Data"), "contact"),
(T("Members"), "group_membership"),
(T("Documents"), "document"),
]
return current.rest_controller("pr", "group",
csv_stylesheet = ("hrm", "group.xsl"),
csv_template = "group",
rheader = lambda r: \
s3db.pr_rheader(r, tabs=tabs),
)
# =============================================================================
def hrm_human_resource_controller(extra_filter = None):
"""
Human Resources Controller, defined in the model for use from
multiple controllers for unified menus
- used for Summary & Profile views, Imports and S3AddPersonWidget
"""
T = current.T
db = current.db
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
def prep(r):
# Apply extra filter from controller
if extra_filter is not None:
r.resource.add_filter(extra_filter)
c = r.controller
deploy = c == "deploy"
vol = c == "vol"
if deploy:
# Apply availability filter
s3db.deploy_availability_filter(r)
elif settings.get_hrm_unavailability():
# Apply availability filter
s3db.pr_availability_filter(r)
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
method = r.method
if method in ("form", "lookup"):
return True
elif method == "profile":
# Adapt list_fields for pr_address
s3db.table("pr_address") # must load model before get_config
list_fields = s3db.get_config("pr_address", "list_fields")
list_fields.append("comments")
# Show training date without time
s3db.hrm_training.date.represent = lambda d: \
S3DateTime.date_represent(d, utc=True)
# Adapt list_fields for hrm_training
list_fields = ["course_id",
"training_event_id$site_id",
"date",
"hours",
"grade",
"comments",
]
if deploy:
list_fields.append("course_id$course_job_title.job_title_id")
s3db.configure("hrm_training",
list_fields = list_fields,
)
# Adapt list_fields for hrm_experience
s3db.table("hrm_experience") # Load normal model
s3db.configure("hrm_experience",
list_fields = [#"code",
"employment_type",
"activity_type",
"organisation_id",
"organisation",
"job_title_id",
"job_title",
"responsibilities",
"start_date",
"end_date",
"hours",
"location_id",
"supervisor_id",
"comments",
],
)
# Get the person's full name for header, and pe_id for
# context filtering
table = r.table
record = r.record
person_id = record.person_id
ptable = db.pr_person
person = db(ptable.id == person_id).select(ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.pe_id,
limitby = (0, 1),
).first()
name = s3_fullname(person)
pe_id = person.pe_id
comments = table.organisation_id.represent(record.organisation_id)
if record.job_title_id:
comments = (SPAN("%s, " % \
s3_str(table.job_title_id.represent(record.job_title_id))),
comments)
# Configure widgets
contacts_widget = {"label": "Contacts",
"label_create": "Add Contact",
"tablename": "pr_contact",
"type": "datalist",
"filter": FS("pe_id") == pe_id,
"icon": "phone",
# Default renderer:
#"list_layout": s3db.pr_render_contact,
"orderby": "priority asc",
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "contact",
}
address_widget = {"label": "Address",
"label_create": "Add Address",
"type": "datalist",
"tablename": "pr_address",
"filter": FS("pe_id") == pe_id,
"icon": "home",
# Default renderer:
#"list_layout": s3db.pr_render_address,
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "address",
}
skills_widget = {"label": "Skills",
"label_create": "Add Skill",
"type": "datalist",
"tablename": "hrm_competency",
"filter": FS("person_id") == person_id,
"icon": "comment-alt",
# Default renderer:
#"list_layout": hrm_competency_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "competency",
}
trainings_widget = {"label": "Trainings",
"label_create": "Add Training",
"type": "datalist",
"tablename": "hrm_training",
"filter": FS("person_id") == person_id,
"icon": "wrench",
# Default renderer:
#"list_layout": hrm_training_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "training",
}
experience_widget = {"label": "Experience",
"label_create": "Add Experience",
"type": "datalist",
"tablename": "hrm_experience",
"filter": FS("person_id") == person_id,
"icon": "truck",
# Default renderer:
#"list_layout": hrm_experience_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "experience",
}
docs_widget = {"label": "Documents",
"label_create": "Add Document",
"type": "datalist",
"tablename": "doc_document",
"filter": FS("doc_id") == record.doc_id,
"icon": "attachment",
# Default renderer:
#"list_layout": s3db.doc_document_list_layout,
}
profile_widgets = [contacts_widget,
address_widget,
skills_widget,
trainings_widget,
experience_widget,
docs_widget,
]
if settings.get_hrm_use_education():
education_widget = {"label": "Education",
"label_create": "Add Education",
"type": "datalist",
"tablename": "pr_education",
"filter": FS("person_id") == person_id,
"icon": "book",
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "education",
}
profile_widgets.insert(-1, education_widget)
if deploy:
credentials_widget = {# @ToDo: deployment_setting for Labels
"label": "Sectors",
"label_create": "Add Sector",
"type": "datalist",
"tablename": "hrm_credential",
"filter": FS("person_id") == person_id,
"icon": "tags",
# Default renderer:
#"list_layout": hrm_credential_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "credential",
}
profile_widgets.insert(2, credentials_widget)
# Organizer-widget to record periods of unavailability:
#profile_widgets.append({"label": "Unavailability",
# "type": "organizer",
# "tablename": "deploy_unavailability",
# "master": "pr_person/%s" % person_id,
# "component": "unavailability",
# "icon": "calendar",
# "url": URL(c="deploy", f="person",
# args = [person_id, "unavailability"],
# ),
# })
if settings.get_hrm_unavailability():
unavailability_widget = {"label": "Unavailability",
"type": "organizer",
"tablename": "pr_unavailability",
"master": "pr_person/%s" % person_id,
"component": "unavailability",
"icon": "calendar",
"url": URL(c="pr", f="person",
args = [person_id, "unavailability"],
),
}
profile_widgets.insert(-1, unavailability_widget)
# Configure resource
s3db.configure("hrm_human_resource",
profile_cols = 1,
profile_header = DIV(A(s3_avatar_represent(person_id,
tablename = "pr_person",
_class = "media-object",
),
_class = "pull-left",
#_href = event_url,
),
H2(name),
P(comments),
_class = "profile-header",
),
profile_title = "%s : %s" % (
s3_str(s3.crud_strings["hrm_human_resource"].title_display),
s3_str(name),
),
profile_widgets = profile_widgets,
)
elif method == "summary":
# CRUD Strings
if deploy:
deploy_team = settings.get_deploy_team_label()
s3.crud_strings["hrm_human_resource"]["title_list"] = \
T("%(team)s Members") % {"team": T(deploy_team)}
else:
s3.crud_strings["hrm_human_resource"]["title_list"] = \
T("Staff & Volunteers")
# Filter Widgets
filter_widgets = hrm_human_resource_filters(resource_type = "both",
hrm_type_opts = s3db.hrm_type_opts)
# List Fields
list_fields = ["person_id",
"job_title_id",
"organisation_id",
]
# Report Options
report_fields = ["organisation_id",
"person_id",
"person_id$gender",
"job_title_id",
(T("Training"), "training.course_id"),
]
rappend = report_fields.append
if settings.get_hrm_use_national_id():
list_fields.append((T("National ID"), "person_id$national_id.value"))
use_code = settings.get_hrm_use_code()
if use_code is True or (use_code and not vol):
list_fields.append("code")
if vol:
vol_active = settings.get_hrm_vol_active()
if vol_active:
list_fields.append((T("Active"), "details.active"))
rappend((T("Active"), "details.active"))
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
list_fields.append((T("Program"), "person_id$hours.programme_id"))
rappend((T("Program"), "person_id$hours.programme_id"))
elif settings.get_hrm_staff_departments():
list_fields.extend(("department_id",
"site_id"))
report_fields.extend(("site_id",
"department_id"))
else:
list_fields.append("site_id")
rappend("site_id")
list_fields.extend(((T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
))
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
for level in levels:
rappend("location_id$%s" % level)
if deploy:
rappend((T("Credential"), "credential.job_title_id"))
teams = settings.get_hrm_teams()
if teams:
if teams == "Teams":
teams = "Team"
elif teams == "Groups":
teams = "Group"
rappend((teams, "group_membership.group_id"))
if settings.get_org_regions():
rappend("organisation_id$organisation_region.region_id")
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
defaults = Storage(
rows = "organisation_id",
cols = "training.course_id",
fact = "count(person_id)",
totals = True,
)
)
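# (with these defaults the summary report opens as a matrix of
# Organizations x Courses showing a count of people per cell)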
# Configure resource
s3db.configure("hrm_human_resource",
filter_widgets = filter_widgets,
list_fields = list_fields,
report_options = report_options,
)
# Remove controller filter
#s3.filter = None
#elif r.representation in ("geojson", "plain") or deploy:
# # No filter
# pass
#else:
# if vol:
# # Default to Volunteers
# type_filter = FS("type") == 2
# else:
# # Default to Staff
# type_filter = FS("type") == 1
# r.resource.add_filter(type_filter)
# Others
if r.interactive:
if method == "create" and not r.component:
if not settings.get_hrm_mix_staff():
# Need to either create a Staff or a Volunteer through separate forms
if vol:
c = "vol"
f = "volunteer"
else:
c = "hrm"
f = "staff"
redirect(URL(c=c, f=f,
args=r.args,
vars=r.vars))
elif method == "delete":
if deploy:
# Delete the Application, not the HR
atable = s3db.deploy_application
app = db(atable.human_resource_id == r.id).select(atable.id,
limitby = (0, 1),
).first()
if not app:
current.session.error = "Cannot find Application to delete!"
redirect(URL(args = "summary"))
redirect(URL(f="application",
args = [app.id, "delete"],
))
else:
# Don't redirect
pass
elif method == "profile":
# Don't redirect
pass
# Now done in s3merge
#elif method == "deduplicate":
# # Don't use AddPersonWidget here
# from gluon.sqlhtml import OptionsWidget
# field = r.table.person_id
# field.requires = IS_ONE_OF(db, "pr_person.id",
# label = field.represent)
# field.widget = OptionsWidget.widget
elif r.id:
# Redirect to person controller
if r.record.type == 2:
group = "volunteer"
else:
group = "staff"
if r.function == "trainee":
fn = "trainee_person"
else:
fn = "person"
redirect(URL(f = fn,
args = [method] if method else [],
vars = {"human_resource.id" : r.id,
"group" : group
},
))
elif r.representation == "xls" and not r.component:
hrm_xls_list_fields(r)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
if r.controller == "deploy":
# Application is deleted, not HR
deletable = True
# Open Profile page
read_url = URL(args = ["[id]", "profile"])
update_url = URL(args = ["[id]", "profile"])
else:
deletable = settings.get_hrm_deletable()
# Standard CRUD buttons
read_url = None
update_url = None
S3CRUD.action_buttons(r,
deletable = deletable,
read_url = read_url,
update_url = update_url)
if "msg" in settings.modules and \
settings.get_hrm_compose_button() and \
current.auth.permission.has_permission("update",
c="hrm",
f="compose"):
s3.actions.append({"url": URL(f="compose",
vars = {"human_resource.id": "[id]"},
),
"_class": "action-btn send",
"label": s3_str(T("Send Message"))
})
elif r.representation == "plain":
# Map Popups
output = hrm_map_popup(r)
return output
s3.postp = postp
return current.rest_controller("hrm", "human_resource")
# =============================================================================
def hrm_person_controller(**attr):
"""
Persons Controller, defined in the model for use from
multiple controllers for unified menus
- used for access to component Tabs, Personal Profile & Imports
- includes components relevant to HRM
"""
T = current.T
db = current.db
s3db = current.s3db
#auth = current.auth
response = current.response
session = current.session
settings = current.deployment_settings
s3 = response.s3
configure = s3db.configure
set_method = s3db.set_method
# Custom Method(s) for Contacts
contacts_tabs = settings.get_pr_contacts_tabs()
if "all" in contacts_tabs:
set_method("pr", "person",
method = "contacts",
action = s3db.pr_Contacts)
if "public" in contacts_tabs:
set_method("pr", "person",
method = "public_contacts",
action = s3db.pr_Contacts)
if "private" in contacts_tabs:
set_method("pr", "person",
method = "private_contacts",
action = s3db.pr_Contacts)
# Custom Method for CV
set_method("pr", "person",
method = "cv",
action = hrm_CV)
# Custom Method for Medical
set_method("pr", "person",
method = "medical",
action = hrm_Medical)
# Custom Method for HR Record
set_method("pr", "person",
method = "record",
action = hrm_Record)
if settings.has_module("asset"):
# Assets as component of people
s3db.add_components("pr_person",
asset_asset = "assigned_to_id",
)
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
deletable = False,
editable = False,
insertable = False,
)
get_vars = current.request.get_vars
group = get_vars.get("group", "staff")
hr_id = get_vars.get("human_resource.id", None)
if not str(hr_id).isdigit():
hr_id = None
# Configure human resource table
table = s3db.hrm_human_resource
table.type.default = 1
get_vars["xsltmode"] = "staff"
if hr_id:
hr = db(table.id == hr_id).select(table.type,
limitby = (0, 1),
).first()
if hr:
group = "volunteer" if hr.type == 2 else "staff"
# Also inform the back-end of this finding
get_vars["group"] = group
# Configure person table
table = db.pr_person
tablename = "pr_person"
configure(tablename,
deletable = False,
)
#mode = session.s3.hrm.mode
#if mode is not None:
# # Configure for personal mode
# s3.crud_strings[tablename].update(
# title_display = T("Personal Profile"),
# title_update = T("Personal Profile"))
# # People can view their own HR data, but not edit it
# # - over-ride in Template if need to make any elements editable
# configure("hrm_human_resource",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_certification",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_credential",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_competency",
# deletable = False,
# editable = False,
# insertable = True, # Can add unconfirmed
# )
# configure("hrm_training", # Can add but not provide grade
# deletable = False,
# editable = False,
# insertable = True,
# )
# configure("hrm_experience",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("pr_group_membership",
# deletable = False,
# editable = False,
# insertable = False,
# )
#else:
# Configure for HR manager mode
if settings.get_hrm_staff_label() == T("Contacts"):
s3.crud_strings[tablename].update(
title_upload = T("Import Contacts"),
title_display = T("Contact Details"),
title_update = T("Contact Details")
)
elif group == "volunteer":
s3.crud_strings[tablename].update(
title_upload = T("Import Volunteers"),
title_display = T("Volunteer Details"),
title_update = T("Volunteer Details")
)
else:
s3.crud_strings[tablename].update(
title_upload = T("Import Staff"),
title_display = T("Staff Member Details"),
title_update = T("Staff Member Details")
)
# Import pre-process
def import_prep(data, group=group):
"""
Deletes all HR records (of the given group) of the
organisation/branch before processing a new data import
"""
if s3.import_replace:
resource, tree = data
if tree is not None:
xml = current.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if group == "staff":
group = 1
elif group == "volunteer":
group = 2
else:
return # don't delete if no group specified
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
orgs = root.xpath(expr)
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(xml.xml_decode(org_name))
except ValueError:
pass
if org_name:
htable = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (otable.name == org_name) & \
(htable.organisation_id == otable.id) & \
(htable.type == group)
resource = s3db.resource("hrm_human_resource", filter=query)
# Use cascade=True so that the deletion gets
# rolled back if the import fails:
resource.delete(format = "xml",
cascade = True,
)
s3.import_prep = import_prep
# CRUD pre-process
def prep(r):
# Filter to just those people with an active HR record
r.resource.add_filter(FS("human_resource.id") != None)
# Plug-in role matrix for Admins/OrgAdmins
S3PersonRoleManager.set_method(r, entity="pr_person")
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
method = r.method
if r.representation == "s3json":
current.xml.show_ids = True
elif r.interactive and method != "import":
if not r.component:
table = r.table
table.pe_label.readable = table.pe_label.writable = False
table.age_group.readable = table.age_group.writable = False
# Assume volunteers only between 5-120
dob = table.date_of_birth
dob.widget = S3CalendarWidget(past_months = 1440,
future_months = -60,
)
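# (1440 months = up to 120 years in the past, -60 months = born
# at least 5 years ago)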
person_details_table = s3db.pr_person_details
# No point showing the 'Occupation' field - that's the Job Title in the Staff Record
person_details_table.occupation.readable = person_details_table.occupation.writable = False
# Organisation Dependent Fields
# - deprecated (IFRC template only)
#set_org_dependent_field = settings.set_org_dependent_field
#set_org_dependent_field("pr_person", "middle_name")
#set_org_dependent_field("pr_person_details", "father_name")
#set_org_dependent_field("pr_person_details", "mother_name")
#set_org_dependent_field("pr_person_details", "grandfather_name")
#set_org_dependent_field("pr_person_details", "affiliations")
#set_org_dependent_field("pr_person_details", "company")
else:
component_name = r.component_name
if component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = table.ethnicity.readable = True
table.blood_type.writable = table.blood_type.readable = True
table.medical_conditions.writable = table.medical_conditions.readable = True
table.other_details.writable = table.other_details.readable = True
elif component_name == "appraisal":
mission_id = r.get_vars.get("mission_id", None)
if mission_id:
hatable = r.component.table
# Lookup Code
mtable = s3db.deploy_mission
mission = db(mtable.id == mission_id).select(mtable.code,
limitby = (0, 1),
).first()
if mission:
hatable.code.default = mission.code
# Lookup Job Title
atable = db.deploy_assignment
htable = db.hrm_human_resource
query = (atable.mission_id == mission_id) & \
(atable.human_resource_id == htable.id) & \
(htable.person_id == r.id)
assignment = db(query).select(atable.job_title_id,
limitby = (0, 1),
).first()
if assignment:
hatable.job_title_id.default = assignment.job_title_id
elif component_name == "asset":
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False,
)
elif component_name == "group_membership":
hrm_configure_pr_group_membership()
elif component_name == "image":
if r.method == "create":
# Coming from the rheader...simplify UI
table = s3db.pr_image
f = table.profile
f.default = True
f.readable = f.writable = False
table.image.comment = None
table.type.readable = table.type.writable = False
table.url.readable = table.url.writable = False
table.description.readable = table.description.writable = False
elif component_name == "salary":
hrm_configure_salary(r)
elif component_name == "user":
r.component.configure(deletable = False)
current.auth.configure_user_fields()
utable = db.auth_user
# Don't allow password changes here (doesn't require old password)
utable.password.readable = utable.password.writable = False
# User cannot amend their own Org/Site/Link
f = utable.organisation_id
f.writable = False
f.comment = None
f = utable.site_id
f.writable = False
f.comment = None
f = utable.link_user_to
f.writable = False
f.comment = None
def auth_user_onaccept(form):
language = form.vars.get("language")
if language:
T.force(language)
session.s3.language = language
s3db.configure("auth_user",
onaccept = auth_user_onaccept
)
if method == "record" or r.component_name == "human_resource":
table = s3db.hrm_human_resource
table.person_id.writable = table.person_id.readable = False
table.site_id.readable = table.site_id.writable = True
#org = session.s3.hrm.org
#f = table.organisation_id
#if org is None:
# f.widget = None
# f.writable = False
#else:
# f.default = org
# f.readable = f.writable = False
# table.site_id.requires = IS_EMPTY_OR(
# IS_ONE_OF(db,
# "org_site.%s" % s3db.super_key(db.org_site),
# s3db.org_site_represent,
# filterby="organisation_id",
# filter_opts=(session.s3.hrm.org,),
# ))
elif method == "cv" or r.component_name == "training":
list_fields = ["course_id",
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
list_fields.append("date")
s3db.configure("hrm_training",
list_fields = list_fields,
)
resource = r.resource
#if mode is not None:
# resource.build_query(id=auth.s3_logged_in_person())
if method not in ("deduplicate", "search_ac"):
if not r.id and not hr_id:
# pre-action redirect => must retain prior errors
if response.error:
session.error = response.error
redirect(URL(r=r, f="staff"))
if resource.count() == 1:
resource.load()
r.record = resource.records().first()
if r.record:
r.id = r.record.id
if not r.record:
session.error = T("Record not found")
redirect(URL(f="staff"))
if hr_id and r.component_name == "human_resource":
r.component_id = hr_id
configure("hrm_human_resource",
insertable = False,
)
elif r.representation == "aadata":
if r.component_name == "group_membership":
hrm_configure_pr_group_membership()
elif method == "cv" or r.component_name == "training":
list_fields = ["course_id",
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
list_fields.append("date")
s3db.configure("hrm_training",
list_fields = list_fields,
)
return True
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "asset":
# Provide a link to assign a new Asset
# @ToDo: Proper Widget to do this inline
output["add_btn"] = A(T("Assign Asset"),
_href = URL(c="asset", f="asset"),
_id = "add-btn",
_class = "action-btn",
)
return output
s3.postp = postp
# REST Interface
#orgname = session.s3.hrm.orgname
_attr = {"csv_stylesheet": ("hrm", "person.xsl"),
"csv_template": "staff",
"csv_extra_fields": [{"label": "Type",
"field": s3db.hrm_human_resource.type,
},
],
# Better in the native person controller (but this isn't always accessible):
#"deduplicate": "",
#"orgname": orgname,
"replace_option": T("Remove existing data before import"),
"rheader": hrm_rheader,
}
_attr.update(attr)
return current.rest_controller("pr", "person", **_attr)
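# Illustrative call-site sketch (hypothetical wiring, for orientation
# only): this controller is normally wrapped by a thin function in the
# request controllers, e.g. in controllers/hrm.py:
#
#def person():
#    return current.s3db.hrm_person_controller()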
# =============================================================================
def hrm_training_controller():
"""
Training Controller, defined in the model for use from
multiple controllers for unified menus
- used for Searching for Participants
- used for Adding/Editing on Profile page
"""
s3db = current.s3db
def prep(r):
method = r.method
if r.interactive or r.representation == "aadata":
s3db.configure("hrm_training",
#insertable = False,
listadd = False,
)
if method in ("create", "update"):
# Coming from Profile page?
person_id = r.get_vars.get("~.person_id", None)
if person_id:
field = s3db.hrm_training.person_id
field.default = person_id
field.readable = field.writable = False
# @ToDo: Complete
#elif method == "import":
# # Allow course to be populated onaccept from training_event_id
# table = s3db.hrm_training
# s3db.configure("hrm_training",
# onvalidation = hrm_training_onvalidation,
# )
# table.course_id.requires = IS_EMPTY_OR(table.course_id.requires)
# f = table.training_event_id
# training_event_id = r.get_vars.get("~.training_event_id", None)
# if training_event_id:
# f.default = training_event_id
# else:
# f.writable = True
if method == "report":
# Configure virtual fields for reports
s3db.configure("hrm_training", extra_fields=["date"])
table = s3db.hrm_training
table.year = Field.Method("year", hrm_training_year)
table.month = Field.Method("month", hrm_training_month)
# Can't reliably link to persons as these are imported in random order
# - do this postimport if desired (see RMS)
#elif method == "import":
# # If users accounts are created for imported participants
# s3db.configure("auth_user",
# create_onaccept = lambda form: current.auth.s3_approve_user(form.vars),
# )
return True
current.response.s3.prep = prep
return current.rest_controller("hrm", "training",
csv_stylesheet = ("hrm", "training.xsl"),
csv_template = ("hrm", "training"),
csv_extra_fields = [{"label": "Training Event",
"field": s3db.hrm_training.training_event_id,
},
],
)
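# Illustrative sketch (hypothetical helper, not part of Eden): the
# Field.Method virtual fields configured above resolve lazily per row.
# The real hrm_training_year is defined elsewhere in this module; a
# minimal version, relying on extra_fields=["date"] so that the row
# carries hrm_training.date, could look like:
#
#def example_training_year(row):
#    try:
#        date = row["hrm_training.date"]
#    except (AttributeError, KeyError):
#        return current.messages["NONE"]
#    return date.year if date else current.messages["NONE"]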
# =============================================================================
def hrm_training_event_controller():
"""
Training Event Controller, defined in the model for use from
multiple controllers for unified menus
"""
s3 = current.response.s3
def prep(r):
if r.component_name == "target":
tablename = "dc_target"
# Simplify
table = r.component.table
table.location_id.readable = table.location_id.writable = False
#table.organisation_id.readable = table.organisation_id.writable = False
#table.comments.readable = table.comments.writable = False
# CRUD strings
T = current.T
label = current.deployment_settings.get_dc_response_label()
if label == "Survey":
#label = T("Survey")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Survey"),
title_display = T("Survey Details"),
title_list = T("Surveys"),
title_update = T("Edit Survey"),
title_upload = T("Import Surveys"),
label_list_button = T("List Surveys"),
label_delete_button = T("Delete Survey"),
msg_record_created = T("Survey added"),
msg_record_modified = T("Survey updated"),
msg_record_deleted = T("Survey deleted"),
msg_list_empty = T("No Surveys currently registered"))
else:
#label = T("Assessment")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Assessment"),
title_display = T("Assessment Details"),
title_list = T("Assessments"),
title_update = T("Edit Assessment"),
title_upload = T("Import Assessments"),
label_list_button = T("List Assessments"),
label_delete_button = T("Delete Assessment"),
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments currently registered"))
# Open in native controller
current.s3db.configure(tablename,
linkto = lambda record_id: \
URL(c="dc", f="target",
args = [record_id, "read"],
),
linkto_update = lambda record_id: \
URL(c="dc", f="target",
args = [record_id, "update"],
),
)
elif r.component_name == "participant" and \
(r.interactive or \
r.representation in ("aadata", "pdf", "xls")):
# Use appropriate CRUD strings
T = current.T
s3.crud_strings["hrm_training"] = Storage(
label_create = T("Add Participant"),
title_display = T("Participant Details"),
title_list = T("Participants"),
title_update = T("Edit Participant"),
title_upload = T("Import Participants"),
label_list_button = T("List Participants"),
label_delete_button = T("Remove Participant"),
msg_record_created = T("Participant added"),
msg_record_modified = T("Participant updated"),
msg_record_deleted = T("Participant removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Participants registered"))
# Hide/default fields which get populated from the Event
record = r.record
s3db = current.s3db
table = s3db.hrm_training
field = table.course_id
field.readable = False
field.writable = False
field.default = record.course_id
field = table.date
field.readable = False
field.writable = False
field.default = record.start_date
field = table.hours
field.readable = False
field.writable = False
field.default = record.hours
# Suitable list_fields
settings = current.deployment_settings
list_fields = ["person_id",
]
if settings.get_hrm_use_job_titles():
list_fields.append((T("Job Title"), "job_title")) # Field.Method
list_fields += [(settings.get_hrm_organisation_label(), "organisation"), # Field.Method
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
if settings.get_hrm_use_certificates():
list_fields.append("certification_from_training.number")
s3db.configure("hrm_training",
list_fields = list_fields
)
return True
s3.prep = prep
#def postp(r, output):
# if r.interactive:
# # @ToDo: Restore once the other part is working
# if r.component_name == "participant" and \
# isinstance(output, dict):
# showadd_btn = output.get("showadd_btn", None)
# if showadd_btn:
# # Add an Import button
# if s3.crud.formstyle == "bootstrap":
# _class = "s3_modal"
# else:
# _class = "action-btn s3_modal"
# import_btn = S3CRUD.crud_button(label = current.T("Import Participants"),
# _class = _class,
# _href = URL(f="training", args="import.popup",
# vars={"~.training_event_id":r.id}),
# )
# output["showadd_btn"] = TAG[""](showadd_btn, import_btn)
# return output
#s3.postp = postp
return current.rest_controller("hrm", "training_event",
rheader = hrm_rheader,
)
# =============================================================================
def hrm_xls_list_fields(r, staff=True, vol=True):
"""
Configure Human Resource list_fields for XLS Export
- match the XLS Import
            - no l10n of column labels
            - simple represents

        @param r: the S3Request
        @param staff: whether to include staff-specific columns
        @param vol: whether to include volunteer-specific columns
    """
s3db = current.s3db
settings = current.deployment_settings
table = r.table
table.organisation_id.represent = s3db.org_OrganisationRepresent(acronym = False,
parent = False)
table.site_id.represent = s3db.org_SiteRepresent(show_type = False)
current.messages["NONE"] = "" # Don't want to see "-"
ptable = s3db.pr_person
ptable.middle_name.represent = lambda v: v or ""
ptable.last_name.represent = lambda v: v or ""
list_fields = [("First Name", "person_id$first_name"),
("Middle Name", "person_id$middle_name"),
("Last Name", "person_id$last_name"),
]
if staff and vol:
list_fields.insert(0, ("Type", "type"))
if settings.get_hrm_use_code():
list_fields.append(("Staff ID", "code"))
list_fields.append(("Sex", "person_id$gender"))
#if settings.get_hrm_multiple_orgs():
if settings.get_org_branches():
# @ToDo: Smart Handling for emptying the Root if org == root
# @ToDo: Smart Handling for when we have Sub-Branches
list_fields += [(settings.get_hrm_root_organisation_label(), "organisation_id$root_organisation"), # Not imported
("Organisation", "organisation_id"),
]
else:
list_fields.append(("Organisation", "organisation_id"))
if (staff and settings.get_hrm_use_job_titles()) or \
(vol and settings.get_hrm_vol_roles()):
table.job_title_id.represent = S3Represent("hrm_job_title", translate=True) # Need to reinitialise to get the new value for NONE
list_fields.append(("Job Title", "job_title_id"))
if (staff and settings.get_hrm_staff_departments()) or \
(vol and settings.get_hrm_vol_departments()):
table.department_id.represent = S3Represent("hrm_department") # Need to reinitialise to get the new value for NONE
list_fields.append(("Department", "department_id"))
if staff or ("site_id" in settings.get_hrm_location_vol()):
list_fields += [("Office", "site_id"),
("Facility Type", "site_id$instance_type"),
]
list_fields += [("Email", "email.value"),
("Mobile Phone", "phone.value"),
("DOB", "person_id$date_of_birth"),
("Start Date", "start_date"),
("End Date", "end_date"), # Not reimported
("Status", "status"),
("Essential", "essential"), # Not reimported
]
gtable = s3db.gis_location
levels = current.gis.get_relevant_hierarchy_levels()
for level in levels:
gtable[level].represent = lambda v: v or ""
if level == "L0":
list_fields.append(("Home Country", "home_address.location_id$%s" % level))
else:
list_fields.append(("Home %s" % level, "home_address.location_id$%s" % level))
gtable.addr_street.represent = lambda v: v or ""
list_fields.append(("Home Address", "home_address.location_id$addr_street"))
if settings.get_gis_postcode_selector():
gtable.addr_postcode.represent = lambda v: v or ""
list_fields.append(("Home Postcode", "home_address.location_id$addr_postcode"))
if settings.get_hrm_use_trainings():
s3db.hrm_training.course_id.represent = S3Represent("hrm_course", translate=True) # Need to reinitialise to get the new value for NONE
list_fields.append(("Trainings", "person_id$training.course_id"))
if settings.get_hrm_use_certificates():
# @ToDo: Make Importable
s3db.hrm_certification.certificate_id.represent = S3Represent("hrm_certificate") # Need to reinitialise to get the new value for NONE
list_fields.append(("Certificates", "person_id$certification.certificate_id"))
if settings.get_hrm_use_skills():
s3db.hrm_competency.skill_id.represent = S3Represent("hrm_skill") # Need to reinitialise to get the new value for NONE
list_fields.append(("Skills", "person_id$competency.skill_id"))
if settings.get_hrm_use_education():
etable = s3db.pr_education
etable.level_id.represent = S3Represent("pr_education_level") # Need to reinitialise to get the new value for NONE
etable.award.represent = lambda v: v or ""
etable.major.represent = lambda v: v or ""
etable.grade.represent = lambda v: v or ""
etable.year.represent = lambda v: v or ""
etable.institute.represent = lambda v: v or ""
list_fields.extend((("Education Level", "person_id$education.level_id"),
("Degree Name", "person_id$education.award"),
("Major", "person_id$education.major"),
("Grade", "person_id$education.grade"),
("Year", "person_id$education.year"),
("Institute", "person_id$education.institute"),
))
if vol:
if settings.get_hrm_vol_active():
list_fields.append(("Active", "details.active"))
if settings.get_hrm_vol_experience() in ("programme", "both"):
# @ToDo: Make Importable
s3db.hrm_programme_hours.programme_id.represent = S3Represent("hrm_programme") # Need to reinitialise to get the new value for NONE
list_fields.append(("Programs", "person_id$hours.programme_id"))
if settings.get_hrm_use_awards():
list_fields.append(("Awards", "person_id$award.award_id"))
list_fields.append(("Comments", "comments"))
r.resource.configure(list_fields = list_fields)
return list_fields
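# Illustrative usage sketch (hypothetical prep hook, not part of Eden):
# the function above is intended for XLS exports only, so that on-screen
# list views keep their localised labels, e.g.:
#
#def example_prep(r):
#    if r.representation == "xls":
#        hrm_xls_list_fields(r, staff=True, vol=False)
#    return True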
# =============================================================================
class hrm_CV(S3Method):
"""
Curriculum Vitae, custom profile page with multiple DataTables:
* Awards
* Education
* Experience
* Training
* Skills
"""
def __init__(self, form=None):
"""
Constructor
@param form: widget config to inject at the top of the CV,
or a callable to produce such a widget config
"""
self.form = form
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name == "person" and \
r.id and \
not r.component and \
r.representation in ("html", "aadata"):
T = current.T
s3db = current.s3db
get_config = s3db.get_config
settings = current.deployment_settings
tablename = r.tablename
if r.controller == "vol":
controller = "vol"
vol = True
elif r.controller == "deploy":
controller = "deploy"
vol = False
elif r.controller == "member":
controller = "member"
vol = False
else:
controller = "hrm"
vol = False
def dt_row_actions(component, tablename):
def row_actions(r, list_id):
editable = get_config(tablename, "editable")
if editable is None:
editable = True
deletable = get_config(tablename, "deletable")
if deletable is None:
deletable = True
if editable:
# HR Manager
actions = [{"label": T("Open"),
"url": r.url(component = component,
component_id = "[id]",
method = "update.popup",
vars = {"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
]
else:
# Typically the User's personal profile
actions = [{"label": T("Open"),
"url": r.url(component = component,
component_id = "[id]",
method = "read.popup",
vars = {"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
]
if deletable:
actions.append({"label": T("Delete"),
"_ajaxurl": r.url(component = component,
component_id = "[id]",
method = "delete.json",
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
})
return actions
return row_actions
profile_widgets = []
form = self.form
if form:
if callable(form):
form = form(r)
if form is not None:
profile_widgets.append(form)
if vol and settings.get_hrm_use_awards():
tablename = "vol_volunteer_award"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": "Awards",
#"label_create": "Add Award",
"type": "datatable",
"actions": dt_row_actions("award", tablename),
"tablename": tablename,
"context": "person",
"create_controller": "vol",
"create_function": "person",
"create_component": "award",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_education():
tablename = "pr_education"
widget = {"label": "Education",
"label_create": "Add Education",
"type": "datatable",
"actions": dt_row_actions("education", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "education",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if vol:
vol_experience = settings.get_hrm_vol_experience()
experience = vol_experience in ("both", "experience")
missions = None
else:
staff_experience = settings.get_hrm_staff_experience()
experience = staff_experience in ("both", "experience")
missions = staff_experience in ("both", "missions")
if experience:
tablename = "hrm_experience"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": "Experience",
#"label_create": "Add Experience",
"type": "datatable",
"actions": dt_row_actions("experience", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("assignment__link.assignment_id") == None,
"create_controller": controller,
"create_function": "person",
"create_component": "experience",
"pagesize": None, # all records
# Settings suitable for RMS
"list_fields": ["start_date",
"end_date",
"employment_type",
"organisation",
"job_title",
],
}
profile_widgets.append(widget)
if missions:
tablename = "hrm_experience"
widget = {"label": "Missions",
"type": "datatable",
"actions": dt_row_actions("experience", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("assignment__link.assignment_id") != None,
"insert": False,
"pagesize": None, # all records
# Settings suitable for RMS
"list_fields": ["start_date",
"end_date",
"location_id",
#"organisation_id",
"job_title_id",
"job_title",
],
}
profile_widgets.append(widget)
if settings.get_hrm_use_trainings():
tablename = "hrm_training"
if settings.get_hrm_trainings_external():
widget = {"label": "Internal Training",
"label_create": "Add Internal Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("course_id$external") == False,
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
widget = {"label": "External Training",
"label_create": "Add External Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("course_id$external") == True,
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
else:
widget = {"label": "Training",
"label_create": "Add Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_skills():
tablename = "hrm_competency"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": label,
#"label_create": "Add Skill",
"type": "datatable",
"actions": dt_row_actions("competency", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "competency",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_certificates():
tablename = "hrm_certification"
widget = {"label": "Certificates",
"label_create": "Add Certificate",
"type": "datatable",
"actions": dt_row_actions("certification", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "certification",
"pagesize": None, # all records
}
profile_widgets.append(widget)
# Person isn't a doc_id
#if settings.has_module("doc"):
# tablename = "doc_document"
# widget = {"label": "Documents",
# "label_create": "Add Document",
# "type": "datatable",
# "actions": dt_row_actions("document", tablename),
# "tablename": tablename,
# "filter": FS("doc_id") == record.doc_id,
# "icon": "attachment",
# "create_controller": controller,
# "create_function": "person",
# "create_component": "document",
# "pagesize": None, # all records
# }
# profile_widgets.append(widget)
if r.representation == "html":
response = current.response
# Maintain normal rheader for consistency
rheader = attr["rheader"]
profile_header = TAG[""](H2(response.s3.crud_strings["pr_person"].title_display),
DIV(rheader(r),
_id = "rheader",
),
)
else:
profile_header = None
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if r.representation == "html":
output["title"] = response.title = T("CV")
return output
else:
r.error(405, current.ERROR.BAD_METHOD)
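# Illustrative registration sketch (hypothetical wiring): custom method
# handlers such as hrm_CV are typically attached to the person resource
# via set_method, after which e.g. /hrm/person/<id>/cv is served by the
# class above:
#
#current.s3db.set_method("pr", "person",
#                        method = "cv",
#                        action = hrm_CV())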
# =============================================================================
class hrm_Medical(S3Method):
"""
HR Medical Tab, custom profile page with multiple elements:
* Physical Description
* Insurance
        NB: S3SQLCustomForms for these are expected to be created in
            customise_hrm_insurance_resource and
            customise_pr_physical_description_resource
"""
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name != "person" or not r.id or r.component:
r.error(405, current.ERROR.BAD_METHOD)
representation = r.representation
if representation not in ("html", "aadata"):
r.error(405, current.ERROR.BAD_METHOD)
T = current.T
s3db = current.s3db
response = current.response
s3 = response.s3
crud_strings = s3.crud_strings
tablename = r.tablename
# Redefine as non-multiple
s3db.add_components("hrm_human_resource",
hrm_insurance = {"joinby": "human_resource_id",
"multiple": False,
},
)
r.customise_resource("hrm_insurance")
r.customise_resource("pr_physical_description")
profile_widgets = [
{"label": "",
"type": "form",
#"tablename": "pr_physical_description",
#"context": "person",
#"filter": FS("pe_id") == r.record.pe_id,
"tablename": "pr_person",
"context": ("id", "id"),
"sqlform": S3SQLCustomForm("physical_description.blood_type",
"physical_description.medical_conditions",
"physical_description.medication",
"physical_description.diseases",
"physical_description.allergic",
"physical_description.allergies",
),
},
{"label": T("Medical Coverage"),
"type": "form",
"tablename": "hrm_human_resource",
"context": "person",
"sqlform": S3SQLCustomForm("insurance.insurance_number",
"insurance.phone",
"insurance.insurer",
),
},
]
if representation == "html":
# Maintain normal rheader for consistency
title = crud_strings["pr_person"].title_display
PROFILE = "profile" in r.get_vars
profile_header = TAG[""](H2(title),
DIV(hrm_rheader(r, profile=PROFILE),
_id = "rheader",
))
s3.jquery_ready.append('''S3.showHidden('%s',%s,'%s')''' % \
("allergic", json.dumps(["allergies"], separators=SEPARATORS), "pr_person_sub_physical_description"))
else:
profile_header = None
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if representation == "html":
output["title"] = response.title = title
return output
# =============================================================================
class hrm_Record(S3Method):
"""
HR Record, custom profile page with multiple DataTables:
* Human Resource
* Hours (for volunteers)
* Teams
"""
def __init__(self,
salary = False,
awards = False,
disciplinary_record = False,
org_experience = False,
other_experience = False
):
"""
Constructor
@param salary: show a Salary widget
@param awards: show an Awards History widget
@param disciplinary_record: show a Disciplinary Record widget
@param org_experience: show widget with Professional Experience
within registered organisations, can be a
dict with overrides for widget defaults
@param other_experience: show widget with Other Experience, can
be a dict with overrides for widget defaults
"""
self.salary = salary
self.awards = awards
self.disciplinary_record = disciplinary_record
self.org_experience = org_experience
self.other_experience = other_experience
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name != "person" or not r.id or r.component:
r.error(405, current.ERROR.BAD_METHOD)
representation = r.representation
if representation not in ("html", "aadata"):
r.error(405, current.ERROR.BAD_METHOD)
r.customise_resource("hrm_human_resource")
T = current.T
s3db = current.s3db
response = current.response
crud_strings = response.s3.crud_strings
settings = current.deployment_settings
tablename = r.tablename
if r.controller == "vol":
VOL = True
controller = "vol"
else:
            VOL = r.get_vars.get("group") == "volunteer"
controller = "hrm"
# @ToDo: Check editable/deletable config if-necessary (see hrm_CV)
def dt_row_actions(component):
return lambda r, list_id: [
{"label": T("Open"),
"url": r.url(component = component,
component_id = "[id]",
method = "update.popup",
vars = {"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
{"label": T("Delete"),
"_ajaxurl": r.url(component = component,
component_id = "[id]",
method = "delete.json",
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
},
]
table = s3db.hrm_human_resource
label = settings.get_hrm_record_label()
code = table.code
if VOL:
widget_filter = FS("type") == 2
if settings.get_hrm_use_code() is True:
code.readable = code.writable = True
        #elif controller == "hrm":
else:
#widget_filter = FS("type") == 1
widget_filter = None
if settings.get_hrm_use_code():
code.readable = code.writable = True
profile_widgets = [
{"label": label,
"type": "form",
"tablename": "hrm_human_resource",
"context": "person",
"filter": widget_filter,
},
]
if VOL:
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
ctablename = "hrm_programme_hours"
# Exclude records which are just to link to Programme
filter_ = (FS("hours") != None)
list_fields = ["id",
"date",
]
phtable = s3db.hrm_programme_hours
r.customise_resource(ctablename)
if phtable.programme_id.readable:
list_fields.append("programme_id")
# Exclude Training Hours
filter_ &= (FS("programme_id") != None)
if phtable.place.readable:
# RMS
list_fields += ["place",
"event",
]
if phtable.job_title_id.readable:
list_fields.append("job_title_id")
list_fields.append("hours")
crud_strings_ = crud_strings[ctablename]
hours_widget = {"label": crud_strings_["title_list"],
"label_create": crud_strings_["label_create"],
"type": "datatable",
"actions": dt_row_actions("hours"),
"tablename": ctablename,
"context": "person",
"filter": filter_,
"list_fields": list_fields,
"create_controller": controller,
"create_function": "person",
"create_component": "hours",
"pagesize": None, # all records
}
profile_widgets.append(hours_widget)
elif vol_experience == "activity":
# Exclude records which are just to link to Activity & also Training Hours
#filter_ = (FS("hours") != None) & \
# (FS("activity_id") != None)
list_fields = ["id",
"date",
"activity_id",
"job_title_id",
"hours",
]
#if s3db.vol_activity_hours.job_title_id.readable:
# list_fields.append("job_title_id")
#list_fields.append("hours")
hours_widget = {"label": "Activity Hours",
                                # Don't add Hours here since it would be very hard to find the right Activity in the list
"insert": False,
#"label_create": "Add Activity Hours",
"type": "datatable",
"actions": dt_row_actions("hours"),
"tablename": "vol_activity_hours",
"context": "person",
#"filter": filter_,
"list_fields": list_fields,
#"create_controller": controller,
#"create_function": "person",
#"create_component": "activity_hours",
"pagesize": None, # all records
}
profile_widgets.append(hours_widget)
teams = settings.get_hrm_teams()
if teams:
hrm_configure_pr_group_membership()
            if teams == "Teams":
                label_create = "Add Team"
            elif teams == "Groups":
                label_create = "Add Group"
            else:
                # Fallback to avoid a NameError with custom team labels
                label_create = "Add Member"
teams_widget = {"label": teams,
"label_create": label_create,
"type": "datatable",
"actions": dt_row_actions("group_membership"),
"tablename": "pr_group_membership",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "group_membership",
"pagesize": None, # all records
}
profile_widgets.append(teams_widget)
if controller == "hrm":
org_experience = self.org_experience
if org_experience:
# Use primary hrm/experience controller
# (=> defaults to staff-style experience form)
# Need different action URLs
def experience_row_actions(component):
return lambda r, list_id: [
{"label": T("Open"),
"url": URL(f="experience",
args = ["[id]", "update.popup"],
vars = {"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
{"label": T("Delete"),
"_ajaxurl": URL(f="experience",
args = ["[id]", "delete.json"],
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
},
]
# Configure widget, apply overrides
widget = {"label": T("Experience"),
"label_create": T("Add Experience"),
"type": "datatable",
"actions": experience_row_actions("experience"),
"tablename": "hrm_experience",
"pagesize": None, # all records
}
if isinstance(org_experience, dict):
widget.update(org_experience)
# Retain the person filter
person_filter = FS("person_id") == r.id
widget_filter = widget.get("filter")
if widget_filter:
widget["filter"] = person_filter & widget_filter
else:
widget["filter"] = person_filter
profile_widgets.append(widget)
other_experience = self.other_experience
if other_experience:
# Use experience component in hrm/person controller
# (=> defaults to vol-style experience form)
# Configure widget and apply overrides
widget = {"label": "Experience",
"label_create": "Add Experience",
"type": "datatable",
"actions": dt_row_actions("experience"),
"tablename": "hrm_experience",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "experience",
"pagesize": None, # all records
}
if isinstance(other_experience, dict):
widget.update(other_experience)
profile_widgets.append(widget)
if self.awards:
widget = {"label": T("Awards"),
"label_create": T("Add Award"),
"type": "datatable",
"actions": dt_row_actions("staff_award"),
"tablename": "hrm_award",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "staff_award",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if self.disciplinary_record:
widget = {"label": T("Disciplinary Record"),
"label_create": T("Add Disciplinary Action"),
"type": "datatable",
"actions": dt_row_actions("disciplinary_action"),
"tablename": "hrm_disciplinary_action",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "disciplinary_action",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if self.salary:
widget = {"label": T("Salary"),
"label_create": T("Add Salary"),
"type": "datatable",
"actions": dt_row_actions("salary"),
"tablename": "hrm_salary",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "salary",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if representation == "html":
# Maintain normal rheader for consistency
title = crud_strings["pr_person"].title_display
PROFILE = "profile" in r.get_vars
profile_header = TAG[""](H2(title),
DIV(hrm_rheader(r, profile=PROFILE),
_id = "rheader",
))
else:
profile_header = None
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if representation == "html":
output["title"] = response.title = title
return output
# =============================================================================
def hrm_configure_salary(r):
"""
Configure the salary tab
@param r: the S3Request
"""
hr_id = None
multiple = False
# Get all accessible HR records of this person
resource = r.resource
rows = resource.select(["human_resource.id",
"human_resource.type",
], as_rows=True)
# Only staff records, of course
rows = [row for row in rows if row["hrm_human_resource.type"] == 1]
HR_ID = "hrm_human_resource.id"
if len(rows) == 1:
hr_id = rows[0][HR_ID]
multiple = False
else:
hr_id = [row[HR_ID] for row in rows]
multiple = True
component = r.component
ctable = component.table
field = ctable.human_resource_id
list_fields = [fs for fs in component.list_fields() if fs != "person_id"]
if multiple or not hr_id:
# Default to the staff record selected in URL
default_hr_id = hr_id
if "human_resource.id" in r.get_vars:
try:
default_hr_id = int(r.get_vars["human_resource.id"])
except ValueError:
pass
if default_hr_id in hr_id:
field.default = default_hr_id
# Filter field options
field.requires = IS_ONE_OF(current.db, "hrm_human_resource.id",
current.s3db.hrm_human_resource_represent,
sort = True,
filterby = "id",
filter_opts = hr_id,
)
# Show the list_field
if "human_resource_id" not in list_fields:
list_fields.insert(1, "human_resource_id")
else:
# Only one HR record => set as default and make read-only
field.default = hr_id
field.writable = False
# Hiding the field can be confusing if there are mixed single/multi HR
#field.readable = False
# Hide the list field
if "human_resource_id" in list_fields:
list_fields.remove("human_resource_id")
component.configure(list_fields = list_fields)
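# Example request sketch for the salary tab configured above: with
# multiple accessible staff records, a specific one can be pre-selected
# via the URL query string (hypothetical record ids):
#
#   /hrm/person/42/salary?human_resource.id=5
#
# => human_resource_id defaults to 5, provided 5 is among the person's
#    accessible staff records (see the default_hr_id handling above)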
# =============================================================================
def hrm_configure_pr_group_membership():
"""
Configures the labels and CRUD Strings of pr_group_membership
"""
T = current.T
s3db = current.s3db
settings = current.deployment_settings
request = current.request
function = request.function
tablename = "pr_group_membership"
table = s3db.pr_group_membership
if settings.get_hrm_teams() == "Teams":
table.group_id.label = T("Team Name")
table.group_head.label = T("Team Leader")
if function == "person":
ADD_MEMBERSHIP = T("Add Membership")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_MEMBERSHIP,
title_display = T("Membership Details"),
title_list = T("Memberships"),
title_update = T("Edit Membership"),
label_list_button = T("List Memberships"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Added to Team"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Removed from Team"),
msg_list_empty = T("Not yet a Member of any Team"))
elif function in ("group", "group_membership"):
ADD_MEMBER = T("Add Team Member")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_MEMBER,
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
label_list_button = T("List Members"),
label_delete_button = T("Remove Person from Team"),
msg_record_created = T("Person added to Team"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Person removed from Team"),
msg_list_empty = T("This Team has no Members yet"))
else:
table.group_head.label = T("Group Leader")
if function in ("group", "group_membership"):
# Don't create Persons here as they need to be HRMs
table.person_id.comment = None
phone_label = settings.get_ui_label_mobile_phone()
site_label = settings.get_org_site_label()
list_fields = ["person_id",
"group_head",
(T("Email"), "person_id$email.value"),
(phone_label, "person_id$phone.value"),
(current.messages.ORGANISATION,
"person_id$human_resource.organisation_id"),
(site_label, "person_id$human_resource.site_id"),
]
        # Derive the orderby from the deployment's name format, e.g.
        # "%(last_name)s, %(first_name)s" => "3, 1" => order by last name
        name_format = settings.get_pr_name_format()
        test = name_format % {"first_name": 1,
                              "middle_name": 2,
                              "last_name": 3,
                              }
        test = "".join(ch for ch in test if ch in ("1", "2", "3"))
if test[:1] == "1":
orderby = "pr_person.first_name"
elif test[:1] == "2":
orderby = "pr_person.middle_name"
else:
orderby = "pr_person.last_name"
else:
# Person
list_fields = ["group_id",
"group_head",
"group_id$description",
]
orderby = table.group_id
s3db.configure(tablename,
list_fields = list_fields,
orderby = orderby,
)
# =============================================================================
def hrm_competency_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Skills on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_competency.id"]
item_class = "thumbnail"
raw = record._row
title = record["hrm_competency.skill_id"]
organisation = raw["hrm_competency.organisation_id"] or ""
if organisation:
#org_url = URL(c="org", f="organisation",
# args = [organisation, "profile"],
# )
org_url = URL(c="org", f="organisation",
args = [organisation],
)
organisation = P(ICON("organisation"),
" ",
SPAN(A(record["hrm_competency.organisation_id"],
_href = org_url,
)
),
" ",
_class = "card_1_line",
)
competency = raw["hrm_competency.competency_id"] or ""
if competency:
competency = P(ICON("certificate"),
" ",
SPAN(record["hrm_competency.competency_id"]),
" ",
_class = "card_1_line",
)
comments = raw["hrm_competency.comments"] or ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_competency
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href = URL(c=controller, f="competency",
args = [record_id, "update.popup"],
vars = {"refresh": list_id,
"record": record_id,
},
),
_class = "s3_modal",
_title = current.T("Edit Skill"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class = "dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class = "edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % title,
_class = "card-title",
),
edit_bar,
_class = "card-header",
),
DIV(DIV(DIV(organisation,
competency,
P(SPAN(comments),
" ",
_class = "card_manylines",
),
_class = "media",
),
_class = "media-body",
),
_class = "media",
),
_class = item_class,
_id = item_id,
)
return item
# =============================================================================
def hrm_credential_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Credentials on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_credential.id"]
item_class = "thumbnail"
raw = record["_row"]
start_date = raw["hrm_credential.start_date"]
end_date = raw["hrm_credential.end_date"]
if start_date or end_date:
if start_date and end_date:
dates = "%s - %s" % (record["hrm_credential.start_date"],
record["hrm_credential.end_date"],
)
elif start_date:
dates = "%s - " % record["hrm_credential.start_date"]
else:
dates = " - %s" % record["hrm_credential.end_date"]
date = P(ICON("calendar"),
" ",
SPAN(dates),
" ",
_class = "card_1_line",
)
else:
date = ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_credential
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href = URL(c=controller, f="credential",
args = [record_id, "update.popup"],
vars = {"refresh": list_id,
"record": record_id,
},
),
_class = "s3_modal",
_title = current.response.s3.crud_strings["hrm_credential"].title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class = "dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class = "edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % record["hrm_credential.job_title_id"],
_class = "card-title",
),
edit_bar,
_class = "card-header",
),
DIV(DIV(DIV(date,
_class = "media",
),
_class = "media-body",
),
_class = "media",
),
_class = item_class,
_id = item_id,
)
return item
# =============================================================================
def hrm_experience_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Experience on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_experience.id"]
item_class = "thumbnail"
raw = record._row
card_line = lambda icon, item: P(ICON(icon),
SPAN(item),
_class = "card_1_line",
)
# Organisation
colname = "hrm_experience.organisation_id"
organisation_id = raw[colname]
if organisation_id:
org_url = URL(c="org", f="organisation",
args = [organisation_id],
)
organisation = A(record[colname],
_href = org_url,
)
else:
# Try free-text field
organisation = raw["hrm_experience.organisation"]
if organisation:
organisation = card_line("organisation", organisation)
else:
organisation = ""
# Activity Type
colname = "hrm_experience.activity_type"
activity_type = raw[colname]
if activity_type:
activity_type = card_line("activity", record[colname])
else:
activity_type = ""
# Key Responsibilities
colname = "hrm_experience.responsibilities"
responsibilities = raw[colname]
if responsibilities:
responsibilities = card_line("responsibility", record[colname])
else:
responsibilities = ""
# Location
colname = "hrm_experience.location_id"
location_id = raw[colname]
if location_id:
#location_url = URL(c="gis", f="location",
# args = [location_id, "profile"],
# )
location_url = URL(c="gis", f="location",
args = [location_id],
)
location = card_line("location",
A(record[colname],
_href = location_url,
))
else:
location = ""
# Hours
hours = raw["hrm_experience.hours"]
if hours:
hours = card_line("time", hours)
else:
hours = ""
# Start and End Dates
colname_start = "hrm_experience.start_date"
colname_end = "hrm_experience.end_date"
start_date = raw[colname_start]
end_date = raw[colname_end]
if start_date or end_date:
if start_date and end_date:
dates = "%s - %s" % (record[colname_start],
record[colname_end],
)
elif start_date:
dates = "%s - " % record[colname_start]
else:
dates = " - %s" % record[colname_end]
date = card_line("calendar", dates)
else:
date = ""
# Supervisor
colname = "hrm_experience.supervisor_id"
supervisor_id = raw[colname]
if supervisor_id:
#person_url = URL(c="hrm", f="person",
# args = [supervisor_id, "profile"],
# )
person_url = URL(c="hrm", f="person",
args = [supervisor_id],
)
supervisor = card_line("user",
A(record[colname],
_href = person_url,
))
else:
supervisor = ""
# Comments
comments = raw["hrm_experience.comments"] or ""
# Job title as card title, indicate employment type if given
colname = "hrm_experience.job_title_id"
if raw[colname]:
title = record[colname]
job_title = card_line("star", title)
else:
title = ""
job_title = ""
position = raw["hrm_experience.job_title"]
if position:
title = position
else:
job_title = ""
colname = "hrm_experience.employment_type"
if raw[colname]:
employment_type = record[colname]
if title:
title = "%s (%s)" % (title, employment_type)
else:
title = employment_type
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_experience
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href = URL(c=controller, f="experience",
args = [record_id, "update.popup"],
vars = {"refresh": list_id,
"record": record_id,
},
),
_class = "s3_modal",
_title = current.T("Edit Experience"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class = "dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class = "edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(title,
_class = "card-title",
),
edit_bar,
_class = "card-header",
),
DIV(DIV(DIV(organisation,
location,
date,
hours,
supervisor,
activity_type,
job_title,
responsibilities,
P(SPAN(comments),
" ",
_class = "card_manylines",
),
_class = "media",
),
_class = "media-body",
),
_class = "media",
),
_class = item_class,
_id = item_id,
)
return item
# =============================================================================
def hrm_training_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Trainings on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_training.id"]
item_class = "thumbnail"
raw = record._row
title = record["hrm_training.course_id"]
date = raw["hrm_training.date"] or ""
if date:
date = P(ICON("calendar"),
" ",
SPAN(record["hrm_training.date"]),
" ",
_class="card_1_line",
)
grade = raw["hrm_training.grade"] or ""
if grade:
grade = P(ICON("certificate"),
" ",
SPAN(record["hrm_training.grade"]),
" ",
_class="card_1_line",
)
hours = raw["hrm_training.hours"] or ""
if hours:
hours = P(ICON("time"),
" ",
SPAN(hours),
" ",
_class="card_1_line",
)
site = raw["hrm_training_event.site_id"] or ""
if site:
#site_id = raw["hrm_training_event.site_id"]
#site_url = URL(c="org", f="site", args=[site_id, "profile"])
site_url = "#"
site = P(ICON("site"),
" ",
SPAN(A(record["hrm_training_event.site_id"],
_href = site_url,
)
),
" ",
_class="card_1_line",
)
job_title = raw["hrm_course_job_title.job_title_id"] or ""
if job_title:
job_title = P(ICON("star"),
" ",
SPAN(record["hrm_course_job_title.job_title_id"],
),
" ",
_class="card_1_line",
)
else:
job_title = ""
comments = raw["hrm_training.comments"] or ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_training
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href = URL(c=controller, f="training",
args = [record_id, "update.popup"],
vars = {"refresh": list_id,
"record": record_id,
},
),
_class = "s3_modal",
_title = current.T("Edit Training"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class = "dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class = "edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % title,
_class = "card-title",
),
edit_bar,
_class = "card-header",
),
DIV(DIV(DIV(job_title,
site,
date,
hours,
grade,
P(SPAN(comments),
" ",
_class = "card_manylines",
),
_class = "media",
),
_class = "media-body",
),
_class = "media",
),
_class = item_class,
_id = item_id,
)
return item
# =============================================================================
def hrm_human_resource_filters(resource_type = None,
module = None,
hrm_type_opts = None):
"""
Get filter widgets for human resources
@param resource_type: the HR type (staff/volunteer/both) if
pre-determined, otherwise None to render a
filter widget
        @param module: the controller prefix of the request to render
                       module-specific widgets, defaults to
                       current.request.controller
        @param hrm_type_opts: the selectable options for the Type filter
                              (staff/volunteer), used when resource_type
                              is not pre-determined
"""
T = current.T
settings = current.deployment_settings
if not module:
module = current.request.controller
text_search_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$email.value",
#"organisation_id",
]
use_code = settings.get_hrm_use_code()
    if use_code is True or (use_code and resource_type != "volunteer"):
text_search_fields.append("code")
if settings.get_hrm_use_national_id():
text_search_fields.append("person_id$national_id.value")
filter_widgets = [S3TextFilter(text_search_fields,
label = T("Search"),
),
]
append_filter = filter_widgets.append
if module == "deploy" and current.auth.s3_has_role("ADMIN"):
dotable = current.s3db.deploy_organisation
deploying_orgs = current.db(dotable.deleted == False).count()
if deploying_orgs > 1:
append_filter(S3OptionsFilter("application.organisation_id",
label = T("Deployment Team"),
))
# Type filter (only if not pre-filtered)
    if resource_type not in ("staff", "volunteer"):
append_filter(S3OptionsFilter("type",
label = T("Type"),
options = hrm_type_opts,
cols = 2,
hidden = True,
))
# Region filter (only if using regions in template)
if settings.get_org_regions():
if settings.get_org_regions_hierarchical():
if module == "deploy":
hidden = False
else:
hidden = True
append_filter(S3HierarchyFilter("organisation_id$organisation_region.region_id",
label = T("Region"),
hidden = hidden,
))
else:
append_filter(S3OptionsFilter("organisation_id$organisation_region.region_id",
label = T("Region"),
hidden = True,
))
# Organisation filter
if settings.get_hrm_multiple_orgs():
if settings.get_org_branches():
append_filter(S3HierarchyFilter("organisation_id",
leafonly = False,
))
else:
append_filter(S3OptionsFilter("organisation_id",
search = True,
header = "",
#hidden = True,
))
# Location filter (always)
append_filter(S3LocationFilter("location_id",
label = T("Location"),
hidden = True,
))
# Active / Activity / Programme filters (volunteer only)
if module == "vol" or resource_type in ("both", "volunteer"):
vol_active = settings.get_hrm_vol_active()
if vol_active:
# Active filter
append_filter(S3OptionsFilter("details.active",
label = T("Active?"),
cols = 2, #3,
options = {True: T("Yes"),
False: T("No"),
#None: T("Unknown"),
},
hidden = True,
#none = True,
))
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
# Programme filter
append_filter(S3OptionsFilter("person_id$hours.programme_id",
label = T("Program"),
#options = lambda: \
# s3_get_filter_opts("hrm_programme",
# org_filter=True),
hidden = True,
))
elif vol_experience == "activity":
# Programme filter
append_filter(S3OptionsFilter("person_id$activity_hours.activity_hours_activity_type.activity_type_id",
label = T("Activity Types"),
hidden = True,
))
if settings.get_hrm_unavailability():
# Availability Filter
append_filter(S3DateFilter("available",
label = T("Available"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = False,
hidden = True,
))
else:
# Site filter (staff only)
filter_widgets.append(S3OptionsFilter("site_id",
hidden = True,
))
if module == "deploy":
# Deployment-specific filters
# Availability Filter
append_filter(S3DateFilter("available",
label = T("Available for Deployment"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = True,
hidden = True,
))
# Job title filter
append_filter(S3OptionsFilter("credential.job_title_id",
# @ToDo: deployment_setting for label (this is RDRT-specific)
#label = T("Credential"),
label = T("Sector"),
hidden = True,
))
# Last-deployment-date filter
append_filter(S3DateFilter("human_resource_id:deploy_assignment.start_date",
label = T("Deployed"),
hide_time = True,
hidden = True,
))
# Last-response-date filter
append_filter(S3DateFilter("human_resource_id:deploy_response.created_on",
label = T("Responded"),
hide_time = True,
hidden = True,
))
# Certificate filter
if settings.get_hrm_use_certificates():
append_filter(S3OptionsFilter("certification.certificate_id",
# Better to default (easier to customise/consistency)
#label = T("Certificate"),
hidden = True,
))
# Skills filter
if settings.get_hrm_use_skills():
append_filter(S3OptionsFilter("competency.skill_id",
# Better to default (easier to customise/consistency)
#label = T("Skill"),
hidden = module != "req",
))
# Training filter
if settings.get_hrm_use_trainings():
if settings.get_hrm_training_filter_and():
append_filter(S3OptionsFilter("trainings.course_id",
label = T("Training"),
hidden = True,
operator = "contains",
))
else:
append_filter(S3OptionsFilter("training.course_id",
label = T("Training"),
hidden = True,
))
# Group (team) membership filter
teams = settings.get_hrm_teams()
if teams:
if teams == "Teams":
teams = "Team"
elif teams == "Groups":
teams = "Group"
append_filter(S3OptionsFilter("group_membership.group_id",
label = T(teams),
hidden = True,
))
return filter_widgets
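# Illustrative usage sketch (hypothetical call site, not part of Eden):
# the widget list returned above is normally passed into the resource
# configuration of a controller, e.g. for a staff list:
#
#filter_widgets = hrm_human_resource_filters(resource_type = "staff",
#                                            module = "hrm")
#current.s3db.configure("hrm_human_resource",
#                       filter_widgets = filter_widgets,
#                       )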
# END =========================================================================
|
flavour/eden
|
modules/s3db/hrm.py
|
Python
|
mit
| 471,286
| 0.010189
|
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from os.path import dirname, abspath, join, curdir
from nose.tools import assert_equals, with_setup
from tests.asserts import prepare_stdout
def test_imports_terrain_under_path_that_is_run():
old_path = abspath(curdir)
os.chdir(join(abspath(dirname(__file__)), 'simple_features', '1st_feature_dir'))
    status, output = subprocess.getstatusoutput('python -c "from lettuce import world; assert hasattr(world, \'works_fine\'); print(\'it passed!\')"')
assert_equals(status, 0)
assert_equals(output, "it passed!")
os.chdir(old_path)
@with_setup(prepare_stdout)
def test_after_each_all_is_executed_before_each_all():
"terrain.before.each_all and terrain.after.each_all decorators"
from lettuce import step
from lettuce import Runner
from lettuce.terrain import before, after, world
world.all_steps = []
@before.all
def set_state_to_before():
world.all_steps.append('before')
@step('append 1 in world all steps')
def append_1_in_world_all_steps(step):
world.all_steps.append("1")
@step('append 2 more')
def append_2_more(step):
world.all_steps.append("2")
@step('append 3 in world all steps')
def append_during_to_all_steps(step):
world.all_steps.append("3")
@after.all
def set_state_to_after(total):
world.all_steps.append('after')
runner = Runner(join(abspath(dirname(__file__)), 'simple_features', '2nd_feature_dir'))
runner.run()
assert_equals(
world.all_steps,
['before', '1', '2', '3', 'after']
)
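# A feature file driving the steps above could look like this sketch
# (hypothetical content; the actual fixture lives in
# simple_features/2nd_feature_dir):
#
#   Feature: terrain hook ordering
#     Scenario: before.all and after.all wrap the steps
#       Given append 1 in world all steps
#       When append 2 more
#       Then append 3 in world all steps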
|
adw0rd/lettuce-py3
|
tests/functional/test_terrain.py
|
Python
|
gpl-3.0
| 2,378
| 0.002103
|
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
setup(name='peloton_bloomfilters',
author = 'Adam DePrince',
author_email = 'adam@pelotoncycle.com',
url = 'https://github.com/pelotoncycle/peloton_bloomfilters',
version='0.0.1',
description="Peloton Cycle's Bloomin fast Bloomfilters",
ext_modules=(
[
Extension(
name='peloton_bloomfilters',
sources=['peloton_bloomfiltersmodule.c']),
]
)
)
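# Build/installation sketch (standard setuptools/distutils commands):
#
#   python setup.py build_ext --inplace   # compile the C extension in place
#   python setup.py install               # or: pip install .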
|
pelotoncycle/shared_memory_bloomfilter
|
setup.py
|
Python
|
gpl-3.0
| 572
| 0.012238
|
from django.forms.fields import *
from corehq.apps.sms.forms import BackendForm
from dimagi.utils.django.fields import TrimmedCharField
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
class TelerivetBackendForm(BackendForm):
api_key = TrimmedCharField()
project_id = TrimmedCharField()
phone_id = TrimmedCharField()
webhook_secret = TrimmedCharField()
def clean_webhook_secret(self):
# Circular import
from corehq.apps.telerivet.models import TelerivetBackend
value = self.cleaned_data.get("webhook_secret", None)
backend = TelerivetBackend.by_webhook_secret(value)
if backend is not None and backend._id != self._cchq_backend_id:
raise ValidationError(_("Already in use."))
return value
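# Illustrative validation sketch (hypothetical values; in practice the
# SMS framework instantiates the form and sets _cchq_backend_id):
#
#form = TelerivetBackendForm(data={"api_key": "KEY",
#                                  "project_id": "PJxxxxxxxx",
#                                  "phone_id": "PNxxxxxxxx",
#                                  "webhook_secret": "s3cret"})
#form._cchq_backend_id = None  # no existing backend is being edited
#if form.is_valid():
#    cleaned = form.cleaned_data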
|
gmimano/commcaretest
|
corehq/apps/telerivet/forms.py
|
Python
|
bsd-3-clause
| 825
| 0.002424
|
"""Render meshes using OpenDR.
Code is from:
https://github.com/akanazawa/hmr/blob/master/src/util/renderer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import cv2
import numpy as np
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
colors = {
    # colorblind/print/copy safe:
    'light_blue': [0.65098039, 0.74117647, 0.85882353],
    'light_pink': [.9, .7, .7],  # used for the "no 3D" rendering case
}
class SMPLRenderer(object):
"""Utility class to render SMPL models."""
def __init__(self, img_size=224, flength=500., face_path='smpl_faces.npy'):
self.faces = np.load(face_path)
self.w = img_size
self.h = img_size
self.flength = flength
def __call__(self,
verts,
cam=None,
img=None,
do_alpha=False,
far=None,
near=None,
color_id=0,
img_size=None):
# cam is 3D [f, px, py]
if img is not None:
h, w = img.shape[:2]
elif img_size is not None:
h = img_size[0]
w = img_size[1]
else:
h = self.h
w = self.w
if cam is None:
cam = [self.flength, w / 2., h / 2.]
use_cam = ProjectPoints(
f=cam[0] * np.ones(2),
rt=np.zeros(3),
t=np.zeros(3),
k=np.zeros(5),
c=cam[1:3])
if near is None:
near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
if far is None:
far = np.maximum(np.max(verts[:, 2]) + 25, 25)
imtmp = render_model(
verts,
self.faces,
w,
h,
use_cam,
do_alpha=do_alpha,
img=img,
far=far,
near=near,
color_id=color_id)
return (imtmp * 255).astype('uint8')
def rotated(self,
verts,
deg,
cam=None,
axis='y',
img=None,
do_alpha=True,
far=None,
near=None,
color_id=0,
img_size=None):
if axis == 'y':
around = cv2.Rodrigues(np.array([0, math.radians(deg), 0]))[0]
elif axis == 'x':
around = cv2.Rodrigues(np.array([math.radians(deg), 0, 0]))[0]
else:
around = cv2.Rodrigues(np.array([0, 0, math.radians(deg)]))[0]
center = verts.mean(axis=0)
new_v = np.dot((verts - center), around) + center
return self.__call__(
new_v,
cam,
img=img,
do_alpha=do_alpha,
far=far,
near=near,
img_size=img_size,
color_id=color_id)
def _create_renderer(w=640,
h=480,
rt=np.zeros(3),
t=np.zeros(3),
f=None,
c=None,
k=None,
near=.5,
far=10.):
f = np.array([w, w]) / 2. if f is None else f
c = np.array([w, h]) / 2. if c is None else c
k = np.zeros(5) if k is None else k
rn = ColoredRenderer()
rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
return rn
def _rotateY(points, angle):
"""Rotate the points by a specified angle."""
ry = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],
[-np.sin(angle), 0., np.cos(angle)]])
return np.dot(points, ry)
def simple_renderer(rn,
verts,
faces,
yrot=np.radians(120),
color=colors['light_pink']):
# Rendered model color
rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
albedo = rn.vc
# Construct Back Light (on back right corner)
rn.vc = LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
vc=albedo,
light_color=np.array([1, 1, 1]))
# Construct Left Light
rn.vc += LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([800, 10, 300]), yrot),
vc=albedo,
light_color=np.array([1, 1, 1]))
# Construct Right Light
rn.vc += LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
vc=albedo,
light_color=np.array([.7, .7, .7]))
return rn.r
def get_alpha(imtmp, bgval=1.):
h, w = imtmp.shape[:2]
alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)
b_channel, g_channel, r_channel = cv2.split(imtmp)
im_RGBA = cv2.merge(
(b_channel, g_channel, r_channel, alpha.astype(imtmp.dtype)))
return im_RGBA
def append_alpha(imtmp):
alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
if np.issubdtype(imtmp.dtype, np.uint8):
alpha = alpha * 255
b_channel, g_channel, r_channel = cv2.split(imtmp)
im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha))
return im_RGBA
def render_model(verts,
faces,
w,
h,
cam,
near=0.5,
far=25,
img=None,
do_alpha=False,
color_id=None):
rn = _create_renderer(
w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c)
# Uses img as background, otherwise white background.
if img is not None:
rn.background_image = img / 255. if img.max() > 1 else img
if color_id is None:
color = colors['light_blue']
else:
        color_list = list(colors.values())
color = color_list[color_id % len(color_list)]
imtmp = simple_renderer(rn, verts, faces, color=color)
# If white bg, make transparent.
if img is None and do_alpha:
imtmp = get_alpha(imtmp)
elif img is not None and do_alpha:
imtmp = append_alpha(imtmp)
return imtmp
|
deepmind/Temporal-3D-Pose-Kinetics
|
third_party/hmr/renderer.py
|
Python
|
apache-2.0
| 5,948
| 0.009247
|
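`_rotateY` and `rotated` above rotate vertices with a row-vector convention (`points @ R` rather than `R @ points`). A numpy-only check of that convention: a 90-degree Y rotation sends the unit X vector to +Z.
import numpy as np
def rotate_y(points, angle):
    # same matrix as _rotateY above
    ry = np.array([[np.cos(angle), 0., np.sin(angle)],
                   [0., 1., 0.],
                   [-np.sin(angle), 0., np.cos(angle)]])
    return np.dot(points, ry)
p = np.array([1.0, 0.0, 0.0])
print(np.round(rotate_y(p, np.pi / 2), 6))  # [0. 0. 1.] -- unit X lands on +Z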
# --coding: utf8--
import requests
from django.contrib.gis.db import models
from django.contrib.gis.geos import GEOSGeometry
class Country(models.Model):
"""
    Country model.
"""
title = models.CharField(
u'название', max_length=255)
class Meta:
verbose_name = u'страна'
verbose_name_plural = u'страны'
ordering = ['title']
def __unicode__(self):
return self.title
class BaseAddress(models.Model):
"""
    Base address class with GEO data.
"""
country = models.ForeignKey(
Country,
verbose_name=u'страна')
area = models.CharField(
u'область', max_length=255, blank=True)
subarea = models.CharField(
u'район', max_length=255, blank=True)
locality = models.CharField(
u'населенный пункт', max_length=255)
street = models.CharField(
u'улица', max_length=255, blank=True)
house = models.CharField(
u'дом', max_length=50, blank=True)
apartment = models.CharField(
u'офис', max_length=10, blank=True)
zip = models.CharField(
u'почтовый индекс', max_length=10, blank=True)
coordinates = models.PointField(
        u'координаты', blank=True, null=True)  # latitude, longitude
    # Use GeoManager to be able to run GEO queries
objects = models.GeoManager()
class Meta:
verbose_name = u'адрес'
verbose_name_plural = u'адреса'
def __unicode__(self):
return ', '.join(part for part in [self.zip, self.country.title,
self.area, self.subarea,
self.locality, self.street,
self.house] if part)
def fetch_coordinates(self):
"""
        Query the object's coordinates from Yandex.
"""
query = ',+'.join(
part for part in [self.country.title, self.area, self.subarea,
self.locality, self.street, self.house] if part)
url = u'http://geocode-maps.yandex.ru/1.x/?geocode=%s&format=json' % (
query)
try:
r = requests.get(url).json()
except requests.exceptions.RequestException:
return None
try:
longitude, latitude = (r['response']['GeoObjectCollection']
['featureMember'][0]['GeoObject']['Point']
['pos']).split(' ')
return GEOSGeometry(U'POINT(%s %s)' % (longitude, latitude))
except (KeyError, IndexError):
return None
def get_short_address(self):
return ', '.join(part for part in [self.area, self.locality] if part)
class Region(models.Model):
"""
    Class for a geographic region.
"""
name = models.CharField(u'название', max_length=255)
coordinates = models.PolygonField(u'координаты')
    # Use GeoManager to be able to run GEO queries
objects = models.GeoManager()
class Meta:
verbose_name = u'регион'
verbose_name_plural = u'регионы'
ordering = ['name']
def __unicode__(self):
return self.name
|
minidron/django-geoaddress
|
django_geoaddress/models.py
|
Python
|
gpl-2.0
| 3,479
| 0
|
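`fetch_coordinates` above reduces to one HTTP call plus one nested lookup; note that Yandex returns `pos` as "longitude latitude", not the reverse. A standalone sketch of the same call (the query is invented, and the unauthenticated endpoint may nowadays require an API key):
import requests
query = 'Russia,+Moscow,+Tverskaya,+1'
url = 'http://geocode-maps.yandex.ru/1.x/?geocode=%s&format=json' % query
try:
    r = requests.get(url).json()
    pos = (r['response']['GeoObjectCollection']['featureMember'][0]
           ['GeoObject']['Point']['pos'])  # "longitude latitude"
    longitude, latitude = pos.split(' ')
    print('%s %s' % (longitude, latitude))
except (requests.exceptions.RequestException, KeyError, IndexError, ValueError):
    print('geocoding failed')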
from distutils.core import setup
from distutils.extension import Extension
setup(
name='wordcloud',
version='1.1.3',
url='https://github.com/amueller/word_cloud',
description='A little word cloud generator',
license='MIT',
ext_modules=[Extension("wordcloud.query_integral_image",
["wordcloud/query_integral_image.c"])],
packages=['wordcloud'],
package_data={'wordcloud': ['stopwords', 'DroidSansMono.ttf']}
)
|
asgeirrr/word_cloud
|
setup.py
|
Python
|
mit
| 469
| 0
|
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
import os.path
import sys
import tempfile
import itertools
import unittest
try:
import flask.ext.autodoc
except ImportError as e:
raise unittest.SkipTest('Flask-Autodoc not installed')
try:
import digits
except ImportError:
# Add path for DIGITS package
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import digits.config; digits.config.load_config()
from digits.webapp import app, _doc as doc
from . import generate_docs as _
def check_doc_file(generator, doc_filename):
"""
Checks that the output generated by generator matches the contents of doc_filename
"""
with tempfile.NamedTemporaryFile(suffix='.md') as tmp_file:
generator.generate(tmp_file.name)
tmp_file.seek(0)
with open(doc_filename) as doc_file:
# memory friendly
for doc_line, tmp_line in itertools.izip(doc_file, tmp_file):
doc_line = doc_line.strip()
tmp_line = tmp_line.strip()
if doc_line.startswith('*Generated') and \
tmp_line.startswith('*Generated'):
# If the date is different, that's not a problem
pass
elif doc_line != tmp_line:
print '(Previous)', doc_line
print '(New) ', tmp_line
raise RuntimeError('%s needs to be regenerated. Use scripts/generate_docs.py' % doc_filename)
def test_api_md():
"""API.md out-of-date"""
with app.app_context():
generator = _.ApiDocGenerator(doc)
check_doc_file(generator,
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'docs', 'API.md'))
def test_flask_routes_md():
"""FlaskRoutes.md out-of-date"""
with app.app_context():
generator = _.FlaskRoutesDocGenerator(doc)
check_doc_file(generator,
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'docs', 'FlaskRoutes.md'))
|
liyongsea/DIGITS
|
scripts/test_generate_docs.py
|
Python
|
bsd-3-clause
| 2,056
| 0.005837
|
# -*- coding:utf8 -*-
a = 3
b = 4
print a+b
|
LyanJin/J_lyan
|
New.py
|
Python
|
epl-1.0
| 44
| 0.022727
|
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import eng_notation
import math
class abstract_converter(object):
def external_to_internal(self, v):
"""
Convert from user specified value to value acceptable to underlying primitive.
The underlying primitive usually expects strings.
"""
raise NotImplementedError
def internal_to_external(self, s):
"""
Convert from underlying primitive value to user specified value.
The underlying primitive usually expects strings.
"""
raise NotImplementedError
def help(self):
return "Any string is acceptable"
class identity_converter(abstract_converter):
def external_to_internal(self,v):
return v
def internal_to_external(self, s):
return s
########################################################################
# Commonly used converters
########################################################################
class chooser_converter(abstract_converter):
"""
Convert between a set of possible choices and an index.
Used in the chooser base and all sub-classes.
"""
def __init__(self, choices):
#choices must be a list because tuple does not have .index() in python2.5
self._choices = list(choices)
def external_to_internal(self, choice):
return self._choices.index(choice)
def internal_to_external(self, index):
return self._choices[index]
def help(self):
return 'Enter a possible value in choices: "%s"'%str(self._choices)
class bool_converter(abstract_converter):
"""
The internal representation is boolean.
The external representation is specified.
Used in the check box form.
"""
def __init__(self, true, false):
self._true = true
self._false = false
def external_to_internal(self, v):
return bool(v)
def internal_to_external(self, v):
if v: return self._true
else: return self._false
def help(self):
return "Value must be cast-able to type bool."
class eval_converter(abstract_converter):
"""
A catchall converter when int and float are not enough.
Evaluate the internal representation with python's eval().
Possible uses, set a complex number, constellation points.
Used in text box.
"""
def __init__(self, formatter=lambda x: '%s'%(x)):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return eval(s)
def help(self):
return "Value must be evaluatable by python's eval."
class str_converter(abstract_converter):
def __init__(self, formatter=lambda x: '%s'%(x)):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return str(s)
class int_converter(abstract_converter):
def __init__(self, formatter=lambda x: '%d'%round(x)):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return int(s, 0)
def help(self):
return "Enter an integer. Leading 0x indicates hex"
class float_converter(abstract_converter):
def __init__(self, formatter=eng_notation.num_to_str):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return eng_notation.str_to_num(s)
def help(self):
return "Enter a float with optional scale suffix. E.g., 100.1M"
class slider_converter(abstract_converter):
"""
Scale values to and from the slider.
"""
def __init__(self, minimum, maximum, num_steps, cast):
assert minimum < maximum
assert num_steps > 0
self._offset = minimum
self._scaler = float(maximum - minimum)/num_steps
self._cast = cast
def external_to_internal(self, v):
#slider's internal representation is an integer
return int(round((v - self._offset)/self._scaler))
def internal_to_external(self, v):
return self._cast(v*self._scaler + self._offset)
def help(self):
return "Value should be within slider range"
class log_slider_converter(slider_converter):
def __init__(self, min_exp, max_exp, num_steps, base):
assert min_exp < max_exp
assert num_steps > 0
self._base = base
slider_converter.__init__(self, minimum=min_exp, maximum=max_exp, num_steps=num_steps, cast=float)
def external_to_internal(self, v):
return slider_converter.external_to_internal(self, math.log(v, self._base))
def internal_to_external(self, v):
return self._base**slider_converter.internal_to_external(self, v)
|
ffu/DSA-3.2.2
|
gr-wxgui/src/python/forms/converters.py
|
Python
|
gpl-3.0
| 5,122
| 0.029871
|
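`slider_converter` maps an external value onto an integer slider step as round((v - minimum) / scaler), where scaler = (maximum - minimum) / num_steps. Worked numbers in plain Python, no GNU Radio required:
minimum, maximum, num_steps = 0.0, 100.0, 50
scaler = float(maximum - minimum) / num_steps   # 2.0
def external_to_internal(v):
    return int(round((v - minimum) / scaler))
def internal_to_external(i):
    return float(i * scaler + minimum)
print(external_to_internal(50.0))  # 25 -- the slider midpoint
print(internal_to_external(25))    # 50.0 -- the round trip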
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import random
from typing import (
Dict,
Callable,
Any,
Union,
List,
TYPE_CHECKING,
Optional,
Iterable,
Awaitable,
cast,
)
import uuid
import asyncio
import logging
from functools import partial
from ..._common import EventData
from ..._eventprocessor.common import CloseReason, LoadBalancingStrategy
from ..._eventprocessor._eventprocessor_mixin import EventProcessorMixin
from ..._utils import get_event_links
from .partition_context import PartitionContext
from .in_memory_checkpoint_store import InMemoryCheckpointStore
from .checkpoint_store import CheckpointStore
from ._ownership_manager import OwnershipManager
from .utils import get_running_loop
from .._async_utils import get_dict_with_loop_if_needed
if TYPE_CHECKING:
from datetime import datetime
from .._consumer_async import EventHubConsumer
from .._consumer_client_async import EventHubConsumerClient
_LOGGER = logging.getLogger(__name__)
class EventProcessor(
EventProcessorMixin
): # pylint:disable=too-many-instance-attributes
"""
An EventProcessor constantly receives events from one or multiple partitions of the Event Hub
in the context of a given consumer group.
"""
def __init__(
self,
eventhub_client: "EventHubConsumerClient",
consumer_group: str,
event_handler: Callable[[PartitionContext, Union[Optional[EventData], List[EventData]]], Awaitable[None]],
*,
batch: Optional[bool] = False,
max_batch_size: Optional[int] = 300,
max_wait_time: Optional[float] = None,
partition_id: Optional[str] = None,
checkpoint_store: Optional[CheckpointStore] = None,
initial_event_position: Union[str, int, "datetime", Dict[str, Any]] = "@latest",
initial_event_position_inclusive: Union[bool, Dict[str, bool]] = False,
load_balancing_interval: float = 10.0,
partition_ownership_expiration_interval: Optional[float] = None,
load_balancing_strategy: LoadBalancingStrategy = LoadBalancingStrategy.GREEDY,
owner_level: Optional[int] = None,
prefetch: Optional[int] = None,
track_last_enqueued_event_properties: bool = False,
error_handler: Optional[
Callable[[PartitionContext, Exception], Awaitable[None]]
] = None,
partition_initialize_handler: Optional[
Callable[[PartitionContext], Awaitable[None]]
] = None,
partition_close_handler: Optional[
Callable[[PartitionContext, CloseReason], Awaitable[None]]
] = None,
loop: Optional[asyncio.AbstractEventLoop] = None
):
self._consumer_group = consumer_group
self._eventhub_client = eventhub_client
self._namespace = (
eventhub_client._address.hostname # pylint: disable=protected-access
)
self._eventhub_name = eventhub_client.eventhub_name
self._partition_id = partition_id
self._event_handler = event_handler
self._batch = batch
self._max_batch_size = max_batch_size
self._max_wait_time = max_wait_time
self._error_handler = error_handler
self._partition_initialize_handler = partition_initialize_handler
self._partition_close_handler = partition_close_handler
self._checkpoint_store = checkpoint_store or InMemoryCheckpointStore()
self._initial_event_position = initial_event_position
self._initial_event_position_inclusive = initial_event_position_inclusive
self._load_balancing_interval = load_balancing_interval
self._ownership_timeout = partition_ownership_expiration_interval \
if partition_ownership_expiration_interval is not None \
else self._load_balancing_interval * 6
self._load_balancing_strategy = load_balancing_strategy or LoadBalancingStrategy.GREEDY
self._tasks = {} # type: Dict[str, asyncio.Task]
self._partition_contexts = {} # type: Dict[str, PartitionContext]
self._owner_level = owner_level
if checkpoint_store and self._owner_level is None:
self._owner_level = 0
self._prefetch = prefetch
self._track_last_enqueued_event_properties = (
track_last_enqueued_event_properties
)
self._id = str(uuid.uuid4())
self._internal_kwargs = get_dict_with_loop_if_needed(loop)
self._running = False
self._consumers = {} # type: Dict[str, EventHubConsumer]
self._ownership_manager = OwnershipManager(
cast("EventHubConsumerClient", self._eventhub_client),
self._consumer_group,
self._id,
self._checkpoint_store,
self._ownership_timeout,
self._load_balancing_strategy,
self._partition_id,
)
def __repr__(self) -> str:
return "EventProcessor: id {}".format(self._id)
async def _cancel_tasks_for_partitions(
self, to_cancel_partitions: Iterable[str]
) -> None:
_LOGGER.debug(
"EventProcessor %r tries to cancel partitions %r",
self._id,
to_cancel_partitions
)
for partition_id in to_cancel_partitions:
task = self._tasks.get(partition_id)
if task:
task.cancel()
_LOGGER.info(
"EventProcessor %r has cancelled partition %r",
self._id,
partition_id
)
if partition_id not in self._consumers: # task is cancelled before the consumer is created
del self._tasks[partition_id]
def _create_tasks_for_claimed_ownership(
self,
claimed_partitions: Iterable[str],
checkpoints: Optional[Dict[str, Dict[str, Any]]] = None,
) -> None:
_LOGGER.debug(
"EventProcessor %r tries to claim partition %r",
self._id,
claimed_partitions
)
for partition_id in claimed_partitions:
if partition_id not in self._tasks or self._tasks[partition_id].done():
checkpoint = checkpoints.get(partition_id) if checkpoints else None
if self._running:
self._tasks[partition_id] = get_running_loop().create_task(
self._receive(partition_id, checkpoint)
)
_LOGGER.info(
"EventProcessor %r has claimed partition %r",
self._id,
partition_id
)
async def _process_error(
self, partition_context: PartitionContext, err: Exception
) -> None:
if self._error_handler:
try:
await self._error_handler(partition_context, err)
except Exception as err_again: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while running on_error. The exception is %r.",
self._id,
partition_context.eventhub_name,
partition_context.partition_id,
partition_context.consumer_group,
err_again,
)
async def _close_partition(
self, partition_context: PartitionContext, reason: CloseReason
) -> None:
_LOGGER.info(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" is being closed. Reason is: %r",
self._id,
partition_context.eventhub_name,
partition_context.partition_id,
partition_context.consumer_group,
reason,
)
if self._partition_close_handler:
try:
await self._partition_close_handler(partition_context, reason)
except Exception as err: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while running on_partition_close. The exception is %r.",
self._id,
partition_context.eventhub_name,
partition_context.partition_id,
partition_context.consumer_group,
err,
)
await self._process_error(partition_context, err)
async def _on_event_received(
self, partition_context: PartitionContext, event: Union[Optional[EventData], List[EventData]]
) -> None:
if event:
try:
partition_context._last_received_event = event[-1] # type: ignore #pylint:disable=protected-access
except TypeError:
partition_context._last_received_event = event # type: ignore # pylint:disable=protected-access
links = get_event_links(event)
with self._context(links=links):
await self._event_handler(partition_context, event)
else:
await self._event_handler(partition_context, event)
async def _close_consumer(self, partition_context):
partition_id = partition_context.partition_id
try:
await self._consumers[partition_id].close()
del self._consumers[partition_id]
await self._close_partition(
partition_context,
CloseReason.OWNERSHIP_LOST if self._running else CloseReason.SHUTDOWN,
)
await self._ownership_manager.release_ownership(partition_id)
finally:
if partition_id in self._tasks:
del self._tasks[partition_id]
async def _receive(
self, partition_id: str, checkpoint: Optional[Dict[str, Any]] = None
) -> None: # pylint: disable=too-many-statements
try: # pylint:disable=too-many-nested-blocks
_LOGGER.info("start ownership %r, checkpoint %r", partition_id, checkpoint)
(
initial_event_position,
event_position_inclusive,
) = self.get_init_event_position(partition_id, checkpoint)
if partition_id in self._partition_contexts:
partition_context = self._partition_contexts[partition_id]
partition_context._last_received_event = None # pylint:disable=protected-access
else:
partition_context = PartitionContext(
self._namespace,
self._eventhub_name,
self._consumer_group,
partition_id,
self._checkpoint_store,
)
self._partition_contexts[partition_id] = partition_context
event_received_callback = partial(
self._on_event_received, partition_context
)
self._consumers[partition_id] = self.create_consumer( # type: ignore
partition_id,
initial_event_position,
event_position_inclusive,
event_received_callback, # type: ignore
)
if self._partition_initialize_handler:
try:
await self._partition_initialize_handler(partition_context)
except Exception as err: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while running on_partition_initialize. The exception is %r.",
self._id,
self._eventhub_name,
partition_id,
self._consumer_group,
err,
)
await self._process_error(partition_context, err)
while self._running:
try:
await self._consumers[partition_id].receive(
self._batch, self._max_batch_size, self._max_wait_time
)
except asyncio.CancelledError:
_LOGGER.info(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" is cancelled",
self._id,
self._eventhub_name,
partition_id,
self._consumer_group,
)
raise
except Exception as error: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while receiving. The exception is %r.",
self._id,
self._eventhub_name,
partition_id,
self._consumer_group,
error,
)
await self._process_error(partition_context, error)
break
finally:
await asyncio.shield(self._close_consumer(partition_context))
async def start(self) -> None:
"""Start the EventProcessor.
The EventProcessor will try to claim and balance partition ownership with other `EventProcessor`
and asynchronously start receiving EventData from EventHub and processing events.
:return: None
"""
_LOGGER.info("EventProcessor %r is being started", self._id)
if not self._running:
self._running = True
while self._running:
random_jitter = self._load_balancing_interval * random.random() * 0.2
load_balancing_interval = self._load_balancing_interval + random_jitter
try:
claimed_partition_ids = (
await self._ownership_manager.claim_ownership()
)
if claimed_partition_ids:
existing_pids = set(self._tasks.keys())
claimed_pids = set(claimed_partition_ids)
to_cancel_pids = existing_pids - claimed_pids
newly_claimed_pids = claimed_pids - existing_pids
if newly_claimed_pids:
checkpoints = (
await self._ownership_manager.get_checkpoints()
if self._checkpoint_store
else None
)
self._create_tasks_for_claimed_ownership(
newly_claimed_pids, checkpoints
)
else:
_LOGGER.info(
"EventProcessor %r hasn't claimed an ownership. It keeps claiming.",
self._id,
)
to_cancel_pids = set(self._tasks.keys())
await self._cancel_tasks_for_partitions(to_cancel_pids)
except asyncio.CancelledError: # pylint: disable=try-except-raise
raise
except Exception as err: # pylint:disable=broad-except
# ownership_manager.get_checkpoints() and ownership_manager.claim_ownership() may raise exceptions
# when there are load balancing and/or checkpointing (checkpoint_store isn't None).
# They're swallowed here to retry every self._load_balancing_interval seconds. Meanwhile this event
# processor won't lose the partitions it has claimed before.
# If it keeps failing, other EventProcessors will start to claim ownership of the partitions
                    # that this EventProcessor is working on. So two or more EventProcessors may be working
# on the same partition for a short while.
# Setting owner_level would create exclusive connection to the partition and
# alleviate duplicate-receiving greatly.
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r consumer group %r. "
"An error occurred while load-balancing and claiming ownership. "
"The exception is %r. Retrying after %r seconds",
self._id,
self._eventhub_name,
self._consumer_group,
err,
load_balancing_interval
)
await self._process_error(None, err) # type: ignore
await asyncio.sleep(load_balancing_interval, **self._internal_kwargs)
async def stop(self) -> None:
"""Stop the EventProcessor.
The EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions
it is working on.
Other running EventProcessor will take over these released partitions.
A stopped EventProcessor can be restarted by calling method `start` again.
:return: None
"""
self._running = False
pids = list(self._tasks.keys())
await self._cancel_tasks_for_partitions(pids)
_LOGGER.info("EventProcessor %r tasks have been cancelled.", self._id)
while self._tasks:
await asyncio.sleep(1, **self._internal_kwargs)
_LOGGER.info("EventProcessor %r has been stopped.", self._id)
|
Azure/azure-sdk-for-python
|
sdk/eventhub/azure-eventhub/azure/eventhub/aio/_eventprocessor/event_processor.py
|
Python
|
mit
| 18,174
| 0.002531
|
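The EventProcessor above is the internal engine behind the SDK's public receive loop. A sketch of driving it through `EventHubConsumerClient` (the connection string and entity names are placeholders):
import asyncio
from azure.eventhub.aio import EventHubConsumerClient
async def on_event(partition_context, event):
    print('partition %s: %r' % (partition_context.partition_id, event))
    await partition_context.update_checkpoint(event)
async def main():
    client = EventHubConsumerClient.from_connection_string(
        '<connection-string>', consumer_group='$Default',
        eventhub_name='<eventhub-name>')
    async with client:  # starts load balancing and receiving
        await client.receive(on_event=on_event, starting_position='-1')
asyncio.run(main())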
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Error(msrest.serialization.Model):
"""this is the management partner operations error.
:param error: this is the ExtendedErrorInfo property.
:type error: ~azure.mgmt.managementpartner.models.ExtendedErrorInfo
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ExtendedErrorInfo'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class ExtendedErrorInfo(msrest.serialization.Model):
"""this is the extended error info.
:param code: this is the error response code. Possible values include: "NotFound", "Conflict",
"BadRequest".
:type code: str or ~azure.mgmt.managementpartner.models.ErrorResponseCode
:param message: this is the extended error info message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ExtendedErrorInfo, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class OperationDisplay(msrest.serialization.Model):
"""this is the management partner operation.
:param provider: the is management partner provider.
:type provider: str
:param resource: the is management partner resource.
:type resource: str
:param operation: the is management partner operation.
:type operation: str
:param description: the is management partner operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OperationList(msrest.serialization.Model):
"""this is the management partner operations list.
:param value: this is the operation response list.
:type value: list[~azure.mgmt.managementpartner.models.OperationResponse]
:param next_link: Url to get the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class OperationResponse(msrest.serialization.Model):
"""this is the management partner operations response.
:param name: this is the operation response name.
:type name: str
:param display: this is the operation display.
:type display: ~azure.mgmt.managementpartner.models.OperationDisplay
:param origin: the is operation response origin information.
:type origin: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationResponse, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
class PartnerResponse(msrest.serialization.Model):
"""this is the management partner operations response.
Variables are only populated by the server, and will be ignored when sending a request.
:param etag: Type of the partner.
:type etag: int
:ivar id: Identifier of the partner.
:vartype id: str
:ivar name: Name of the partner.
:vartype name: str
:ivar type: Type of resource. "Microsoft.ManagementPartner/partners".
:vartype type: str
:param partner_id: This is the partner id.
:type partner_id: str
:param partner_name: This is the partner name.
:type partner_name: str
:param tenant_id: This is the tenant id.
:type tenant_id: str
:param object_id: This is the object id.
:type object_id: str
:param version: This is the version.
:type version: int
:param updated_time: This is the DateTime when the partner was updated.
:type updated_time: ~datetime.datetime
:param created_time: This is the DateTime when the partner was created.
:type created_time: ~datetime.datetime
:param state: This is the partner state. Possible values include: "Active", "Deleted".
:type state: str or ~azure.mgmt.managementpartner.models.ManagementPartnerState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'etag': {'key': 'etag', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'partner_id': {'key': 'properties.partnerId', 'type': 'str'},
'partner_name': {'key': 'properties.partnerName', 'type': 'str'},
'tenant_id': {'key': 'properties.tenantId', 'type': 'str'},
'object_id': {'key': 'properties.objectId', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'int'},
'updated_time': {'key': 'properties.updatedTime', 'type': 'iso-8601'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PartnerResponse, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.id = None
self.name = None
self.type = None
self.partner_id = kwargs.get('partner_id', None)
self.partner_name = kwargs.get('partner_name', None)
self.tenant_id = kwargs.get('tenant_id', None)
self.object_id = kwargs.get('object_id', None)
self.version = kwargs.get('version', None)
self.updated_time = kwargs.get('updated_time', None)
self.created_time = kwargs.get('created_time', None)
self.state = kwargs.get('state', None)
|
Azure/azure-sdk-for-python
|
sdk/managementpartner/azure-mgmt-managementpartner/azure/mgmt/managementpartner/models/_models.py
|
Python
|
mit
| 7,226
| 0.00083
|
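Each model above pulls its documented attributes out of `**kwargs`, so instances are built with plain keyword arguments. A minimal construction sketch:
from azure.mgmt.managementpartner.models import Error, ExtendedErrorInfo
info = ExtendedErrorInfo(code='NotFound', message='partner not found')
err = Error(error=info)
print(err.error.code, err.error.message)  # NotFound partner not found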
from django.http.response import HttpResponse
from django.shortcuts import render_to_response, render
from Browser.models import UserInfo
from Browser.views import cellar, administrator
def simple_response(request, *args, **kwargs):
template_name = kwargs["path"]
if kwargs["type"] :
template_name = kwargs["type"] + "/" + template_name
userInfo = UserInfo.getUserInfo(request)
context = {
"isMetic" : userInfo.isMetic(),
"isYeoman" : userInfo.isYeoman(),
"isAdmin" : userInfo.isAdmin(),
"isSuper" : userInfo.isSuper()
}
    return render(request, template_name, context)
|
SonienTaegi/CELLAR
|
Browser/views/__init__.py
|
Python
|
gpl-2.0
| 672
| 0.013393
|
import logging
import shlex
import subprocess
import json
from airflow.hooks.aws_emr import EMRHook
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.exceptions import AirflowException
from slackclient import SlackClient
from time import sleep
import os
class AwsEMROperator(BaseOperator):
ui_color = '#00BFFF'
sc = None
@apply_defaults
def __init__(
self,
event_xcoms=None,
aws_emr_conn_id='aws_default',
xcom_push=True,
command_args=[[]],
channel="#airflow_notifications",
download_these_files=[],
start_cluster=False,
terminate_cluster=False,
xcom_task_id="job_runner",
dn_dir="./tmp",
username='airflow',
method='chat.postMessage',
icon_url='https://raw.githubusercontent.com/airbnb/airflow/master/airflow/www/static/pin_100.png',
*args,
**kwargs):
"""
        Operator that optionally starts or terminates an AWS EMR cluster via
        the AWS CLI, monitors the job, and posts progress to Slack.
"""
super(AwsEMROperator, self).__init__(*args, **kwargs)
self.channel = channel
self.username = username
self.icon_url = icon_url
self.download_these_files = download_these_files
self.conn_id = aws_emr_conn_id
        self.method = method
self.command_args = command_args
self.start_cluster = start_cluster
self.terminate_cluster = terminate_cluster
self.dn_dir = dn_dir
self.xcom_task_id = xcom_task_id
def slack_connect(self):
self.sc = SlackClient(self.token)
def slack_message(self, text):
self.token = os.environ["SLACK_API_TOKEN"]
if not self.sc:
self.slack_connect()
api_params = {
'channel': self.channel,
'username': self.username,
'text': text,
'icon_url': self.icon_url,
'link_names': 1
}
self.sc.api_call(self.method, **api_params)
def construct_command(self):
command = "aws emr create-cluster"
for key, value in self.command_args:
command = command + " " + key + " " + value
logging.info("Command is: " + command)
return shlex.split(command)
def exec_command(self, command):
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr != b'':
logging.info("Non zero exit code.")
logging.info(stderr)
raise AirflowException("The return code is non zero: " +
stderr)
print(stdout)
print(type(stdout))
try:
output = json.loads(stdout.replace("\n", ""))["ClusterId"]
except TypeError:
output = json.loads(stdout.decode("utf-8")\
.replace("\n",""))["ClusterId"]
logging.info("output_id: " + output)
return output
def execute(self, context):
s3_hook = S3Hook()
for bucket, key in self.download_these_files:
print(bucket)
print(key)
basename = os.path.basename(key)
print(basename)
print(os.path.join(self.dn_dir, basename))
local_path = os.path.join(self.dn_dir, basename)
s3_hook.download_file(bucket, key, local_path)
job_monitor = EMRHook(emr_conn_id="S3_default")
if self.start_cluster:
output_id = self.exec_command(self.construct_command())
context['ti'].xcom_push(key="code", value=output_id)
if self.terminate_cluster:
            output_id = context['ti'].xcom_pull(
                task_ids=self.xcom_task_id, key="code")
self.slack_message("""
@channel\n ----------------------------------------\nThe Cluster is being terminated for this job. \n ----------------------------------------\nProcess id = %s
""" % output_id)
job_monitor.terminate_job(output_id)
self.slack_message("""
@channel
The task Id of the new job is: %s
""" %
output_id)
while True:
if job_monitor.isRunning(output_id):
sleep(900)
elif job_monitor.isSuccessfull(output_id):
self.slack_message("""
@channel\n ----------------------------------------\nThe process is Successful.\n Manual check is always a good thing. \n ----------------------------------------\nProcess id = %s
""" % output_id)
break
elif job_monitor.isTerminated(output_id):
self.slack_message("""
@channel\n ----------------------------------------\nThe process has been terminated\n ----------------------------------------\nProcess id = %s
""" % output_id)
raise AirflowException("The process is terminated")
elif job_monitor.isWaiting(output_id):
self.slack_message("""
@channel\n ----------------------------------------\nThe process is WAITING\n ----------------------------------------\nProcess id = %s
""" % output_id)
raise AirflowException(
"Somebody needs to go see whats up. Spark Job is in Waiting State for id: %s" % output_id)
else:
sleep(300)
# def slack_message():
# token = os.environ["SLACK_API_TOKEN"]
# sc = SlackClient(token)
# api_params = {
# 'channel': "airflow_notifications",
# 'username': "airflow",
# 'text': "ssup @channel",
# 'icon_url': 'https://raw.githubusercontent.com/airbnb/airflow/master/airflow/www/static/pin_100.png',
# 'link_names': 1
# }
# sc.api_call("chat.postMessage", **api_params)
|
brandsoulmates/incubator-airflow
|
airflow/operators/aws_emr_operator.py
|
Python
|
apache-2.0
| 5,997
| 0.001834
|
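`construct_command` plus `exec_command` above boil down to: shell out to the AWS CLI, fail if anything lands on stderr, and read `ClusterId` from the JSON on stdout. A condensed Python 3 sketch of that pattern (the CLI arguments are placeholders and assume a configured AWS CLI):
import json
import shlex
import subprocess
command = shlex.split('aws emr create-cluster --name demo')  # placeholder args
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr:
    raise RuntimeError('non-zero output on stderr: ' + stderr.decode('utf-8'))
cluster_id = json.loads(stdout.decode('utf-8'))['ClusterId']
print(cluster_id)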
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.deeplab import SeparableASPP
class TestSeparableASPP(unittest.TestCase):
def setUp(self):
self.in_channels = 128
self.out_channels = 32
self.link = SeparableASPP(
self.in_channels, self.out_channels)
def check_call(self):
xp = self.link.xp
x = chainer.Variable(xp.random.uniform(
low=-1, high=1, size=(2, self.in_channels, 64, 64)
).astype(xp.float32))
y = self.link(x)
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, xp.ndarray)
self.assertEqual(y.shape, (2, self.out_channels, 64, 64))
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
testing.run_module(__name__, __file__)
|
chainer/chainercv
|
tests/links_tests/model_tests/deeplab_tests/test_aspp.py
|
Python
|
mit
| 975
| 0
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import logging
from prettytable import PrettyTable
from six.moves import zip
from st2client import formatters
from st2client.utils import strutil
from st2client.utils.terminal import get_terminal_size
LOG = logging.getLogger(__name__)
# Minimum width for the ID to make sure the ID column doesn't wrap across
# multiple lines
MIN_ID_COL_WIDTH = 26
DEFAULT_ATTRIBUTE_DISPLAY_ORDER = ['id', 'name', 'pack', 'description']
class MultiColumnTable(formatters.Formatter):
@classmethod
def format(cls, entries, *args, **kwargs):
attributes = kwargs.get('attributes', [])
attribute_transform_functions = kwargs.get('attribute_transform_functions', {})
widths = kwargs.get('widths', [])
widths = widths or []
if not widths and attributes:
# Dynamically calculate column size based on the terminal size
lines, cols = get_terminal_size()
if attributes[0] == 'id':
# consume iterator and save as entries so collection is accessible later.
entries = [e for e in entries]
# first column contains id, make sure it's not broken up
first_col_width = cls._get_required_column_width(values=[e.id for e in entries],
minimum_width=MIN_ID_COL_WIDTH)
cols = (cols - first_col_width)
col_width = int(math.floor((cols / len(attributes))))
else:
col_width = int(math.floor((cols / len(attributes))))
first_col_width = col_width
widths = []
for index in range(0, len(attributes)):
if index == 0:
widths.append(first_col_width)
else:
widths.append(col_width)
if not attributes or 'all' in attributes:
attributes = sorted([attr for attr in entries[0].__dict__
if not attr.startswith('_')])
# Determine table format.
if len(attributes) == len(widths):
# Customize width for each column.
columns = zip(attributes, widths)
else:
# If only 1 width value is provided then
# apply it to all columns else fix at 28.
width = widths[0] if len(widths) == 1 else 28
columns = zip(attributes,
[width for i in range(0, len(attributes))])
# Format result to table.
table = PrettyTable()
for column in columns:
table.field_names.append(column[0])
table.max_width[column[0]] = column[1]
table.padding_width = 1
table.align = 'l'
table.valign = 't'
for entry in entries:
# TODO: Improve getting values of nested dict.
values = []
for field_name in table.field_names:
if '.' in field_name:
field_names = field_name.split('.')
value = getattr(entry, field_names.pop(0), {})
for name in field_names:
value = cls._get_field_value(value, name)
if type(value) is str:
break
value = strutil.unescape(value)
values.append(value)
else:
value = cls._get_simple_field_value(entry, field_name)
transform_function = attribute_transform_functions.get(field_name,
lambda value: value)
value = transform_function(value=value)
value = strutil.unescape(value)
values.append(value)
table.add_row(values)
return table
@staticmethod
def _get_simple_field_value(entry, field_name):
"""
Format a value for a simple field.
"""
value = getattr(entry, field_name, '')
if isinstance(value, (list, tuple)):
if len(value) == 0:
value = ''
elif isinstance(value[0], (str, unicode)):
# List contains simple string values, format it as comma
# separated string
value = ', '.join(value)
return value
@staticmethod
def _get_field_value(value, field_name):
r_val = value.get(field_name, None)
if r_val is None:
return ''
if isinstance(r_val, list) or isinstance(r_val, dict):
return r_val if len(r_val) > 0 else ''
return r_val
@staticmethod
def _get_friendly_column_name(name):
if not name:
return None
friendly_name = name.replace('_', ' ').replace('.', ' ').capitalize()
return friendly_name
@staticmethod
def _get_required_column_width(values, minimum_width=0):
max_width = len(max(values, key=len)) if values else minimum_width
return max_width if max_width > minimum_width else minimum_width
class PropertyValueTable(formatters.Formatter):
@classmethod
def format(cls, subject, *args, **kwargs):
attributes = kwargs.get('attributes', None)
attribute_display_order = kwargs.get('attribute_display_order',
DEFAULT_ATTRIBUTE_DISPLAY_ORDER)
attribute_transform_functions = kwargs.get('attribute_transform_functions', {})
if not attributes or 'all' in attributes:
attributes = sorted([attr for attr in subject.__dict__
if not attr.startswith('_')])
for attr in attribute_display_order[::-1]:
if attr in attributes:
attributes.remove(attr)
attributes = [attr] + attributes
table = PrettyTable()
table.field_names = ['Property', 'Value']
table.max_width['Property'] = 20
table.max_width['Value'] = 60
table.padding_width = 1
table.align = 'l'
table.valign = 't'
for attribute in attributes:
if '.' in attribute:
field_names = attribute.split('.')
value = cls._get_attribute_value(subject, field_names.pop(0))
for name in field_names:
value = cls._get_attribute_value(value, name)
if type(value) is str:
break
else:
value = cls._get_attribute_value(subject, attribute)
transform_function = attribute_transform_functions.get(attribute,
lambda value: value)
value = transform_function(value=value)
if type(value) is dict or type(value) is list:
value = json.dumps(value, indent=4)
value = strutil.unescape(value)
table.add_row([attribute, value])
return table
@staticmethod
def _get_attribute_value(subject, attribute):
if isinstance(subject, dict):
r_val = subject.get(attribute, None)
else:
r_val = getattr(subject, attribute, None)
if r_val is None:
return ''
if isinstance(r_val, list) or isinstance(r_val, dict):
return r_val if len(r_val) > 0 else ''
return r_val
|
grengojbo/st2
|
st2client/st2client/formatters/table.py
|
Python
|
apache-2.0
| 8,219
| 0.001095
|
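When callers pass no explicit widths, `MultiColumnTable.format` reserves max(longest id, 26) characters for the `id` column and splits the remaining terminal width evenly across all attributes. Worked numbers:
import math
cols = 120                                   # terminal width
attributes = ['id', 'name', 'pack', 'description']
first_col_width = max(24, 26)                # longest id is 24 chars -> keep 26
col_width = int(math.floor((cols - first_col_width) / len(attributes)))  # 94 // 4 = 23
widths = [first_col_width] + [col_width] * (len(attributes) - 1)
print(widths)                                # [26, 23, 23, 23]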
import functools
import random
from collections import defaultdict
from mpmath.libmp.libintmath import ifac
from ..core import Basic, Tuple, sympify
from ..core.compatibility import as_int, is_sequence
from ..matrices import zeros
from ..polys import lcm
from ..utilities import flatten, has_dups, has_variety
from ..utilities.iterables import minlex, runs
def _af_rmul(a, b):
"""
Return the product b*a; input and output are array forms. The ith value
is a[b[i]].
Examples
========
>>> Permutation.print_cyclic = False
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmuln
"""
return [a[i] for i in b]
def _af_rmuln(*abc):
"""
Given [a, b, c, ...] return the product of ...*c*b*a using array forms.
The ith value is a[b[c[i]]].
Examples
========
>>> Permutation.print_cyclic = False
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmul
"""
a = abc
m = len(a)
if m == 3:
p0, p1, p2 = a
return [p0[p1[i]] for i in p2]
if m == 4:
p0, p1, p2, p3 = a
return [p0[p1[p2[i]]] for i in p3]
if m == 5:
p0, p1, p2, p3, p4 = a
return [p0[p1[p2[p3[i]]]] for i in p4]
if m == 6:
p0, p1, p2, p3, p4, p5 = a
return [p0[p1[p2[p3[p4[i]]]]] for i in p5]
if m == 7:
p0, p1, p2, p3, p4, p5, p6 = a
return [p0[p1[p2[p3[p4[p5[i]]]]]] for i in p6]
if m == 8:
p0, p1, p2, p3, p4, p5, p6, p7 = a
return [p0[p1[p2[p3[p4[p5[p6[i]]]]]]] for i in p7]
if m == 1:
return a[0][:]
if m == 2:
a, b = a
return [a[i] for i in b]
if m == 0:
raise ValueError('String must not be empty')
p0 = _af_rmuln(*a[:m//2])
p1 = _af_rmuln(*a[m//2:])
return [p0[i] for i in p1]
def _af_parity(pi):
"""
Computes the parity of a permutation in array form.
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that x > y but p[x] < p[y].
Examples
========
>>> _af_parity([0, 1, 2, 3])
0
>>> _af_parity([3, 2, 0, 1])
1
See Also
========
Permutation
"""
n = len(pi)
a = [0] * n
c = 0
for j in range(n):
if a[j] == 0:
c += 1
a[j] = 1
i = j
while pi[i] != j:
i = pi[i]
a[i] = 1
return (n - c) % 2
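# Why (n - c) % 2 works: the loop above counts the cycles of pi (fixed points
# included), and a k-cycle factors into k - 1 transpositions, so pi factors
# into n - c transpositions in total; the parity of that count is the parity
# of the permutation. E.g. [3, 2, 0, 1] is the single 4-cycle (0 3 1 2), so
# n = 4, c = 1 and the parity is (4 - 1) % 2 = 1, matching the docstring.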
def _af_invert(a):
"""
Finds the inverse, ~A, of a permutation, A, given in array form.
Examples
========
>>> A = [1, 2, 0, 3]
>>> _af_invert(A)
[2, 0, 1, 3]
>>> _af_rmul(_, A)
[0, 1, 2, 3]
See Also
========
Permutation, __invert__
"""
inv_form = [0] * len(a)
for i, ai in enumerate(a):
inv_form[ai] = i
return inv_form
def _af_pow(a, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> _af_pow(p._array_form, 4)
[0, 1, 2, 3]
"""
if n == 0:
return list(range(len(a)))
if n < 0:
return _af_pow(_af_invert(a), -n)
if n == 1:
return a[:]
elif n == 2:
b = [a[i] for i in a]
elif n == 3:
b = [a[a[i]] for i in a]
elif n == 4:
b = [a[a[a[i]]] for i in a]
else:
# use binary multiplication
b = list(range(len(a)))
while 1:
if n & 1:
b = [b[i] for i in a]
n -= 1
if not n:
break
if n % 4 == 0:
a = [a[a[a[i]]] for i in a]
n = n // 4
elif n % 2 == 0:
a = [a[i] for i in a]
n = n // 2
return b
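# Note on the binary-multiplication branch above: each pass either peels the
# low bit of n (b = [b[i] for i in a] applies a once more to the accumulator)
# or shrinks n by powering the base ([a[a[a[i]]] for i in a] is a**4, used
# when n % 4 == 0), so a power costs O(len(a) * log(n)). E.g. n = 5 peels a
# bit (n=4), takes one quartic step (n=1), then peels the last bit (n=0).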
def _af_commutes_with(a, b):
"""
Checks if the two permutations with array forms
given by ``a`` and ``b`` commute.
Examples
========
>>> _af_commutes_with([1, 2, 0], [0, 2, 1])
False
See Also
========
Permutation, commutes_with
"""
return not any(a[b[i]] != b[a[i]] for i in range(len(a) - 1))
class Cycle(dict):
"""
Wrapper around dict which provides the functionality of a disjoint cycle.
A cycle shows the rule to use to move subsets of elements to obtain
a permutation. The Cycle class is more flexible than Permutation in
that 1) all elements need not be present in order to investigate how
multiple cycles act in sequence and 2) it can contain singletons:
A Cycle will automatically parse a cycle given as a tuple on the rhs:
>>> Cycle(1, 2)(2, 3)
Cycle(1, 3, 2)
The identity cycle, Cycle(), can be used to start a product:
>>> Cycle()(1, 2)(2, 3)
Cycle(1, 3, 2)
The array form of a Cycle can be obtained by calling the list
method (or passing it to the list function) and all elements from
0 will be shown:
>>> a = Cycle(1, 2)
>>> a.list()
[0, 2, 1]
>>> list(a)
[0, 2, 1]
If a larger (or smaller) range is desired use the list method and
provide the desired size -- but the Cycle cannot be truncated to
a size smaller than the largest element that is out of place:
>>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3)
>>> b.list()
[0, 2, 1, 3, 4]
>>> b.list(b.size + 1)
[0, 2, 1, 3, 4, 5]
>>> b.list(-1)
[0, 2, 1]
Singletons are not shown when printing with one exception: the largest
element is always shown -- as a singleton if necessary:
>>> Cycle(1, 4, 10)(4, 5)
Cycle(1, 5, 4, 10)
>>> Cycle(1, 2)(4)(5)(10)
Cycle(1, 2)(10)
The array form can be used to instantiate a Permutation so other
properties of the permutation can be investigated:
>>> Perm(Cycle(1, 2)(3, 4).list()).transpositions()
[(1, 2), (3, 4)]
Notes
=====
The underlying structure of the Cycle is a dictionary and although
the __iter__ method has been redefined to give the array form of the
cycle, the underlying dictionary items are still available with the
such methods as items():
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
See Also
========
Permutation
"""
def __missing__(self, arg):
"""Enter arg into dictionary and return arg."""
arg = as_int(arg)
self[arg] = arg
return arg
def __iter__(self):
for i in self.list():
yield i
def __call__(self, *other):
"""Return product of cycles processed from R to L.
Examples
========
>>> Cycle(1, 2)(2, 3)
Cycle(1, 3, 2)
An instance of a Cycle will automatically parse list-like
objects and Permutations that are on the right. It is more
flexible than the Permutation in that all elements need not
be present:
>>> a = Cycle(1, 2)
>>> a(2, 3)
Cycle(1, 3, 2)
>>> a(2, 3)(4, 5)
Cycle(1, 3, 2)(4, 5)
"""
rv = Cycle(*other)
for k, v in zip(list(self.keys()), [rv[v] for v in self.values()]):
rv[k] = v
return rv
def list(self, size=None):
"""Return the cycles as an explicit list starting from 0 up
to the greater of the largest value in the cycles and size.
Truncation of trailing unmoved items will occur when size
is less than the maximum element in the cycle; if this is
desired, setting ``size=-1`` will guarantee such trimming.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Cycle(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Cycle(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
"""
if not self and size is None:
raise ValueError('must give size for empty Cycle')
if size is not None:
big = max([k for k, v in self.items() if v != k] + [0])
size = max(size, big + 1)
else:
size = self.size
return [self[i] for i in range(size)]
def __repr__(self):
"""We want it to print as a Cycle, not as a dict.
Examples
========
>>> Cycle(1, 2)
Cycle(1, 2)
>>> print(_)
Cycle(1, 2)
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
"""
if not self:
return 'Cycle()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += f'({big})'
return f'Cycle{s}'
def __init__(self, *args):
"""Load up a Cycle instance with the values for the cycle.
Examples
========
>>> Cycle(1, 2, 6)
Cycle(1, 2, 6)
"""
super().__init__()
if not args:
return
if len(args) == 1:
if isinstance(args[0], Permutation):
for c in args[0].cyclic_form:
self.update(self(*c))
return
elif isinstance(args[0], Cycle):
for k, v in args[0].items():
self[k] = v
return
args = [as_int(a) for a in args]
if any(i < 0 for i in args):
raise ValueError('negative integers are not allowed in a cycle.')
if has_dups(args):
raise ValueError('All elements must be unique in a cycle.')
for i in range(-len(args), 0):
self[args[i]] = args[i + 1]
@property
def size(self):
if not self:
return 0
return max(self.keys()) + 1
def copy(self):
return Cycle(self)
class Permutation(Basic):
"""
    A permutation, alternatively known as an 'arrangement number' or 'ordering',
is an arrangement of the elements of an ordered list into a one-to-one
mapping with itself. The permutation of a given arrangement is given by
indicating the positions of the elements after re-arrangement. For
example, if one started with elements [x, y, a, b] (in that order) and
they were reordered as [x, y, b, a] then the permutation would be
[0, 1, 3, 2]. Notice that (in Diofant) the first element is always referred
to as 0 and the permutation uses the indices of the elements in the
original ordering, not the elements (a, b, etc...) themselves.
>>> Permutation.print_cyclic = False
Notes
=====
*Permutations Notation*
Permutations are commonly represented in disjoint cycle or array forms.
*Array Notation and 2-line Form*
In the 2-line form, the elements and their final positions are shown
as a matrix with 2 rows:
[0 1 2 ... n-1]
[p(0) p(1) p(2) ... p(n-1)]
Since the first line is always range(n), where n is the size of p,
it is sufficient to represent the permutation by the second line,
referred to as the "array form" of the permutation. This is entered
in brackets as the argument to the Permutation class:
>>> p = Permutation([0, 2, 1])
>>> p
Permutation([0, 2, 1])
Given i in range(p.size), the permutation maps i to i^p
>>> [i ^ p for i in range(p.size)]
[0, 2, 1]
The composite of two permutations p*q means first apply p, then q, so
i^(p*q) = (i^p)^q which is i^p^q according to Python precedence rules:
>>> q = Permutation([2, 1, 0])
>>> [i ^ p ^ q for i in range(3)]
[2, 0, 1]
>>> [i ^ (p*q) for i in range(3)]
[2, 0, 1]
One can use also the notation p(i) = i^p, but then the composition
rule is (p*q)(i) = q(p(i)), not p(q(i)):
>>> [(p*q)(i) for i in range(p.size)]
[2, 0, 1]
>>> [q(p(i)) for i in range(p.size)]
[2, 0, 1]
>>> [p(q(i)) for i in range(p.size)]
[1, 2, 0]
*Disjoint Cycle Notation*
In disjoint cycle notation, only the elements that have shifted are
indicated. In the above case, the 2 and 1 switched places. This can
be entered in two ways:
>>> Permutation(1, 2) == Permutation([[1, 2]]) == p
True
Only the relative ordering of elements in a cycle matter:
>>> Permutation(1, 2, 3) == Permutation(2, 3, 1) == Permutation(3, 1, 2)
True
The disjoint cycle notation is convenient when representing permutations
that have several cycles in them:
>>> Permutation(1, 2)(3, 5) == Permutation([[1, 2], [3, 5]])
True
It also provides some economy in entry when computing products of
permutations that are written in disjoint cycle notation:
>>> Permutation(1, 2)(1, 3)(2, 3)
Permutation([0, 3, 2, 1])
>>> _ == Permutation([[1, 2]])*Permutation([[1, 3]])*Permutation([[2, 3]])
True
Entering a singleton in a permutation is a way to indicate the size of the
permutation. The ``size`` keyword can also be used.
Array-form entry:
>>> Permutation([[1, 2], [9]])
Permutation([0, 2, 1], size=10)
>>> Permutation([[1, 2]], size=10)
Permutation([0, 2, 1], size=10)
Cyclic-form entry:
>>> Permutation(1, 2, size=10)
Permutation([0, 2, 1], size=10)
>>> Permutation(9)(1, 2)
Permutation([0, 2, 1], size=10)
Caution: no singleton containing an element larger than the largest
in any previous cycle can be entered. This is an important difference
in how Permutation and Cycle handle the __call__ syntax. A singleton
argument at the start of a Permutation performs instantiation of the
Permutation and is permitted:
>>> Permutation(5)
Permutation([], size=6)
A singleton entered after instantiation is a call to the permutation
-- a function call -- and if the argument is out of range it will
trigger an error. For this reason, it is better to start the cycle
with the singleton:
    The following fails because there is no element 3:
>>> Permutation(1, 2)(3)
Traceback (most recent call last):
...
IndexError: list index out of range
This is ok: only the call to an out of range singleton is prohibited;
otherwise the permutation autosizes:
>>> Permutation(3)(1, 2)
Permutation([0, 2, 1, 3])
>>> Permutation(1, 2)(3, 4) == Permutation(3, 4)(1, 2)
True
*Equality testing*
The array forms must be the same in order for permutations to be equal:
>>> Permutation([1, 0, 2, 3]) == Permutation([1, 0])
False
*Identity Permutation*
The identity permutation is a permutation in which no element is out of
place. It can be entered in a variety of ways. All the following create
an identity permutation of size 4:
>>> I = Permutation([0, 1, 2, 3])
>>> all(p == I for p in [Permutation(3), Permutation(range(4)),
... Permutation([], size=4), Permutation(size=4)])
True
Watch out for entering the range *inside* a set of brackets (which is
cycle notation):
>>> I == Permutation([range(4)])
False
*Permutation Printing*
There are a few things to note about how Permutations are printed.
1) If you prefer one form (array or cycle) over another, you can set that
with the print_cyclic flag.
>>> Permutation(1, 2)(4, 5)(3, 4)
Permutation([0, 2, 1, 4, 5, 3])
>>> p = _
>>> Permutation.print_cyclic = True
>>> p
Permutation(1, 2)(3, 4, 5)
>>> Permutation.print_cyclic = False
2) Regardless of the setting, a list of elements in the array for cyclic
form can be obtained and either of those can be copied and supplied as
the argument to Permutation:
>>> p.array_form
[0, 2, 1, 4, 5, 3]
>>> p.cyclic_form
[[1, 2], [3, 4, 5]]
>>> Permutation(_) == p
True
3) Printing is economical in that as little as possible is printed while
retaining all information about the size of the permutation:
>>> Permutation([1, 0, 2, 3])
Permutation([1, 0, 2, 3])
>>> Permutation([1, 0, 2, 3], size=20)
Permutation([1, 0], size=20)
>>> Permutation([1, 0, 2, 4, 3, 5, 6], size=20)
Permutation([1, 0, 2, 4, 3], size=20)
>>> p = Permutation([1, 0, 2, 3])
>>> Permutation.print_cyclic = True
>>> p
Permutation(3)(0, 1)
>>> Permutation.print_cyclic = False
The 2 was not printed but it is still there as can be seen with the
array_form and size methods:
>>> p.array_form
[1, 0, 2, 3]
>>> p.size
4
*Short introduction to other methods*
The permutation can act as a bijective function, telling what element is
located at a given position
>>> q = Permutation([5, 2, 3, 4, 1, 0])
>>> q.array_form[1] # the hard way
2
>>> q(1) # the easy way
2
>>> {i: q(i) for i in range(q.size)} # showing the bijection
{0: 5, 1: 2, 2: 3, 3: 4, 4: 1, 5: 0}
The full cyclic form (including singletons) can be obtained:
>>> p.full_cyclic_form
[[0, 1], [2], [3]]
Any permutation can be factored into transpositions of pairs of elements:
>>> Permutation([[1, 2], [3, 4, 5]]).transpositions()
[(1, 2), (3, 5), (3, 4)]
>>> Permutation.rmul(*[Permutation([ti], size=6) for ti in _]).cyclic_form
[[1, 2], [3, 4, 5]]
The number of permutations on a set of n elements is given by n! and is
called the cardinality.
>>> p.size
4
>>> p.cardinality
24
A given permutation has a rank among all the possible permutations of the
same elements, but what that rank is depends on how the permutations are
enumerated. (There are a number of different methods of doing so.) The
lexicographic rank is given by the rank method and this rank is used to
increment a permutation with addition/subtraction:
>>> p.rank()
6
>>> p + 1
Permutation([1, 0, 3, 2])
>>> p.next_lex()
Permutation([1, 0, 3, 2])
>>> _.rank()
7
>>> p.unrank_lex(p.size, rank=7)
Permutation([1, 0, 3, 2])
The product of two permutations p and q is defined as their composition as
functions, (p*q)(i) = q(p(i)).
>>> p = Permutation([1, 0, 2, 3])
>>> q = Permutation([2, 3, 1, 0])
>>> list(q*p)
[2, 3, 0, 1]
>>> list(p*q)
[3, 2, 1, 0]
>>> [q(p(i)) for i in range(p.size)]
[3, 2, 1, 0]
The permutation can be 'applied' to any list-like object, not only
Permutations:
>>> p(['zero', 'one', 'four', 'two'])
['one', 'zero', 'four', 'two']
>>> p('zo42')
['o', 'z', '4', '2']
If you have a list of arbitrary elements, the corresponding permutation
can be found with the from_sequence method:
>>> Permutation.from_sequence('SymPy')
Permutation([1, 3, 2, 0, 4])
See Also
========
Cycle
References
==========
* Skiena, S. 'Permutations.' 1.1 in Implementing Discrete Mathematics
Combinatorics and Graph Theory with Mathematica. Reading, MA:
Addison-Wesley, pp. 3-16, 1990.
* Knuth, D. E. The Art of Computer Programming, Vol. 4: Combinatorial
Algorithms, 1st ed. Reading, MA: Addison-Wesley, 2011.
* Wendy Myrvold and Frank Ruskey. 2001. Ranking and unranking
permutations in linear time. Inf. Process. Lett. 79, 6 (September 2001),
281-284. DOI=10.1016/S0020-0190(01)00141-7
* D. L. Kreher, D. R. Stinson 'Combinatorial Algorithms'
CRC Press, 1999
* Graham, R. L.; Knuth, D. E.; and Patashnik, O.
Concrete Mathematics: A Foundation for Computer Science, 2nd ed.
Reading, MA: Addison-Wesley, 1994.
* https://en.wikipedia.org/wiki/Permutation
* https://en.wikipedia.org/wiki/Lehmer_code
"""
is_Permutation = True
_array_form = None
_cyclic_form = None
_cycle_structure = None
_size = None
_rank = None
def __new__(cls, *args, **kwargs):
"""
Constructor for the Permutation object from a list or a
list of lists in which all elements of the permutation may
appear only once.
Examples
========
>>> Permutation.print_cyclic = False
Permutations entered in array-form are left unaltered:
>>> Permutation([0, 2, 1])
Permutation([0, 2, 1])
Permutations entered in cyclic form are converted to array form;
singletons need not be entered, but can be entered to indicate the
largest element:
>>> Permutation([[4, 5, 6], [0, 1]])
Permutation([1, 0, 2, 3, 5, 6, 4])
>>> Permutation([[4, 5, 6], [0, 1], [19]])
Permutation([1, 0, 2, 3, 5, 6, 4], size=20)
All manipulation of permutations assumes that the smallest element
is 0 (in keeping with 0-based indexing in Python) so if the 0 is
missing when entering a permutation in array form, an error will be
raised:
>>> Permutation([2, 1])
Traceback (most recent call last):
...
ValueError: Integers 0 through 2 must be present.
If a permutation is entered in cyclic form, it can be entered without
singletons and the ``size`` specified so those values can be filled
in, otherwise the array form will only extend to the maximum value
in the cycles:
>>> Permutation([[1, 4], [3, 5, 2]], size=10)
Permutation([0, 4, 3, 5, 1, 2], size=10)
>>> _.array_form
[0, 4, 3, 5, 1, 2, 6, 7, 8, 9]
"""
size = kwargs.pop('size', None)
if size is not None:
size = int(size)
# a) ()
# b) (1) = identity
# c) (1, 2) = cycle
# d) ([1, 2, 3]) = array form
# e) ([[1, 2]]) = cyclic form
# f) (Cycle) = conversion to permutation
# g) (Permutation) = adjust size or return copy
ok = True
if not args: # a
return _af_new(list(range(size or 0)))
elif len(args) > 1: # c
return _af_new(Cycle(*args).list(size))
if len(args) == 1:
a = args[0]
if isinstance(a, Perm): # g
if size is None or size == a.size:
return a
return Perm(a.array_form, size=size)
if isinstance(a, Cycle): # f
return _af_new(a.list(size))
if not is_sequence(a): # b
return _af_new(list(range(a + 1)))
if has_variety(is_sequence(ai) for ai in a):
ok = False
else:
ok = False
if not ok:
raise ValueError('Permutation argument must be a list of ints, '
'a list of lists, Permutation or Cycle.')
# safe to assume args are valid; this also makes a copy
# of the args
args = list(args[0])
is_cycle = args and is_sequence(args[0])
if is_cycle: # e
args = [[int(i) for i in c] for c in args]
else: # d
args = [int(i) for i in args]
# if there are n elements present, 0, 1, ..., n-1 should be present
# unless a cycle notation has been provided. A 0 will be added
# for convenience in case one wants to enter permutations where
# counting starts from 1.
temp = flatten(args)
if has_dups(temp):
if is_cycle:
raise ValueError('there were repeated elements; to resolve '
f"cycles use Cycle{''.join([str(tuple(c)) for c in args])}.")
raise ValueError('there were repeated elements.')
temp = set(temp)
if not is_cycle and \
any(i not in temp for i in range(len(temp))):
raise ValueError(f'Integers 0 through {max(temp)} must be present.')
if is_cycle:
# it's not necessarily canonical so we won't store
# it -- use the array form instead
c = Cycle()
for ci in args:
c = c(*ci)
aform = c.list()
else:
aform = list(args)
if size and size > len(aform):
# don't allow for truncation of permutation which
# might split a cycle and lead to an invalid aform
# but do allow the permutation size to be increased
aform.extend(list(range(len(aform), size)))
size = len(aform)
args = Tuple(*[sympify(_) for _ in aform])
obj = Basic.__new__(cls, args)
obj._array_form = aform
obj._size = size
return obj
@staticmethod
def _af_new(perm):
"""A method to produce a Permutation object from a list;
the list is bound to the _array_form attribute, so it must
not be modified; this method is meant for internal use only;
the list ``a`` is supposed to be generated as a temporary value
in a method, so p = Perm._af_new(a) is the only object
        to hold a reference to ``a``.
Examples
========
>>> Permutation.print_cyclic = False
>>> a = [2, 1, 3, 0]
>>> p = Permutation._af_new(a)
>>> p
Permutation([2, 1, 3, 0])
"""
p = Basic.__new__(Perm, Tuple(perm))
p._array_form = perm
p._size = len(perm)
return p
def _hashable_content(self):
# the array_form (a list) is the Permutation arg, so we need to
# return a tuple, instead
return tuple(self.array_form)
@property
def array_form(self):
"""
Return a copy of the attribute _array_form
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([[2, 0], [3, 1]])
>>> p.array_form
[2, 3, 0, 1]
>>> Permutation([[2, 0, 3, 1]]).array_form
[3, 2, 0, 1]
>>> Permutation([2, 0, 3, 1]).array_form
[2, 0, 3, 1]
>>> Permutation([[1, 2], [4, 5]]).array_form
[0, 2, 1, 3, 5, 4]
"""
return self._array_form[:]
def list(self, size=None):
"""Return the permutation as an explicit list, possibly
trimming unmoved elements if size is less than the maximum
element in the permutation; if this is desired, setting
``size=-1`` will guarantee such trimming.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Permutation(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
>>> Permutation(3).list(-1)
[]
"""
if not self and size is None:
raise ValueError('must give size for empty Cycle')
rv = self.array_form
if size is not None:
if size > self.size:
rv.extend(list(range(self.size, size)))
else:
# find first value from rhs where rv[i] != i
i = self.size - 1
while rv:
if rv[-1] != i:
break
rv.pop()
i -= 1
return rv
@property
def cyclic_form(self):
"""
This is used to convert to the cyclic notation
from the canonical notation. Singletons are omitted.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([0, 3, 1, 2])
>>> p.cyclic_form
[[1, 3, 2]]
>>> Permutation([1, 0, 2, 4, 3, 5]).cyclic_form
[[0, 1], [3, 4]]
See Also
========
array_form, full_cyclic_form
"""
if self._cyclic_form is not None:
return list(self._cyclic_form)
array_form = self.array_form
unchecked = [True] * len(array_form)
cyclic_form = []
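        # trace each unvisited cycle by following array_form until it
        # closes; fixed points (cycles of length 1) are omitted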
for i in range(len(array_form)):
if unchecked[i]:
cycle = []
cycle.append(i)
unchecked[i] = False
j = i
while unchecked[array_form[j]]:
j = array_form[j]
cycle.append(j)
unchecked[j] = False
if len(cycle) > 1:
cyclic_form.append(cycle)
assert cycle == list(minlex(cycle, is_set=True))
cyclic_form.sort()
self._cyclic_form = cyclic_form[:]
return cyclic_form
@property
def full_cyclic_form(self):
"""Return permutation in cyclic form including singletons.
Examples
========
>>> Permutation([0, 2, 1]).full_cyclic_form
[[0], [1, 2]]
"""
need = set(range(self.size)) - set(flatten(self.cyclic_form))
rv = self.cyclic_form
rv.extend([[i] for i in need])
rv.sort()
return rv
@property
def size(self):
"""
Returns the number of elements in the permutation.
Examples
========
>>> Permutation([[3, 2], [0, 1]]).size
4
See Also
========
cardinality, length, order, rank
"""
return self._size
def support(self):
"""Return the elements in permutation, P, for which P[i] != i.
Examples
========
>>> p = Permutation([[3, 2], [0, 1], [4]])
>>> p.array_form
[1, 0, 3, 2, 4]
>>> p.support()
[0, 1, 2, 3]
"""
a = self.array_form
return [i for i, e in enumerate(a) if a[i] != i]
def __add__(self, other):
"""Return permutation that is other higher in rank than self.
The rank is the lexicographical rank, with the identity permutation
having rank of 0.
Examples
========
>>> Permutation.print_cyclic = False
>>> I = Permutation([0, 1, 2, 3])
>>> a = Permutation([2, 1, 3, 0])
>>> I + a.rank() == a
True
See Also
========
__sub__, inversion_vector
"""
rank = (self.rank() + other) % self.cardinality
rv = Perm.unrank_lex(self.size, rank)
rv._rank = rank
return rv
def __sub__(self, other):
        Return the permutation that is ``other`` ranks lower than self.
See Also
========
__add__
"""
return self.__add__(-other)
@staticmethod
def rmul(*args):
"""
Return product of Permutations [a, b, c, ...] as the Permutation whose
ith value is a(b(c(i))).
a, b, c, ... can be Permutation objects or tuples.
Examples
========
>>> Permutation.print_cyclic = False
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(Permutation.rmul(a, b))
[1, 2, 0]
>>> [a(b(i)) for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
Notes
=====
All items in the sequence will be parsed by Permutation as
necessary as long as the first item is a Permutation:
>>> Permutation.rmul(a, [0, 2, 1]) == Permutation.rmul(a, b)
True
The reverse order of arguments will raise a TypeError.
"""
rv = args[0]
for i in range(1, len(args)):
rv = args[i]*rv
return rv
@staticmethod
def rmul_with_af(*args):
"""
Same as rmul, but the elements of args are Permutation objects
which have _array_form.
"""
a = [x._array_form for x in args]
rv = _af_new(_af_rmuln(*a))
return rv
def mul_inv(self, other):
"""
        Return ``other*~self``; both ``self`` and ``other`` must have ``_array_form``.
"""
a = _af_invert(self._array_form)
b = other._array_form
return _af_new(_af_rmul(a, b))
def __rmul__(self, other):
"""This is needed to coerse other to Permutation in rmul."""
return Perm(other)*self
def __mul__(self, other):
"""
Return the product a*b as a Permutation; the ith value is b(a(i)).
Examples
========
>>> Permutation.print_cyclic = False
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
This handles operands in reverse order compared to _af_rmul and rmul:
>>> al = list(a)
>>> bl = list(b)
>>> _af_rmul(al, bl)
[1, 2, 0]
>>> [al[bl[i]] for i in range(3)]
[1, 2, 0]
It is acceptable for the arrays to have different lengths; the shorter
one will be padded to match the longer one:
>>> b*Permutation([1, 0])
Permutation([1, 2, 0])
>>> Permutation([1, 0])*b
Permutation([2, 0, 1])
It is also acceptable to allow coercion to handle conversion of a
single list to the left of a Permutation:
>>> [0, 1]*a # no change: 2-element identity
Permutation([1, 0, 2])
>>> [[0, 1]]*a # exchange first two elements
Permutation([0, 1, 2])
You cannot use more than 1 cycle notation in a product of cycles
since coercion can only handle one argument to the left. To handle
multiple cycles it is convenient to use Cycle instead of Permutation:
>>> Cycle(1, 2)(2, 3)
Cycle(1, 3, 2)
"""
a = self.array_form
# __rmul__ makes sure the other is a Permutation
b = other.array_form
if not b:
perm = a
else:
b.extend(list(range(len(b), len(a))))
perm = [b[i] for i in a] + b[len(a):]
return _af_new(perm)
def commutes_with(self, other):
"""
Checks if the elements are commuting.
Examples
========
>>> a = Permutation([1, 4, 3, 0, 2, 5])
>>> b = Permutation([0, 1, 2, 3, 4, 5])
>>> a.commutes_with(b)
True
>>> b = Permutation([2, 3, 5, 4, 1, 0])
>>> a.commutes_with(b)
False
"""
a = self.array_form
b = other.array_form
return _af_commutes_with(a, b)
def __pow__(self, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> p**4
Permutation([0, 1, 2, 3])
"""
if type(n) == Perm:
raise NotImplementedError(
'p**p is not defined; do you mean p^p (conjugate)?')
n = int(n)
return _af_new(_af_pow(self.array_form, n))
def __rxor__(self, i):
"""Return self(i) when ``i`` is an int.
Examples
========
>>> p = Permutation(1, 2, 9)
>>> 2 ^ p == p(2) == 9
True
"""
if int(i) == i:
return self(i)
else:
raise NotImplementedError(
f'i^p = p(i) when i is an integer, not {i}.')
def __xor__(self, h):
"""Return the conjugate permutation ``~h*self*h``.
If ``a`` and ``b`` are conjugates, ``a = h*b*~h`` and
``b = ~h*a*h`` and both have the same cycle structure.
Examples
========
>>> Permutation.print_cyclic = True
>>> p = Permutation(1, 2, 9)
>>> q = Permutation(6, 9, 8)
>>> p*q != q*p
True
Calculate and check properties of the conjugate:
>>> c = p ^ q
>>> c == ~q*p*q and p == q*c*~q
True
The expression q^p^r is equivalent to q^(p*r):
>>> r = Permutation(9)(4, 6, 8)
>>> q ^ p ^ r == q ^ (p*r)
True
If the term to the left of the conjugate operator, i, is an integer
then this is interpreted as selecting the ith element from the
permutation to the right:
>>> all(i ^ p == p(i) for i in range(p.size))
True
        Note that the * operator has higher precedence than the ^ operator:
>>> q ^ r*p ^ r == q ^ (r*p) ^ r == Permutation(9)(1, 6, 4)
True
Notes
=====
In Python the precedence rule is p^q^r = (p^q)^r which differs
in general from p^(q^r)
>>> q ^ p ^ r
Permutation(9)(1, 4, 8)
>>> q ^ (p ^ r)
Permutation(9)(1, 8, 6)
For a given r and p, both of the following are conjugates of p:
~r*p*r and r*p*~r. But these are not necessarily the same:
>>> ~r*p*r == r*p*~r
True
>>> p = Permutation(1, 2, 9)(5, 6)
>>> ~r*p*r == r*p*~r
False
The conjugate ~r*p*r was chosen so that ``p^q^r`` would be equivalent
to ``p^(q*r)`` rather than ``p^(r*q)``. To obtain r*p*~r, pass ~r to
this method:
>>> p ^ ~r == r*p*~r
True
"""
if self.size != h.size:
raise ValueError('The permutations must be of equal size.')
a = [None]*self.size
h = h._array_form
p = self._array_form
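        # one-pass conjugation: position h[i] receives h[p[i]], which is
        # exactly the array form of ~h*self*h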
for i in range(self.size):
a[h[i]] = h[p[i]]
return _af_new(a)
def transpositions(self):
"""
Return the permutation decomposed into a list of transpositions.
It is always possible to express a permutation as the product of
transpositions, see [1]
Examples
========
>>> p = Permutation([[1, 2, 3], [0, 4, 5, 6, 7]])
>>> t = p.transpositions()
>>> t
[(0, 7), (0, 6), (0, 5), (0, 4), (1, 3), (1, 2)]
>>> print(''.join(str(c) for c in t))
(0, 7)(0, 6)(0, 5)(0, 4)(1, 3)(1, 2)
>>> Permutation.rmul(*[Permutation([ti], size=p.size) for ti in t]) == p
True
References
==========
1. https://en.wikipedia.org/wiki/Transposition_%28mathematics%29#Properties
"""
a = self.cyclic_form
res = []
for x in a:
nx = len(x)
if nx == 2:
res.append(tuple(x))
elif nx > 2:
first = x[0]
for y in x[nx - 1:0:-1]:
res.append((first, y))
return res
@classmethod
def from_sequence(cls, i, key=None):
"""Return the permutation needed to obtain ``i`` from the sorted
elements of ``i``. If custom sorting is desired, a key can be given.
Examples
========
>>> Permutation.print_cyclic = True
>>> Permutation.from_sequence('SymPy')
Permutation(4)(0, 1, 3)
>>> _(sorted('SymPy'))
['S', 'y', 'm', 'P', 'y']
>>> Permutation.from_sequence('SymPy', key=lambda x: x.lower())
Permutation(4)(0, 2)(1, 3)
"""
ic = list(zip(i, range(len(i))))
if key:
ic.sort(key=lambda x: key(x[0]))
else:
ic.sort()
return ~Permutation([i[1] for i in ic])
def __invert__(self):
"""
Return the inverse of the permutation.
A permutation multiplied by its inverse is the identity permutation.
Examples
========
>>> p = Permutation([[2, 0], [3, 1]])
>>> ~p
Permutation([2, 3, 0, 1])
>>> _ == p**-1
True
>>> p*~p == ~p*p == Permutation([0, 1, 2, 3])
True
"""
return _af_new(_af_invert(self._array_form))
def __iter__(self):
"""Yield elements from array form.
Examples
========
>>> list(Permutation(range(3)))
[0, 1, 2]
"""
for i in self.array_form:
yield i
def __call__(self, *i):
"""
Allows applying a permutation instance as a bijective function.
Examples
========
>>> p = Permutation([[2, 0], [3, 1]])
>>> p.array_form
[2, 3, 0, 1]
>>> [p(i) for i in range(4)]
[2, 3, 0, 1]
If an array is given then the permutation selects the items
from the array (i.e. the permutation is applied to the array):
>>> p([x, 1, 0, x**2])
[0, x**2, x, 1]
"""
# list indices can be Integer or int; leave this
# as it is (don't test or convert it) because this
# gets called a lot and should be fast
if len(i) == 1:
i = i[0]
try:
# P(1)
return self._array_form[i]
except TypeError:
# P([a, b, c])
return [i[j] for j in self._array_form]
else:
# P(1, 2, 3)
return self*Permutation(Cycle(*i), size=self.size)
def atoms(self):
"""
Returns all the elements of a permutation
Examples
========
>>> Permutation([0, 1, 2, 3, 4, 5]).atoms()
{0, 1, 2, 3, 4, 5}
>>> Permutation([[0, 1], [2, 3], [4, 5]]).atoms()
{0, 1, 2, 3, 4, 5}
"""
return set(self.array_form)
def next_lex(self):
"""
Returns the next permutation in lexicographical order.
If self is the last permutation in lexicographical order
it returns None.
See [4] section 2.4.
Examples
========
        >>> p = Permutation([2, 3, 1, 0])
>>> p.rank()
17
>>> p = p.next_lex()
>>> p.rank()
18
See Also
========
rank, unrank_lex
"""
perm = self.array_form[:]
n = len(perm)
i = n - 2
while perm[i + 1] < perm[i]:
i -= 1
if i == -1:
return
else:
j = n - 1
while perm[j] < perm[i]:
j -= 1
perm[j], perm[i] = perm[i], perm[j]
i += 1
j = n - 1
while i < j:
perm[j], perm[i] = perm[i], perm[j]
i += 1
j -= 1
return _af_new(perm)
@classmethod
def unrank_nonlex(cls, n, r):
"""
This is a linear time unranking algorithm that does not
respect lexicographic order [3].
Examples
========
>>> Permutation.print_cyclic = False
>>> Permutation.unrank_nonlex(4, 5)
Permutation([2, 0, 3, 1])
>>> Permutation.unrank_nonlex(4, -1)
Permutation([0, 1, 2, 3])
See Also
========
next_nonlex, rank_nonlex
"""
def _unrank1(n, r, a):
if n > 0:
a[n - 1], a[r % n] = a[r % n], a[n - 1]
_unrank1(n - 1, r//n, a)
id_perm = list(range(n))
n = int(n)
r = r % ifac(n)
_unrank1(n, r, id_perm)
return _af_new(id_perm)
def rank_nonlex(self, inv_perm=None):
"""
This is a linear time ranking algorithm that does not
enforce lexicographic order [3].
Examples
========
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank_nonlex()
23
See Also
========
next_nonlex, unrank_nonlex
"""
def _rank1(n, perm, inv_perm):
if n == 1:
return 0
s = perm[n - 1]
t = inv_perm[n - 1]
perm[n - 1], perm[t] = perm[t], s
inv_perm[n - 1], inv_perm[s] = inv_perm[s], t
return s + n*_rank1(n - 1, perm, inv_perm)
if inv_perm is None:
inv_perm = (~self).array_form
if not inv_perm:
return 0
perm = self.array_form[:]
r = _rank1(len(perm), perm, inv_perm)
return r
def next_nonlex(self):
"""
Returns the next permutation in nonlex order [3].
If self is the last permutation in this order it returns None.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([2, 0, 3, 1])
>>> p.rank_nonlex()
5
>>> p = p.next_nonlex()
>>> p
Permutation([3, 0, 1, 2])
>>> p.rank_nonlex()
6
See Also
========
rank_nonlex, unrank_nonlex
"""
r = self.rank_nonlex()
if r != ifac(self.size) - 1:
return Perm.unrank_nonlex(self.size, r + 1)
def rank(self):
"""
Returns the lexicographic rank of the permutation.
Examples
========
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank()
0
>>> p = Permutation([3, 2, 1, 0])
>>> p.rank()
23
See Also
========
next_lex, unrank_lex, cardinality, length, order, size
"""
if self._rank is not None:
return self._rank
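        # Lehmer-code ranking: the digit at position j is weighted by the
        # factorial of the number of positions to its right; decrementing
        # the later entries keeps each digit equal to the count of smaller
        # elements remaining to the right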
rank = 0
rho = self.array_form[:]
n = self.size - 1
size = n + 1
psize = int(ifac(n))
for j in range(size - 1):
rank += rho[j]*psize
for i in range(j + 1, size):
if rho[i] > rho[j]:
rho[i] -= 1
psize //= n
n -= 1
self._rank = rank
return rank
@property
def cardinality(self):
"""
Returns the number of all possible permutations.
Examples
========
>>> p = Permutation([0, 1, 2, 3])
>>> p.cardinality
24
See Also
========
length, order, rank, size
"""
return int(ifac(self.size))
def parity(self):
"""
Computes the parity of a permutation.
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that ``x > y`` but ``p[x] < p[y]``.
Examples
========
>>> p = Permutation([0, 1, 2, 3])
>>> p.parity()
0
>>> p = Permutation([3, 2, 0, 1])
>>> p.parity()
1
See Also
========
_af_parity
"""
if self._cyclic_form is not None:
return (self.size - self.cycles) % 2
return _af_parity(self.array_form)
@property
def is_even(self):
"""
Checks if a permutation is even.
Examples
========
>>> p = Permutation([0, 1, 2, 3])
>>> p.is_even
True
>>> p = Permutation([3, 2, 1, 0])
>>> p.is_even
True
See Also
========
is_odd
"""
return not self.is_odd
@property
def is_odd(self):
"""
Checks if a permutation is odd.
Examples
========
>>> p = Permutation([0, 1, 2, 3])
>>> p.is_odd
False
>>> p = Permutation([3, 2, 0, 1])
>>> p.is_odd
True
See Also
========
is_even
"""
return bool(self.parity() % 2)
@property
def is_Singleton(self):
"""
Checks to see if the permutation contains only one number and is
thus the only possible permutation of this set of numbers
Examples
========
>>> Permutation([0]).is_Singleton
True
>>> Permutation([0, 1]).is_Singleton
False
See Also
========
is_Empty
"""
return self.size == 1
@property
def is_Empty(self):
"""
Checks to see if the permutation is a set with zero elements
Examples
========
>>> Permutation([]).is_Empty
True
>>> Permutation([0]).is_Empty
False
See Also
========
is_Singleton
"""
return self.size == 0
@property
def is_Identity(self):
"""
Returns True if the Permutation is an identity permutation.
Examples
========
>>> p = Permutation([])
>>> p.is_Identity
True
>>> p = Permutation([[0], [1], [2]])
>>> p.is_Identity
True
>>> p = Permutation([0, 1, 2])
>>> p.is_Identity
True
>>> p = Permutation([0, 2, 1])
>>> p.is_Identity
False
See Also
========
order
"""
af = self.array_form
return not af or all(i == af[i] for i in range(self.size))
def ascents(self):
"""
        Returns the positions of ascents in a permutation, i.e., the
        locations i where p[i] < p[i+1].
Examples
========
>>> p = Permutation([4, 0, 1, 3, 2])
>>> p.ascents()
[1, 2]
See Also
========
descents, inversions, min, max
"""
a = self.array_form
pos = [i for i in range(len(a) - 1) if a[i] < a[i + 1]]
return pos
def descents(self):
"""
        Returns the positions of descents in a permutation, i.e., the
        locations i where p[i] > p[i+1].
Examples
========
>>> p = Permutation([4, 0, 1, 3, 2])
>>> p.descents()
[0, 3]
See Also
========
ascents, inversions, min, max
"""
a = self.array_form
pos = [i for i in range(len(a) - 1) if a[i] > a[i + 1]]
return pos
def max(self):
"""
The maximum element moved by the permutation.
Examples
========
>>> p = Permutation([1, 0, 2, 3, 4])
>>> p.max()
1
See Also
========
min, descents, ascents, inversions
"""
max = 0
a = self.array_form
for i, ai in enumerate(a):
if ai != i and ai > max:
max = ai
return max
def min(self):
"""
The minimum element moved by the permutation.
Examples
========
>>> p = Permutation([0, 1, 4, 3, 2])
>>> p.min()
2
See Also
========
max, descents, ascents, inversions
"""
a = self.array_form
min = len(a)
for i, ai in enumerate(a):
if ai != i and ai < min:
min = ai
return min
def inversions(self):
"""
Computes the number of inversions of a permutation.
An inversion is where i > j but p[i] < p[j].
        For small permutations it iterates over all pairs i, j
        and counts the inversions directly.
        For large permutations it uses a variation of merge
        sort to count the inversions.
References
==========
* https://www.cp.eng.chula.ac.th/~prabhas//teaching/algo/algo2008/count-inv.htm
Examples
========
>>> p = Permutation([0, 1, 2, 3, 4, 5])
>>> p.inversions()
0
>>> Permutation([3, 2, 1, 0]).inversions()
6
See Also
========
descents, ascents, min, max
"""
inversions = 0
a = self.array_form
n = len(a)
if n < 130:
for i in range(n - 1):
b = a[i]
for c in a[i + 1:]:
if b > c:
inversions += 1
else:
k = 1
right = 0
arr = a[:]
temp = a[:]
while k < n:
i = 0
while i + k < n:
right = i + k * 2 - 1
if right >= n:
right = n - 1
inversions += _merge(arr, temp, i, i + k, right)
i = i + k * 2
k = k * 2
return inversions
def commutator(self, x):
"""Return the commutator of self and x: ``~x*~self*x*self``
If f and g are part of a group, G, then the commutator of f and g
is the group identity iff f and g commute, i.e. fg == gf.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([0, 2, 3, 1])
>>> x = Permutation([2, 0, 3, 1])
>>> c = p.commutator(x)
>>> c
Permutation([2, 1, 3, 0])
>>> c == ~x*~p*x*p
True
>>> I = Permutation(3)
>>> p = [I + i for i in range(6)]
>>> for i in range(len(p)):
... for j in range(len(p)):
... c = p[i].commutator(p[j])
... if p[i]*p[j] == p[j]*p[i]:
... assert c == I
... else:
... assert c != I
...
References
==========
        * https://en.wikipedia.org/wiki/Commutator
"""
a = self.array_form
b = x.array_form
n = len(a)
if len(b) != n:
raise ValueError('The permutations must be of equal size.')
inva = [None]*n
for i in range(n):
inva[a[i]] = i
invb = [None]*n
for i in range(n):
invb[b[i]] = i
return _af_new([a[b[inva[i]]] for i in invb])
def signature(self):
"""
Gives the signature of the permutation needed to place the
elements of the permutation in canonical order.
The signature is calculated as (-1)^<number of inversions>
Examples
========
>>> p = Permutation([0, 1, 2])
>>> p.inversions()
0
>>> p.signature()
1
>>> q = Permutation([0, 2, 1])
>>> q.inversions()
1
>>> q.signature()
-1
See Also
========
inversions
"""
if self.is_even:
return 1
return -1
def order(self):
"""
Computes the order of a permutation.
When the permutation is raised to the power of its
order it equals the identity permutation.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([3, 1, 5, 2, 4, 0])
>>> p.order()
4
>>> (p**(p.order()))
Permutation([], size=6)
See Also
========
is_Identity, cardinality, length, rank, size
"""
return functools.reduce(lcm, [len(cycle) for cycle in self.cyclic_form], 1)
def length(self):
"""
Returns the number of integers moved by a permutation.
Examples
========
>>> Permutation([0, 3, 2, 1]).length()
2
>>> Permutation([[0, 1], [2, 3]]).length()
4
See Also
========
min, max, support, cardinality, order, rank, size
"""
return len(self.support())
@property
def cycle_structure(self):
"""Return the cycle structure of the permutation as a dictionary
indicating the multiplicity of each cycle length.
Examples
========
>>> Permutation.print_cyclic = True
>>> Permutation(3).cycle_structure
{1: 4}
>>> Permutation(0, 4, 3)(1, 2)(5, 6).cycle_structure
{2: 2, 3: 1}
"""
if self._cycle_structure:
rv = self._cycle_structure
else:
rv = defaultdict(int)
singletons = self.size
for c in self.cyclic_form:
rv[len(c)] += 1
singletons -= len(c)
if singletons:
rv[1] = singletons
self._cycle_structure = rv
return dict(rv) # make a copy
@property
def cycles(self):
"""
Returns the number of cycles contained in the permutation
(including singletons).
Examples
========
>>> Permutation([0, 1, 2]).cycles
3
>>> Permutation([0, 1, 2]).full_cyclic_form
[[0], [1], [2]]
>>> Permutation(0, 1)(2, 3).cycles
2
See Also
========
diofant.functions.combinatorial.numbers.stirling
"""
return len(self.full_cyclic_form)
def index(self):
"""
Returns the index of a permutation.
The index of a permutation is the sum of all subscripts j such
that p[j] is greater than p[j+1].
Examples
========
>>> p = Permutation([3, 0, 2, 1, 4])
>>> p.index()
2
"""
a = self.array_form
return sum(j for j in range(len(a) - 1) if a[j] > a[j + 1])
def runs(self):
"""
Returns the runs of a permutation.
An ascending sequence in a permutation is called a run [5].
Examples
========
>>> p = Permutation([2, 5, 7, 3, 6, 0, 1, 4, 8])
>>> p.runs()
[[2, 5, 7], [3, 6], [0, 1, 4, 8]]
>>> q = Permutation([1, 3, 2, 0])
>>> q.runs()
[[1, 3], [2], [0]]
"""
return runs(self.array_form)
def inversion_vector(self):
"""Return the inversion vector of the permutation.
        Each element of the inversion vector counts the number of
        elements in the permutation that are smaller than it and lie
        to its right.
The inversion vector is the same as the Lehmer encoding of a
permutation.
Examples
========
>>> p = Permutation([4, 8, 0, 7, 1, 5, 3, 6, 2])
>>> p.inversion_vector()
[4, 7, 0, 5, 0, 2, 1, 1]
>>> p = Permutation([3, 2, 1, 0])
>>> p.inversion_vector()
[3, 2, 1]
The inversion vector increases lexicographically with the rank
        of the permutation, the i-th element cycling through 0..i.
>>> p = Permutation(2)
>>> Permutation.print_cyclic = False
>>> while p:
... print(f'{p} {p.inversion_vector()} {p.rank()}')
... p = p.next_lex()
...
Permutation([0, 1, 2]) [0, 0] 0
Permutation([0, 2, 1]) [0, 1] 1
Permutation([1, 0, 2]) [1, 0] 2
Permutation([1, 2, 0]) [1, 1] 3
Permutation([2, 0, 1]) [2, 0] 4
Permutation([2, 1, 0]) [2, 1] 5
See Also
========
from_inversion_vector
"""
self_array_form = self.array_form
n = len(self_array_form)
inversion_vector = [0] * (n - 1)
for i in range(n - 1):
val = 0
for j in range(i + 1, n):
if self_array_form[j] < self_array_form[i]:
val += 1
inversion_vector[i] = val
return inversion_vector
def rank_trotterjohnson(self):
"""
        Returns the Trotter-Johnson rank, which we get from the minimal
        change algorithm. See [4] section 2.4.
Examples
========
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank_trotterjohnson()
0
>>> p = Permutation([0, 2, 1, 3])
>>> p.rank_trotterjohnson()
7
See Also
========
unrank_trotterjohnson, next_trotterjohnson
"""
if self.array_form == [] or self.is_Identity:
return 0
if self.array_form == [1, 0]:
return 1
perm = self.array_form
n = self.size
rank = 0
for j in range(1, n):
k = 1
i = 0
while perm[i] != j:
if perm[i] < j:
k += 1
i += 1
j1 = j + 1
if rank % 2 == 0:
rank = j1*rank + j1 - k
else:
rank = j1*rank + k - 1
return rank
@classmethod
def unrank_trotterjohnson(cls, size, rank):
"""
        Trotter-Johnson permutation unranking. See [4] section 2.4.
Examples
========
>>> Permutation.unrank_trotterjohnson(5, 10)
Permutation([0, 3, 1, 2, 4])
See Also
========
rank_trotterjohnson, next_trotterjohnson
"""
perm = [0]*size
r2 = 0
n = ifac(size)
pj = 1
for j in range(2, size + 1):
pj *= j
r1 = (rank * pj) // n
k = r1 - j*r2
if r2 % 2 == 0:
for i in range(j - 1, j - k - 1, -1):
perm[i] = perm[i - 1]
perm[j - k - 1] = j - 1
else:
for i in range(j - 1, k, -1):
perm[i] = perm[i - 1]
perm[k] = j - 1
r2 = r1
return _af_new(perm)
def next_trotterjohnson(self):
"""
Returns the next permutation in Trotter-Johnson order.
If self is the last permutation it returns None.
See [4] section 2.4.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([3, 0, 2, 1])
>>> p.rank_trotterjohnson()
4
>>> p = p.next_trotterjohnson()
>>> p
Permutation([0, 3, 2, 1])
>>> p.rank_trotterjohnson()
5
See Also
========
rank_trotterjohnson, unrank_trotterjohnson
"""
pi = self.array_form[:]
n = len(pi)
st = 0
rho = pi[:]
done = False
m = n-1
while m > 0 and not done:
d = rho.index(m)
for i in range(d, m):
rho[i] = rho[i + 1]
par = _af_parity(rho[:m])
if par == 1:
if d == m:
m -= 1
else:
pi[st + d], pi[st + d + 1] = pi[st + d + 1], pi[st + d]
done = True
else:
if d == 0:
m -= 1
st += 1
else:
pi[st + d], pi[st + d - 1] = pi[st + d - 1], pi[st + d]
done = True
if m != 0:
return _af_new(pi)
def get_precedence_matrix(self):
"""
Gets the precedence matrix. This is used for computing the
distance between two permutations.
Examples
========
>>> p = Permutation.josephus(3, 6, 1)
>>> Permutation.print_cyclic = False
>>> p
Permutation([2, 5, 3, 1, 4, 0])
>>> p.get_precedence_matrix()
Matrix([
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0],
[1, 1, 0, 1, 1, 1],
[1, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 0]])
See Also
========
get_precedence_distance, get_adjacency_matrix, get_adjacency_distance
"""
m = zeros(self.size)
perm = self.array_form
for i in range(m.rows):
for j in range(i + 1, m.cols):
m[perm[i], perm[j]] = 1
return m
def get_precedence_distance(self, other):
"""
Computes the precedence distance between two permutations.
Suppose p and p' represent n jobs. The precedence metric
counts the number of times a job j is preceded by job i
in both p and p'. This metric is commutative.
Examples
========
>>> p = Permutation([2, 0, 4, 3, 1])
>>> q = Permutation([3, 1, 2, 4, 0])
>>> p.get_precedence_distance(q)
7
>>> q.get_precedence_distance(p)
7
See Also
========
get_precedence_matrix, get_adjacency_matrix, get_adjacency_distance
"""
if self.size != other.size:
raise ValueError('The permutations must be of equal size.')
self_prec_mat = self.get_precedence_matrix()
other_prec_mat = other.get_precedence_matrix()
n_prec = 0
for i in range(self.size):
for j in range(self.size):
if i == j:
continue
if self_prec_mat[i, j] * other_prec_mat[i, j] == 1:
n_prec += 1
d = self.size * (self.size - 1)//2 - n_prec
return d
def get_adjacency_matrix(self):
"""
Computes the adjacency matrix of a permutation.
If job i is adjacent to job j in a permutation p
then we set m[i, j] = 1 where m is the adjacency
matrix of p.
Examples
========
>>> p = Permutation.josephus(3, 6, 1)
>>> p.get_adjacency_matrix()
Matrix([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0]])
>>> q = Permutation([0, 1, 2, 3])
>>> q.get_adjacency_matrix()
Matrix([
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]])
See Also
========
get_precedence_matrix, get_precedence_distance, get_adjacency_distance
"""
m = zeros(self.size)
perm = self.array_form
for i in range(self.size - 1):
m[perm[i], perm[i + 1]] = 1
return m
def get_adjacency_distance(self, other):
"""
Computes the adjacency distance between two permutations.
This metric counts the number of times a pair i,j of jobs is
adjacent in both p and p'. If n_adj is this quantity then
the adjacency distance is n - n_adj - 1 [1]
[1] Reeves, Colin R. Landscapes, Operators and Heuristic search, Annals
of Operational Research, 86, pp 473-490. (1999)
Examples
========
>>> p = Permutation([0, 3, 1, 2, 4])
>>> q = Permutation.josephus(4, 5, 2)
>>> p.get_adjacency_distance(q)
3
>>> r = Permutation([0, 2, 1, 4, 3])
>>> p.get_adjacency_distance(r)
4
See Also
========
get_precedence_matrix, get_precedence_distance, get_adjacency_matrix
"""
if self.size != other.size:
raise ValueError('The permutations must be of the same size.')
self_adj_mat = self.get_adjacency_matrix()
other_adj_mat = other.get_adjacency_matrix()
n_adj = 0
for i in range(self.size):
for j in range(self.size):
if i == j:
continue
if self_adj_mat[i, j] * other_adj_mat[i, j] == 1:
n_adj += 1
d = self.size - n_adj - 1
return d
def get_positional_distance(self, other):
"""
Computes the positional distance between two permutations.
Examples
========
>>> p = Permutation([0, 3, 1, 2, 4])
>>> q = Permutation.josephus(4, 5, 2)
>>> r = Permutation([3, 1, 4, 0, 2])
>>> p.get_positional_distance(q)
12
>>> p.get_positional_distance(r)
12
See Also
========
get_precedence_distance, get_adjacency_distance
"""
a = self.array_form
b = other.array_form
if len(a) != len(b):
raise ValueError('The permutations must be of the same size.')
return sum(abs(a[i] - b[i]) for i in range(len(a)))
@classmethod
def josephus(cls, m, n, s=1):
"""Return as a permutation the shuffling of range(n) using the Josephus
scheme in which every m-th item is selected until all have been chosen.
The returned permutation has elements listed by the order in which they
were selected.
The parameter ``s`` stops the selection process when there are ``s``
items remaining and these are selected by continuing the selection,
counting by 1 rather than by ``m``.
Consider selecting every 3rd item from 6 until only 2 remain::
choices chosen
======== ======
012345
01 345 2
01 34 25
01 4 253
0 4 2531
0 25314
253140
Examples
========
>>> Permutation.josephus(3, 6, 2).array_form
[2, 5, 3, 1, 4, 0]
References
==========
* https://en.wikipedia.org/wiki/Flavius_Josephus
* https://en.wikipedia.org/wiki/Josephus_problem
"""
from collections import deque
m -= 1
Q = deque(list(range(n)))
perm = []
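        # rotate m items from the front to the back, then select the next
        # item; once at most max(s, 1) items remain, take them in order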
while len(Q) > max(s, 1):
for _ in range(m):
Q.append(Q.popleft())
perm.append(Q.popleft())
perm.extend(list(Q))
return Perm(perm)
@classmethod
def from_inversion_vector(cls, inversion):
"""
Calculates the permutation from the inversion vector.
Examples
========
>>> Permutation.print_cyclic = False
>>> Permutation.from_inversion_vector([3, 2, 1, 0, 0])
Permutation([3, 2, 1, 0, 4, 5])
"""
size = len(inversion)
N = list(range(size + 1))
perm = []
try:
for k in range(size):
val = N[inversion[k]]
perm.append(val)
N.remove(val)
except IndexError as exc:
raise ValueError('The inversion vector is not valid.') from exc
perm.extend(N)
return _af_new(perm)
@classmethod
def random(cls, n):
"""
Generates a random permutation of length ``n``.
Uses the underlying Python pseudo-random number generator.
Examples
========
>>> Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1]))
True
"""
perm_array = list(range(n))
random.shuffle(perm_array)
return _af_new(perm_array)
@classmethod
def unrank_lex(cls, size, rank):
"""
Lexicographic permutation unranking.
Examples
========
>>> Permutation.print_cyclic = False
>>> a = Permutation.unrank_lex(5, 10)
>>> a.rank()
10
>>> a
Permutation([0, 2, 4, 1, 3])
See Also
========
rank, next_lex
"""
perm_array = [0] * size
psize = 1
for i in range(size):
new_psize = psize*(i + 1)
d = (rank % new_psize) // psize
rank -= d*psize
perm_array[size - i - 1] = d
for j in range(size - i, size):
if perm_array[j] > d - 1:
perm_array[j] += 1
psize = new_psize
return _af_new(perm_array)
# global flag to control how permutations are printed
# when True, Permutation([0, 2, 1, 3]) -> Cycle(1, 2)
# when False, Permutation([0, 2, 1, 3]) -> Permutation([0, 2, 1])
print_cyclic = True
def _merge(arr, temp, left, mid, right):
"""
Merges two sorted arrays and calculates the inversion count.
Helper function for calculating inversions. This method is
for internal use only.
"""
i = k = left
j = mid
inv_count = 0
while i < mid and j <= right:
if arr[i] < arr[j]:
temp[k] = arr[i]
k += 1
i += 1
else:
temp[k] = arr[j]
k += 1
j += 1
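            # arr[i:mid] is sorted and arr[i] >= arr[j], so every element
            # still pending on the left side forms an inversion with arr[j]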
inv_count += (mid - i)
while i < mid:
temp[k] = arr[i]
k += 1
i += 1
if j <= right:
k += right - j + 1
j += right - j + 1
arr[left:k + 1] = temp[left:k + 1]
else:
arr[left:right + 1] = temp[left:right + 1]
return inv_count
Perm = Permutation
_af_new = Perm._af_new
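
# A minimal usage sketch (illustrative only, not part of the original
# module), showing that composition and lexicographic ranking round-trip
# as the docstrings above describe:
#
# >>> Permutation.print_cyclic = False
# >>> p = Permutation([2, 0, 1])
# >>> q = ~p                                 # inverse, Permutation([1, 2, 0])
# >>> (p*q).is_Identity                      # (p*q)(i) = q(p(i)) = i
# True
# >>> Permutation.unrank_lex(3, p.rank()) == p
# True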
|
diofant/diofant
|
diofant/combinatorics/permutations.py
|
Python
|
bsd-3-clause
| 72,579
| 0.000096
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 14:11:31 2015
@author: Martin Friedl
"""
from datetime import date
import numpy as np
from Patterns.GrowthTheoryCell import make_theory_cell
from Patterns.GrowthTheoryCell_100_3BranchDevices import make_theory_cell_3br
from Patterns.GrowthTheoryCell_100_4BranchDevices import make_theory_cell_4br
from gdsCAD_py3.core import Cell, Boundary, CellArray, Layout, Path
from gdsCAD_py3.shapes import Box, Rectangle, Label
from gdsCAD_py3.templates100 import Wafer_GridStyle, dashed_line
WAFER_ID = 'XXXX' # CHANGE THIS FOR EACH DIFFERENT WAFER
PATTERN = 'SQ1.2'
putOnWafer = True # Output full wafer or just a single pattern?
HighDensity = False # High density of triangles?
glbAlignmentMarks = False
tDicingMarks = 10. # Dicing mark line thickness (um)
rotAngle = 0. # Rotation angle of the membranes
wafer_r = 25e3
waferVer = '100 Membranes Multi-Use v1.2'
waferLabel = waferVer + '\n' + date.today().strftime("%d%m%Y")
# Layers
l_smBeam = 0
l_lgBeam = 1
l_drawing = 100
# %% Wafer template for MBE growth
class MBE100Wafer(Wafer_GridStyle):
"""
A 2" wafer divided into square cells
"""
def __init__(self, name, cells=None):
Wafer_GridStyle.__init__(self, name=name, cells=cells, block_gap=1200.)
# The placement of the wafer alignment markers
am_x = 1.5e4
am_y = 1.5e4
self.align_pts = np.array([am_x, am_y])
self.align_pts = np.vstack((self.align_pts, self.align_pts *
(-1, 1))) # Reflect about y-axis
self.align_pts = np.vstack((self.align_pts, self.align_pts *
(1, -1))) # Reflect about x-axis
self.wafer_r = 25e3
self.block_size = np.array([10e3, 10e3])
self._place_blocks(radius=self.wafer_r + 5e3)
# if glbAlignmentMarks:
# self.add_aligment_marks(l_lgBeam)
# self.add_orientation_text(l_lgBeam)
# self.add_dicing_marks() # l_lgBeam, mkWidth=mkWidth Width of dicing marks
self.add_blocks()
self.add_wafer_outline(layers=l_drawing)
self.add_dashed_dicing_marks(layers=[l_lgBeam])
self.add_block_labels(layers=[l_lgBeam])
self.add_prealignment_markers(layers=[l_lgBeam])
self.add_tem_membranes([0.08, 0.012, 0.028, 0.044], 2000, 1, l_smBeam)
self.add_theory_cells()
self.add_chip_labels()
# self.add_blockLabels(l_lgBeam)
# self.add_cellLabels(l_lgBeam)
bottom = np.array([0, -self.wafer_r * 0.9])
# top = np.array([0, -1]) * bottom
self.add_waferLabel(waferLabel, l_drawing, pos=bottom)
def add_block_labels(self, layers):
txtSize = 800
for (i, pt) in enumerate(self.block_pts):
origin = (pt + np.array([0.5, 0.5])) * self.block_size
blk_lbl = self.blockcols[pt[0]] + self.blockrows[pt[1]]
for l in layers:
txt = Label(blk_lbl, txtSize, layer=l)
bbox = txt.bounding_box
offset = np.array(pt)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
lbl_cell = Cell("lbl_" + blk_lbl)
lbl_cell.add(txt)
origin += np.array([0, 0])
self.add(lbl_cell, origin=origin)
def add_dashed_dicing_marks(self, layers):
if type(layers) is not list:
layers = [layers]
width = 10. / 2
dashlength = 2000
r = self.wafer_r
rng = np.floor(self.wafer_r / self.block_size).astype(int)
dmarks = Cell('DIC_MRKS')
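        # each dicing line is clipped to the wafer circle: a line at
        # offset x (or y) has endpoints satisfying x**2 + y**2 == r**2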
for l in layers:
for x in np.arange(-rng[0], rng[0] + 1) * self.block_size[0]:
y = np.sqrt(r ** 2 - x ** 2)
vm = dashed_line([x, y], [x, -y], dashlength, width, layer=l)
dmarks.add(vm)
for y in np.arange(-rng[1], rng[1] + 1) * self.block_size[1]:
x = np.sqrt(r ** 2 - y ** 2)
hm = dashed_line([x, y], [-x, y], dashlength, width, layer=l)
dmarks.add(hm)
self.add(dmarks)
def add_prealignment_markers(self, layers, mrkr_size=7):
if mrkr_size % 2 == 0: # Number is even, but we need odd numbers
mrkr_size += 1
if type(layers) is not list:
layers = [layers]
for l in layers:
rect_size = 10. # 10 um large PAMM rectangles
marker_rect = Rectangle([-rect_size / 2., -rect_size / 2.], [rect_size / 2., rect_size / 2.], layer=l)
marker = Cell('10umMarker')
marker.add(marker_rect)
# Make one arm of the PAMM array
marker_arm = Cell('PAMM_Arm')
# Define the positions of the markers, they increase in spacing by 1 um each time:
mrkr_positions = [75 * n + (n - 1) * n // 2 for n in range(1, (mrkr_size - 1) // 2 + 1)]
for pos in mrkr_positions:
marker_arm.add(marker, origin=[pos, 0])
# Build the final PAMM Marker
pamm_cell = Cell('PAMM_Marker')
pamm_cell.add(marker) # Center marker
pamm_cell.add(marker_arm) # Right arm
pamm_cell.add(marker_arm, rotation=180) # Left arm
pamm_cell.add(marker_arm, rotation=90) # Top arm
pamm_cell.add(marker_arm, rotation=-90) # Bottom arm
for pos in mrkr_positions:
pamm_cell.add(marker_arm, origin=[pos, 0], rotation=90) # Top arms
pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=90)
pamm_cell.add(marker_arm, origin=[pos, 0], rotation=-90) # Bottom arms
pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=-90)
# Make the 4 tick marks that mark the center of the array
h = 30.
w = 100.
tick_mrk = Rectangle([-w / 2., -h / 2.], [w / 2, h / 2.], layer=l)
tick_mrk_cell = Cell("TickMark")
tick_mrk_cell.add(tick_mrk)
pos = mrkr_positions[-1] + 75 + w / 2.
pamm_cell.add(tick_mrk_cell, origin=[pos, 0])
pamm_cell.add(tick_mrk_cell, origin=[-pos, 0])
pamm_cell.add(tick_mrk_cell, origin=[0, pos], rotation=90)
pamm_cell.add(tick_mrk_cell, origin=[0, -pos], rotation=90)
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(pamm_cell, origin=(center_x + 2000, center_y))
block.add(pamm_cell, origin=(center_x - 2000, center_y))
def add_tem_membranes(self, widths, length, pitch, layer):
tem_membranes = Cell('TEM_Membranes')
n = 5
curr_y = 0
for width in widths:
membrane = Path([(-length / 2., 0), (length / 2., 0)], width=width, layer=layer)
membrane_cell = Cell('Membrane_w{:.0f}'.format(width * 1000))
membrane_cell.add(membrane)
membrane_array = CellArray(membrane_cell, 1, n, (0, pitch))
membrane_array_cell = Cell('MembraneArray_w{:.0f}'.format(width * 1000))
membrane_array_cell.add(membrane_array)
tem_membranes.add(membrane_array_cell, origin=(0, curr_y))
curr_y += n * pitch
n2 = 3
tem_membranes2 = Cell('Many_TEM_Membranes')
tem_membranes2.add(CellArray(tem_membranes, 1, n2, (0, n * len(widths) * pitch)))
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(tem_membranes2, origin=(center_x, center_y + 2000))
def add_theory_cells(self):
theory_cells = Cell('TheoryCells')
theory_cells.add(make_theory_cell(wafer_orient='100'), origin=(-400, 0))
theory_cells.add(make_theory_cell_3br(), origin=(0, 0))
theory_cells.add(make_theory_cell_4br(), origin=(400, 0))
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(theory_cells, origin=(center_x, center_y - 2000))
def add_chip_labels(self):
wafer_lbl = PATTERN + '\n' + WAFER_ID
text = Label(wafer_lbl, 20., layer=l_lgBeam)
text.translate(tuple(np.array(-text.bounding_box.mean(0)))) # Center justify label
chip_lbl_cell = Cell('chip_label')
chip_lbl_cell.add(text)
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(chip_lbl_cell, origin=(center_x, center_y - 2850))
class Frame(Cell):
"""
    Make a frame for writing to with e-beam lithography.
    Params:
    - name: the name of the frame, just like when naming a cell
    - size: the size of the frame as an array [xsize, ysize]
    - border_layers: the layer (or list of layers) on which the frame border is drawn
"""
def __init__(self, name, size, border_layers):
if not (type(border_layers) == list):
border_layers = [border_layers]
Cell.__init__(self, name)
self.size_x, self.size_y = size
# Create the border of the cell
for l in border_layers:
self.border = Box(
(-self.size_x / 2., -self.size_y / 2.),
(self.size_x / 2., self.size_y / 2.),
1,
layer=l)
self.add(self.border) # Add border to the frame
self.align_markers = None
def make_align_markers(self, t, w, position, layers, joy_markers=False, camps_markers=False):
if not (type(layers) == list):
layers = [layers]
top_mk_cell = Cell('AlignmentMark')
for l in layers:
if not joy_markers:
am0 = Rectangle((-w / 2., -w / 2.), (w / 2., w / 2.), layer=l)
rect_mk_cell = Cell("RectMarker")
rect_mk_cell.add(am0)
top_mk_cell.add(rect_mk_cell)
elif joy_markers:
crosspts = [(0, 0), (w / 2., 0), (w / 2., t), (t, t), (t, w / 2), (0, w / 2), (0, 0)]
crosspts.extend(tuple(map(tuple, (-np.array(crosspts)).tolist())))
am0 = Boundary(crosspts, layer=l) # Create gdsCAD shape
joy_mk_cell = Cell("JOYMarker")
joy_mk_cell.add(am0)
top_mk_cell.add(joy_mk_cell)
if camps_markers:
emw = 20. # 20 um e-beam marker width
camps_mk = Rectangle((-emw / 2., -emw / 2.), (emw / 2., emw / 2.), layer=l)
camps_mk_cell = Cell("CAMPSMarker")
camps_mk_cell.add(camps_mk)
top_mk_cell.add(camps_mk_cell, origin=[100., 100.])
top_mk_cell.add(camps_mk_cell, origin=[100., -100.])
top_mk_cell.add(camps_mk_cell, origin=[-100., 100.])
top_mk_cell.add(camps_mk_cell, origin=[-100., -100.])
self.align_markers = Cell("AlignMarkers")
self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, -1]))
self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, -1]))
self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, 1]))
self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, 1]))
self.add(self.align_markers)
def make_slit_array(self, _pitches, spacing, _widths, _lengths, rot_angle,
array_height, array_width, array_spacing, layers):
if not (type(layers) == list):
layers = [layers]
if not (type(_pitches) == list):
_pitches = [_pitches]
if not (type(_lengths) == list):
_lengths = [_lengths]
if not (type(_widths) == list):
_widths = [_widths]
manyslits = i = j = None
for l in layers:
i = -1
j = -1
manyslits = Cell("SlitArray")
pitch = _pitches[0]
for length in _lengths:
j += 1
i = -1
for width in _widths:
# for pitch in pitches:
i += 1
if i % 3 == 0:
j += 1 # Move to array to next line
i = 0 # Restart at left
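                    # stretch the vertical pitch so the spacing measured
                    # perpendicular to the rotated slits stays equal to pitch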
pitch_v = pitch / np.cos(np.deg2rad(rot_angle))
# widthV = width / np.cos(np.deg2rad(rotAngle))
nx = int(array_width / (length + spacing))
ny = int(array_height / pitch_v)
# Define the slits
slit = Cell("Slits")
rect = Rectangle((-length / 2., -width / 2.), (length / 2., width / 2.), layer=l)
rect = rect.copy().rotate(rot_angle)
slit.add(rect)
slits = CellArray(slit, nx, ny, (length + spacing, pitch_v))
slits.translate((-(nx - 1) * (length + spacing) / 2., -(ny - 1) * pitch_v / 2.))
slit_array = Cell("SlitArray")
slit_array.add(slits)
text = Label('w/p/l\n%i/%i/%i' % (width * 1000, pitch, length), 5, layer=l)
lbl_vertical_offset = 1.35
if j % 2 == 0:
text.translate(
tuple(np.array(-text.bounding_box.mean(0)) + np.array((
0, -array_height / lbl_vertical_offset)))) # Center justify label
else:
text.translate(
tuple(np.array(-text.bounding_box.mean(0)) + np.array((
0, array_height / lbl_vertical_offset)))) # Center justify label
slit_array.add(text)
manyslits.add(slit_array,
origin=((array_width + array_spacing) * i, (
array_height + 2. * array_spacing) * j - array_spacing / 2.))
self.add(manyslits,
origin=(-i * (array_width + array_spacing) / 2, -(j + 1.5) * (
array_height + array_spacing) / 2))
# %%Create the pattern that we want to write
lgField = Frame("LargeField", (2000., 2000.), []) # Create the large write field
lgField.make_align_markers(20., 200., (850., 850.), l_lgBeam, joy_markers=True, camps_markers=True)
# Define parameters that we will use for the slits
widths = [0.004, 0.008, 0.012, 0.016, 0.028, 0.044]
pitches = [1.0, 2.0]
lengths = [10., 20.]
smFrameSize = 400
slitColumnSpacing = 3.
# Create the smaller write field and corresponding markers
smField1 = Frame("SmallField1", (smFrameSize, smFrameSize), [])
smField1.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField1.make_slit_array(pitches[0], slitColumnSpacing, widths, lengths[0], rotAngle, 100, 100, 30, l_smBeam)
smField2 = Frame("SmallField2", (smFrameSize, smFrameSize), [])
smField2.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField2.make_slit_array(pitches[0], slitColumnSpacing, widths, lengths[1], rotAngle, 100, 100, 30, l_smBeam)
smField3 = Frame("SmallField3", (smFrameSize, smFrameSize), [])
smField3.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField3.make_slit_array(pitches[1], slitColumnSpacing, widths, lengths[0], rotAngle, 100, 100, 30, l_smBeam)
smField4 = Frame("SmallField4", (smFrameSize, smFrameSize), [])
smField4.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField4.make_slit_array(pitches[1], slitColumnSpacing, widths, lengths[1], rotAngle, 100, 100, 30, l_smBeam)
centerAlignField = Frame("CenterAlignField", (smFrameSize, smFrameSize), [])
centerAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
# Add everything together to a top cell
topCell = Cell("TopCell")
topCell.add(lgField)
smFrameSpacing = 400 # Spacing between the three small frames
dx = smFrameSpacing + smFrameSize
dy = smFrameSpacing + smFrameSize
topCell.add(smField1, origin=(-dx / 2., dy / 2.))
topCell.add(smField2, origin=(dx / 2., dy / 2.))
topCell.add(smField3, origin=(-dx / 2., -dy / 2.))
topCell.add(smField4, origin=(dx / 2., -dy / 2.))
topCell.add(centerAlignField, origin=(0., 0.))
topCell.spacing = np.array([4000., 4000.])
# %%Create the layout and output GDS file
layout = Layout('LIBRARY')
if putOnWafer: # Fit as many patterns on a 2inch wafer as possible
wafer = MBE100Wafer('MembranesWafer', cells=[topCell])
layout.add(wafer)
# layout.show()
else: # Only output a single copy of the pattern (not on a wafer)
layout.add(topCell)
layout.show()
filestring = str(waferVer) + '_' + WAFER_ID + '_' + date.today().strftime("%d%m%Y") + ' dMark' + str(tDicingMarks)
filename = filestring.replace(' ', '_') + '.gds'
layout.save(filename)
if putOnWafer:  # 'wafer' is only defined when the pattern was put on a wafer
    cell_layout = Layout('LIBRARY')
    cell_layout.add(wafer.blocks[0])
    cell_layout.save(filestring.replace(' ', '_') + '_block' + '.gds')
# Output up chip for doing aligned jobs
layout_field = Layout('LIBRARY')
layout_field.add(topCell)
layout_field.save(filestring.replace(' ', '_') + '_2mmField.gds')
|
Martin09/E-BeamPatterns
|
100 Wafers - 1cm Squares/Multi-Use Pattern/v1.2/MembraneDesign_100Wafer_v1.1.py
|
Python
|
gpl-3.0
| 17,018
| 0.002585
|
"""
Persistence configuration
"""
PERSISTENCE_BACKEND = 'pypeman.persistence.SqliteBackend'
PERSISTENCE_CONFIG = {"path":'/tmp/to_be_removed_849827198746.sqlite'}
|
jrmi/pypeman
|
pypeman/tests/settings/test_settings_sqlite_persist.py
|
Python
|
apache-2.0
| 164
| 0.006098
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
__all__ = ['Minutes20Test']
class Minutes20Test(BackendTest):
BACKEND = 'minutes20'
def test_new_messages(self):
for message in self.backend.iter_unread_messages():
pass
|
blckshrk/Weboob
|
modules/minutes20/test.py
|
Python
|
agpl-3.0
| 982
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Vodafone España, S.A.
# Author: Andrew Bird
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from wader.common.consts import WADER_CONNTYPE_USB
from core.hardware.zte import (ZTEWCDMADevicePlugin,
ZTEWCDMACustomizer,
ZTEWrapper)
class ZTEMF180Wrapper(ZTEWrapper):
def send_ussd(self, ussd):
"""Sends the ussd command ``ussd``"""
# XXX: assumes it's the same as 637U
# MF180 wants request in ascii chars even though current
# set might be ucs2
return super(ZTEMF180Wrapper, self).send_ussd(ussd, force_ascii=True)
class ZTEMF180Customizer(ZTEWCDMACustomizer):
wrapper_klass = ZTEMF180Wrapper
class ZTEMF180(ZTEWCDMADevicePlugin):
""":class:`~core.plugin.DevicePlugin` for ZTE's MF180"""
name = "ZTE MF180"
version = "0.1"
author = u"Andrew Bird"
custom = ZTEMF180Customizer()
__remote_name__ = "MF180"
__properties__ = {
'ID_VENDOR_ID': [0x19d2],
'ID_MODEL_ID': [0x2003],
}
conntype = WADER_CONNTYPE_USB
zte_mf180 = ZTEMF180()
|
andrewbird/wader
|
plugins/devices/zte_mf180.py
|
Python
|
gpl-2.0
| 1,826
| 0.001644
|
from __future__ import absolute_import, unicode_literals
import mock
import pytest
from ddns_zones_updater.configreader import ConfigReader
from ddns_zones_updater.core import DDNSZoneUpdater
@pytest.fixture
def fake_config_reader_with_two_hosts():
host_1 = mock.Mock(do_update=mock.Mock())
host_2 = mock.Mock(do_update=mock.Mock())
class FakeHostManager(mock.Mock):
__iter__ = mock.Mock(return_value=(h for h in [host_1, host_2]))
class FakeConfigReader(mock.Mock):
hosts = FakeHostManager()
return [host_1, host_2], FakeConfigReader()
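# Note: FakeHostManager.__iter__ returns a single generator object, so the
# fake manager can be iterated exactly once -- sufficient here because each
# test drives one run() pass over the hosts.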
@pytest.fixture
def updater_without_calling_init(request):
patcher = mock.patch.object(DDNSZoneUpdater, "__init__", return_value=None)
patcher.start()
request.addfinalizer(patcher.stop)
return DDNSZoneUpdater("path/to/config.ini")
@mock.patch.object(ConfigReader, "read")
@mock.patch.object(ConfigReader, "__init__", return_value=None)
def test_initializer(mock_init, mock_read):
DDNSZoneUpdater("/tmp/foo.ini")
mock_init.assert_called_once_with("/tmp/foo.ini")
mock_read.assert_called_once_with()
def test_get_current_wan_ip(updater_without_calling_init):
updater = updater_without_calling_init
with mock.patch("ipgetter.myip", return_value="149.0.0.31") as mock_my_ip:
assert updater.current_wan_ip() == "149.0.0.31"
mock_my_ip.assert_called_once_with()
def test_run(updater_without_calling_init, fake_config_reader_with_two_hosts):
updater = updater_without_calling_init
hosts, updater.config = fake_config_reader_with_two_hosts
with mock.patch("ipgetter.myip", return_value="1.1.1.1") as mock_my_ip:
updater.run()
for host in hosts:
host.do_update.assert_called_once_with("1.1.1.1")
mock_my_ip.assert_called_once_with()
|
bh/python-ddns-zones-updater
|
tests/test_core.py
|
Python
|
gpl-2.0
| 1,816
| 0
|
#!/usr/bin/env python
import unittest
from test import support
import socket
import urllib.request
import sys
import os
import email.message
def _open_with_retry(func, host, *args, **kwargs):
# Connecting to remote hosts is flaky. Make it more robust
# by retrying the connection several times.
last_exc = None
for i in range(3):
try:
return func(host, *args, **kwargs)
except IOError as err:
last_exc = err
continue
except:
raise
raise last_exc
class URLTimeoutTest(unittest.TestCase):
TIMEOUT = 10.0
def setUp(self):
socket.setdefaulttimeout(self.TIMEOUT)
def tearDown(self):
socket.setdefaulttimeout(None)
def testURLread(self):
f = _open_with_retry(urllib.request.urlopen, "http://www.python.org/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
"""Tests urllib.reqest.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
setUp is not used for always constructing a connection to
    http://www.python.org/ since there are a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
"""
def urlopen(self, *args):
return _open_with_retry(urllib.request.urlopen, *args)
def test_basic(self):
# Simple test expected to pass.
open_url = self.urlopen("http://www.python.org/")
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assert_(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assert_(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_readlines(self):
# Test both readline and readlines.
open_url = self.urlopen("http://www.python.org/")
try:
self.assert_(isinstance(open_url.readline(), bytes),
"readline did not return bytes")
self.assert_(isinstance(open_url.readlines(), list),
"readlines did not return a list")
finally:
open_url.close()
def test_info(self):
# Test 'info'.
open_url = self.urlopen("http://www.python.org/")
try:
info_obj = open_url.info()
finally:
open_url.close()
self.assert_(isinstance(info_obj, email.message.Message),
"object returned by 'info' is not an instance of "
"email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "html")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
URL = "http://www.python.org/"
open_url = self.urlopen(URL)
try:
gotten_url = open_url.geturl()
finally:
open_url.close()
self.assertEqual(gotten_url, URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
URL = "http://www.python.org/XXXinvalidXXX"
open_url = urllib.request.FancyURLopener().open(URL)
try:
code = open_url.getcode()
finally:
open_url.close()
self.assertEqual(code, 404)
def test_fileno(self):
if sys.platform in ('win32',):
# On Windows, socket handles are not file descriptors; this
# test can't pass on Windows.
return
# Make sure fd returned by fileno is valid.
open_url = self.urlopen("http://www.python.org/")
fd = open_url.fileno()
FILE = os.fdopen(fd, encoding='utf-8')
try:
self.assert_(FILE.read(), "reading from file created using fd "
"returned by fileno failed")
finally:
FILE.close()
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
self.assertRaises(IOError,
# SF patch 809915: In Sep 2003, VeriSign started
# highjacking invalid .com and .net addresses to
# boost traffic to their own site. This test
# started failing then. One hopes the .invalid
# domain will be spared to serve its defined
# purpose.
# urllib.urlopen, "http://www.sadflkjsasadf.com/")
urllib.request.urlopen,
"http://www.python.invalid./")
class urlretrieveNetworkTests(unittest.TestCase):
"""Tests urllib.request.urlretrieve using the network."""
def urlretrieve(self, *args):
return _open_with_retry(urllib.request.urlretrieve, *args)
def test_basic(self):
# Test basic functionality.
file_location,info = self.urlretrieve("http://www.python.org/")
self.assert_(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
FILE = open(file_location, encoding='utf-8')
try:
self.assert_(FILE.read(), "reading from the file location returned"
" by urlretrieve failed")
finally:
FILE.close()
os.unlink(file_location)
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
file_location,info = self.urlretrieve("http://www.python.org/",
support.TESTFN)
self.assertEqual(file_location, support.TESTFN)
self.assert_(os.path.exists(file_location))
FILE = open(file_location, encoding='utf-8')
try:
self.assert_(FILE.read(), "reading from temporary file failed")
finally:
FILE.close()
os.unlink(file_location)
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
file_location, header = self.urlretrieve("http://www.python.org/")
os.unlink(file_location)
self.assert_(isinstance(header, email.message.Message),
"header is not an instance of email.message.Message")
def test_main():
support.requires('network')
support.run_unittest(URLTimeoutTest,
urlopenNetworkTests,
urlretrieveNetworkTests)
if __name__ == "__main__":
test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.0/Lib/test/test_urllibnet.py
|
Python
|
mit
| 6,937
| 0.001586
|
import json
with open('data/78mm.json', 'r') as _78mm:
polygons78 = json.load(_78mm)["features"][0]["geometry"]["geometries"]
with open('data/100mm.json', 'r') as _100mm:
polygons100 = json.load(_100mm)["features"][0]["geometry"]["geometries"]
with open('data/130mm.json', 'r') as _130mm:
polygons130 = json.load(_130mm)["features"][0]["geometry"]["geometries"]
def dot(x1, y1, x2, y2):
return x1*y1+x2*y2
def det(x1, y1, x2, y2):
return x1*y2-x2*y1
def dett(x0, y0, x1, y1, x2, y2):
z = det(x1-x0, y1-y0, x2-x0, y2-y0)
return -1 if z < 0 else z > 0
'''
inline DB ang(cPo p0,cPo p1){return acos(dot(p0,p1)/p0.len()/p1.len());}
def ang(x1, y1, x2, y2):
return
def arg(x1, y1, x2, y2):
DB a=ang(x,y);return~dett(x,y)?a:2*PI-a;}
return
'''
def intersect(lx1, ly1, lx2, ly2, rx1, ry1, rx2, ry2):
return 1 if (dett(lx1, ly1, lx2, ly2, rx1, ry1) * dett(lx1, ly1, lx2, ly2, rx2, ry2) <= 0 and
dett(rx1, ry1, rx2, ry2, lx1, ly1) * dett(rx1, ry1, rx2, ry2, lx2, ly2) <= 0) else 0
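# `within` below is an even-odd (ray casting) point-in-polygon test: it counts
# how many polygon edges the segment from (x, y) to a fixed far-away point
# (-3232, -4344) crosses; an odd crossing count means the point is inside.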
def within(p, x, y):
z = 0
for i in range(0, len(p)-1):
if x == p[i][0] and y == p[i][1]:
continue
if x == p[i+1][0] and y == p[i+1][1]:
continue
z += intersect(x, y, -3232, -4344, p[i][0], p[i][1], p[i+1][0], p[i+1][1])
return 1 if z % 2 == 1 else 0
def _check(p, d, x, y):
for i in range(0, len(p)):
if within(p[i]["coordinates"][0], x, y):
return [d, i]
return []
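# check(x, y) maps a point to a weight: 0.2 / 0.5 / 0.8 when it falls inside a
# 78 / 100 / 130 mm contour polygon respectively, and 1.0 otherwise (the
# commented-out values suggest these began as the raw depths 0.078/0.1/0.13).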
def check(x, y):
res = _check(polygons78, 78, x, y)
if len(res) > 0:
return 0.2 # 0.078
res = _check(polygons100, 100, x, y)
if len(res) > 0:
return 0.5 # 0.1
res = _check(polygons130, 130, x, y)
if len(res) > 0:
return 0.8 # 0.13
return 1.0
# init()
# #display()
# #x, y = 121.555764, 24.9833
#
# x, y = 121.565764, 24.9830
# res = check(x, y)
# print res
# if (len(res) > 0):
# if (res[0] == 78):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Red')
# if (res[0] == 100):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Orange')
# if (res[0] == 130):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Yellow')
# plt.plot(x, y, marker='*')
# ax.grid()
# ax.axis('equal')
# plt.show()
|
HackCigriculture/cigriculture-ml
|
src/polygon.py
|
Python
|
gpl-3.0
| 2,316
| 0.002159
|
## Copyright (C) 2017 Oscar Diaz Barriga
## This file is part of Comp-Process-STPatterns.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
# /* Tumbes */
# select count(*) from t_boya_medicion_minpres
# where latitud < -3.392 and latitud > -4.078
# /* Piura */
# select count(*) from t_boya_medicion_minpres
# where latitud < -4.078 and latitud > -6.382
# /* Lambayeque */
# select count(*) from t_boya_medicion_minpres
# where latitud < -6.382 and latitud > -7.177
# /* La Libertad */
# select count(*) from t_boya_medicion_minpres
# where latitud < -7.177 and latitud > -8.9722
# /* Ancash*/
# select count(*) from t_boya_medicion_minpres
# where latitud < -8.9722 and latitud > -10.593
import glob, os
import psycopg2
import datetime
db_user = "USER"
db_host = "IP_ADDRESS"
db_password = "PASSWORD"
output = "./Output/datos_total_boya3_est7_ca1.csv"
class Departamento (object):
def __init__(self, nombre, latitud_min, latitud_max):
self.nombre = nombre
self.latitud_min = latitud_min
self.latitud_max = latitud_max
class Zona (object):
def __init__(self, start_date, end_date, nombre, latitud_min, latitud_max, temperatura, presion, salinidad):
self.start_date = start_date
self.end_date = end_date
self.nombre = nombre
self.latitud_min = latitud_min
self.latitud_max = latitud_max
self.temperatura = temperatura
self.presion = presion
self.salinidad = salinidad
class boya_data (object):
def __init__(self, temperatura, presion, salinidad):
self.temperatura = temperatura
self.presion = presion
self.salinidad = salinidad
class estacion_data (object):
# def __init__(self, temperatura_m, punto_rocio_m, presion_nivel_mar):
# self.est_temperatura_m = temperatura_m
# self.est_punto_rocio_m= punto_rocio_m
# self.est_presion_nivel_mar = presion_nivel_mar
def __init__(self, temperatura_m, punto_rocio_m, presion_nivel_mar,
presion_est_media, velocidad_viento_media, temperatura_maxima,
temperatura_minima):
self.est_temperatura_m = temperatura_m
self.est_punto_rocio_m= punto_rocio_m
self.est_presion_nivel_mar = presion_nivel_mar
self.est_presion_est_media = presion_est_media
self.est_temperatura_minima = temperatura_minima
self.est_temperatura_maxima = temperatura_maxima
self.est_velocidad_viento_media = velocidad_viento_media
class caudal_data (object):
def __init__(self, caudal):
self.caudal = caudal
def database_select_date_between(start_date, end_date):
try:
conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select count(*) from t_boya_medicion_minpres where latitud < -3.392 and latitud > -4.078 AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
for row in rows:
print " ", row
def database_select_date_between_lat(start_latitud, end_latitud, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select count(*) from t_boya_medicion_minpres where latitud < %s AND latitud > %s AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_latitud, end_latitud, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
for row in rows:
count = row[0]
return count
def database_select_date_between_lat_avg(start_latitud, end_latitud, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select avg(temp), avg(pres), avg(psal) from t_boya_medicion_minpres " \
" where latitud < %s AND latitud > %s AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_latitud, end_latitud, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
b_data = None
for row in rows:
b_data = boya_data(row[0], row[1], row[2])
return b_data
def database_select_date_between_lat_avg_estacion(region, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "Select avg(em.temp_m), avg(em.punto_rocio_m), avg(em.presion_nivel_mar), " \
"avg(em.presion_est_m), avg(em.veloc_viento_m), avg(em.temp_max), avg(em.temp_min) " \
" From t_region r, t_estacion e, t_estacion_medicion em " \
" Where e.id_region = r.id_region AND r.nombre like '%s' " \
" AND em.id_estacion = e.id_estacion " \
" AND concat_ws('-',ano,mes,dia)::date >= '%s'::date " \
" AND concat_ws('-',ano,mes,dia)::date <= '%s'::date;"%(region, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
b_data = None
for row in rows:
b_data = estacion_data(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
return b_data
def database_select_date_between_lat_avg_caudal(region, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = " Select avg(c.caudal) From t_caudal_medicion c " \
" Where c.region like '%s' AND c.caudal != 9999 " \
" AND concat_ws('-',ano,mes,dia)::date >= '%s'::date " \
" AND concat_ws('-',ano,mes,dia)::date <= '%s'::date;"%(region, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
c_data = None
for row in rows:
c_data = caudal_data(row[0])
return c_data
# def count_boyas_range_space_and_time(i, start_date_unix, step_date, latitude, longitude):
# t_start = start_date_unix + i * step_date
# t_end = start_date_unix + (i + 1) * step_date
# start_date = datetime.datetime.fromtimestamp(t_start).strftime("%Y-%m-%d")
# end_date = datetime.datetime.fromtimestamp(t_end).strftime("%Y-%m-%d")
# count = database_select_date_between_lat(latitude, longitude, start_date, end_date)
# print "%s -- %s -> %s" % (start_date, end_date, count)
# return count
if __name__ == '__main__':
# datebase = 1422766800
maximo = 1467522000
periodo = 18
delta = 0
toDate = 24*3600*periodo
#n = 27
#27
# 26, 16 = 8 8
# 26, 18 = 8 10
# 26, 20 = 10 10
# 24, 20 = 9 10
# 22, 22 = 12, 12 2015-03-18
# 20, 24 = 13, 11
# 14, 34 = 21, 13
departamentos = []
departamentos.append(Departamento("Tumbes", "-3.392", "-4.078"))
departamentos.append(Departamento("Piura", "-4.078", "-6.382"))
departamentos.append(Departamento("Lambayeque", "-6.382", "-7.177"))
departamentos.append(Departamento("La Libertad", "-7.177", "-8.9722"))
departamentos.append(Departamento("Ancash", "-8.9722", "-10.593"))
rango_fechas = []
rango_fechas_status = []
start_date_unix = int(datetime.datetime.strptime("2015-03-05","%Y-%m-%d").strftime("%s"))
n = (maximo - start_date_unix) / (24 * 3600 * periodo)
print n
print "2015-03-05 --- ",
print datetime.datetime.fromtimestamp(maximo).strftime("%Y-%m-%d")
for i in range(n):
t_start = start_date_unix + i * toDate
t_end = start_date_unix + (i + 1) * toDate
start_date = datetime.datetime.fromtimestamp(t_start).strftime("%Y-%m-%d")
end_date = datetime.datetime.fromtimestamp(t_end).strftime("%Y-%m-%d")
rango_fechas.append([start_date, end_date, 1])
print (start_date + " - " + end_date)
for d in range(5):
print "--------- %s -------------" % departamentos[d].nombre
t_count = 0
cero_count = 1
count = 0
i = 0
for r in rango_fechas:
start_date = r[0]
end_date = r[1]
count = database_select_date_between_lat(departamentos[d].latitud_min, departamentos[d].latitud_max, start_date, end_date)
# print "%s -- %s -> %s" % (start_date, end_date, count)
if count > 0:
t_count = t_count + 1
rango_fechas[i][2] = 1*rango_fechas[i][2]
else:
rango_fechas[i][2] = 0*rango_fechas[i][2]
# print "*%s -- %s -> %s" % (start_date, end_date, count)
cero_count = cero_count*count
i += 1
print "Fallo %s ,"%(n - t_count),
print "OK : %s"%t_count
rango_fechas_ok = []
for i in rango_fechas:
if i[2] != 0:
rango_fechas_ok.append([i[0],i[1]])
print rango_fechas_ok
with open(output, 'w') as the_file:
the_file.write("region, boya_temp, boya_salinidad, est_temp, est_pto_rocio, est_presion, "
"est_presion_est_m, est_veloc_viento_m, est_temp_max, est_temp_min, caudal\n")
for d in range(5):
print "--------- %s -------------" % departamentos[d].nombre
t_count = 0
cero_count = 1
count = 0
for r in rango_fechas_ok:
start_date = r[0]
end_date = r[1]
data_boya_avg = database_select_date_between_lat_avg(departamentos[d].latitud_min,
departamentos[d].latitud_max,
start_date, end_date)
data_estacion_avg = database_select_date_between_lat_avg_estacion(departamentos[d].nombre, start_date, end_date)
data_caudal_avg = database_select_date_between_lat_avg_caudal(departamentos[d].nombre, start_date, end_date)
print "%s, boya_temp :%s\tboya_sal :%s\t" \
"est_temp: %s\test_pto_rocio :%s\test_presion :%s\t" \
"est_presion_est_m :%s\test_veloc_viento_m :%s\test_temp_max :%s\test_temp_min :%s\t" \
"caudal:%s" % \
(departamentos[d].nombre, data_boya_avg.temperatura, data_boya_avg.salinidad,
data_estacion_avg.est_temperatura_m, data_estacion_avg.est_punto_rocio_m,
data_estacion_avg.est_presion_nivel_mar, data_estacion_avg.est_presion_est_media,
data_estacion_avg.est_velocidad_viento_media, data_estacion_avg.est_temperatura_maxima, data_estacion_avg.est_temperatura_minima,
data_caudal_avg.caudal)
linea = "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n" % \
(departamentos[d].nombre, data_boya_avg.temperatura, data_boya_avg.salinidad,
data_estacion_avg.est_temperatura_m, data_estacion_avg.est_punto_rocio_m,
data_estacion_avg.est_presion_nivel_mar, data_estacion_avg.est_presion_est_media,
data_estacion_avg.est_velocidad_viento_media, data_estacion_avg.est_temperatura_maxima, data_estacion_avg.est_temperatura_minima,
data_caudal_avg.caudal)
the_file.write(linea)
|
oscardbpucp/Comp-Process-STPatterns
|
clean_and_pretreatment/datos_total_fase1v3-mod.py
|
Python
|
gpl-3.0
| 13,307
| 0.007139
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.queries import get_property_name
from jx_sqlite.utils import GUID, untyped_column
from mo_dots import concat_field, relative_field, set_default, startswith_field
from mo_json import EXISTS, OBJECT, STRUCT
from mo_logs import Log
class Schema(object):
"""
A Schema MAPS ALL COLUMNS IN SNOWFLAKE FROM THE PERSPECTIVE OF A SINGLE TABLE (a nested_path)
"""
def __init__(self, nested_path, snowflake):
if nested_path[-1] != '.':
Log.error("Expecting full nested path")
self.path = concat_field(snowflake.fact_name, nested_path[0])
self.nested_path = nested_path
self.snowflake = snowflake
# def add(self, column_name, column):
# if column_name != column.names[self.nested_path[0]]:
# Log.error("Logic error")
#
# self.columns.append(column)
#
# for np in self.nested_path:
# rel_name = column.names[np]
# container = self.namespace.setdefault(rel_name, set())
# hidden = [
# c
# for c in container
# if len(c.nested_path[0]) < len(np)
# ]
# for h in hidden:
# container.remove(h)
#
# container.add(column)
#
# container = self.namespace.setdefault(column.es_column, set())
# container.add(column)
# def remove(self, column_name, column):
# if column_name != column.names[self.nested_path[0]]:
# Log.error("Logic error")
#
# self.namespace[column_name] = [c for c in self.namespace[column_name] if c != column]
def __getitem__(self, item):
output = self.snowflake.namespace.columns.find(self.path, item)
return output
# def __copy__(self):
# output = Schema(self.nested_path)
# for k, v in self.namespace.items():
# output.namespace[k] = copy(v)
# return output
def get_column_name(self, column):
"""
RETURN THE COLUMN NAME, FROM THE PERSPECTIVE OF THIS SCHEMA
:param column:
:return: NAME OF column
"""
relative_name = relative_field(column.name, self.nested_path[0])
return get_property_name(relative_name)
@property
def namespace(self):
return self.snowflake.namespace
def keys(self):
"""
:return: ALL COLUMN NAMES
"""
return set(c.name for c in self.columns)
@property
def columns(self):
return self.snowflake.namespace.columns.find(self.snowflake.fact_name)
def column(self, prefix):
full_name = untyped_column(concat_field(self.nested_path, prefix))
return set(
c
for c in self.snowflake.namespace.columns.find(self.snowflake.fact_name)
for k, t in [untyped_column(c.name)]
if k == full_name and k != GUID
if c.jx_type not in [OBJECT, EXISTS]
)
def leaves(self, prefix):
full_name = concat_field(self.nested_path, prefix)
return set(
c
for c in self.snowflake.namespace.columns.find(self.snowflake.fact_name)
for k in [c.name]
if startswith_field(k, full_name) and k != GUID or k == full_name
if c.jx_type not in [OBJECT, EXISTS]
)
def map_to_sql(self, var=""):
"""
RETURN A MAP FROM THE RELATIVE AND ABSOLUTE NAME SPACE TO COLUMNS
"""
origin = self.nested_path[0]
if startswith_field(var, origin) and origin != var:
var = relative_field(var, origin)
fact_dict = {}
origin_dict = {}
for k, cs in self.namespace.items():
for c in cs:
if c.jx_type in STRUCT:
continue
if startswith_field(get_property_name(k), var):
origin_dict.setdefault(c.names[origin], []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(c.name, []).append(c)
elif origin == var:
origin_dict.setdefault(concat_field(var, c.names[origin]), []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(concat_field(var, c.name), []).append(c)
return set_default(origin_dict, fact_dict)
|
klahnakoski/TestLog-ETL
|
vendor/jx_sqlite/schema.py
|
Python
|
mpl-2.0
| 4,659
| 0.001502
|
__problem_title__ = "Comfortable distance"
__problem_url___ = "https://projecteuler.net/problem=364"
__problem_description__ = "There are seats in a row. people come after each other to fill the " \
"seats according to the following rules: We can verify that T(10) = " \
"61632 and T(1 000) mod 100 000 007 = 47255094. Find T(1 000 000) mod " \
"100 000 007."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
jrichte43/ProjectEuler
|
Problem-0364/solutions.py
|
Python
|
gpl-3.0
| 808
| 0.006188
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Kylin OS, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class LiveMigrateForm(forms.SelfHandlingForm):
current_host = forms.CharField(label=_("Current Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.ChoiceField(label=_("New Host"),
required=True,
help_text=_("Choose a Host to migrate to."))
disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
initial=False, required=False)
block_migration = forms.BooleanField(label=_("Block Migration"),
initial=False, required=False)
def __init__(self, request, *args, **kwargs):
super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
instance_id = initial.get('instance_id')
self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
initial=instance_id)
self.fields['host'].choices = self.populate_host_choices(request,
initial)
def populate_host_choices(self, request, initial):
hosts = initial.get('hosts')
current_host = initial.get('current_host')
host_list = [(host.hypervisor_hostname,
host.hypervisor_hostname)
for host in hosts
if host.hypervisor_hostname != current_host]
if host_list:
host_list.insert(0, ("", _("Select a new host")))
else:
host_list.insert(0, ("", _("No other hosts available.")))
return sorted(host_list)
def handle(self, request, data):
try:
block_migration = data['block_migration']
disk_over_commit = data['disk_over_commit']
api.nova.server_live_migrate(request,
data['instance_id'],
data['host'],
block_migration=block_migration,
disk_over_commit=disk_over_commit)
msg = _('The instance is preparing the live migration '
'to host "%s".') % data['host']
messages.success(request, msg)
return True
except Exception:
msg = _('Failed to live migrate instance to '
'host "%s".') % data['host']
redirect = reverse('horizon:admin:instances:index')
exceptions.handle(request, msg, redirect=redirect)
|
Havate/havate-openstack
|
proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/admin/instances/forms.py
|
Python
|
apache-2.0
| 3,609
| 0
|
#!/usr/bin/env python
# -*- coding: Latin-1 -*-
"""PyUdd, a python module for OllyDbg .UDD files
Ange Albertini 2010, Public domain
"""
__author__ = 'Ange Albertini'
__contact__ = 'ange@corkami.com'
__revision__ = "$Revision$"
__version__ = '0.1 r%d'
import struct
HDR_STRING = "Mod\x00"
FTR_STRING = "\nEnd"
#TODO: find standard notation to keep it inside init_mappings
udd_formats = [
(11, "Module info file v1.1\x00"),
(20, "Module info file v2.0\x00"),
(20, "Module info file v2.01g\x00"),
]
def init_mappings():
"""initialize constants' mappings"""
format_ids = [
"STRING",
"DDSTRING",
"MRUSTRING",
"EMPTY",
"VERSION",
"DWORD",
"DD2",
"DD2STRING",
"BIN",
"NAME",
"CRC2",
]
F_ = dict([(e, i) for i, e in enumerate(format_ids)])
udd_formats = [
(11, "Module info file v1.1\x00"),
(20, "Module info file v2.0\x00"),
(20, "Module info file v2.01g\x00"),
]
Udd_Formats = dict(
[(e[1], e[0]) for e in udd_formats] +
udd_formats)
#OllyDbg 1.1
chunk_types11 = [
("Header", HDR_STRING, F_["STRING"]),
("Footer", FTR_STRING, F_["EMPTY"]),
("Filename", "\nFil", F_["STRING"]),
("Version", "\nVer", F_["VERSION"]),
("Size", "\nSiz", F_["DWORD"]),
("Timestamp", "\nTst", F_["DD2"]),
("CRC", "\nCcr", F_["DWORD"]),
("Patch", "\nPat", F_["BIN"]),
("Bpc", "\nBpc", F_["BIN"]),
("Bpt", "\nBpt", F_["BIN"]),
("HwBP", "\nHbr", F_["BIN"]),
("Save", "\nSva", F_["BIN"]), # sometimes 4, sometimes 5 ?
("AnalyseHint", "\nAht", F_["BIN"]),
("CMD_PLUGINS", "\nUs0", F_["DDSTRING"]), # multiline, needs escaping
("U_LABEL", "\nUs1", F_["DDSTRING"]),
("A_LABEL", "\nUs4", F_["DDSTRING"]),
("U_COMMENT", "\nUs6", F_["DDSTRING"]),
("BPCOND", "\nUs8", F_["DDSTRING"]),
("ApiArg", "\nUs9", F_["DDSTRING"]),
("USERLABEL", "\nUs1", F_["DDSTRING"]),
("Watch", "\nUsA", F_["DDSTRING"]),
("US2", "\nUs2", F_["BIN"]),
("US3", "\nUs3", F_["BIN"]),
("_CONST", "\nUs5", F_["BIN"]),
("A_COMMENT", "\nUs7", F_["BIN"]),
("FIND?", "\nUsC", F_["BIN"]),
("SOURCE?", "\nUsI", F_["BIN"]),
("MRU_Inspect","\nUs@", F_["MRUSTRING"]),
("MRU_Asm", "\nUsB", F_["MRUSTRING"]),
("MRU_Goto", "\nUsK", F_["MRUSTRING"]), #?
("MRU_Explanation", "\nUs|", F_["MRUSTRING"]), # logging bp explanation
("MRU_Expression", "\nUs{", F_["MRUSTRING"]), # logging bp expression
("MRU_Watch", "\nUsH", F_["MRUSTRING"]),
("MRU_Label", "\nUsq", F_["MRUSTRING"]), #?
("MRU_Comment", "\nUsv", F_["MRUSTRING"]), #?
("MRU_Condition", "\nUsx", F_["MRUSTRING"]), #?
("MRU_CMDLine", "\nCml", F_["STRING"]), #?
("LogExpression", "\nUs;", F_["DDSTRING"]), # logging bp expression
("ANALY_COMM", "\nUs:", F_["DDSTRING"]), #
("US?", "\nUs?", F_["DDSTRING"]), #?
("TracCond", "\nUsM", F_["DDSTRING"]), # tracing condition
("LogExplanation", "\nUs<", F_["DDSTRING"]), # logging bp explanation
("AssumedArgs", "\nUs=", F_["DDSTRING"]), # Assumed arguments
("CFA", "\nCfa", F_["DD2"]), #?
("CFM", "\nCfm", F_["DD2STRING"]), #?
("CFI", "\nCfi", F_["DD2"]), #?
("US>", "\nUs>", F_["BIN"]), #?
("ANC", "\nAnc", F_["BIN"]), #?
("JDT", "\nJdt", F_["BIN"]), #?
("PRC", "\nPrc", F_["BIN"]), #?
("SWI", "\nSwi", F_["BIN"]), #?
]
#OllyDbg 2
chunk_types20 = [
("Header", HDR_STRING, F_["STRING"]),
("Footer", FTR_STRING, F_["EMPTY"]),
("Filename", "\nFil", F_["STRING"]),
("Infos", "\nFcr", F_["CRC2"]), #?
("Name", "\nNam", F_["NAME"]), #?
("Data", "\nDat", F_["NAME"]), #?
("MemMap", "\nMba", F_["DDSTRING"]), #?
("LSA", "\nLsa", F_["NAME"]), # MRU entries
("JDT", "\nJdt", F_["BIN"]), #?
("PRC", "\nPrc", F_["BIN"]), #?
("SWI", "\nSwi", F_["BIN"]), #?
("CBR", "\nCbr", F_["BIN"]), #?
("LBR", "\nLbr", F_["BIN"]), #?
("ANA", "\nAna", F_["BIN"]), #?
("CAS", "\nCas", F_["BIN"]), #?
("PRD", "\nPrd", F_["BIN"]), #?
("Save", "\nSav", F_["BIN"]), #?
("RTC", "\nRtc", F_["BIN"]), #?
("RTP", "\nRtp", F_["BIN"]), #?
("Int3", "\nIn3", F_["BIN"]), #?
("MemBP", "\nBpm", F_["BIN"]), #?
("HWBP", "\nBph", F_["BIN"]), #?
]
Chunk_Types11 = dict(
[(e[1], e[0]) for e in chunk_types11] +
[(e[0], e[1]) for e in chunk_types11]
)
Chunk_Types20 = dict(
[(e[1], e[0]) for e in chunk_types20] +
[(e[0], e[1]) for e in chunk_types20]
)
Chunk_Types = {
11: Chunk_Types11,
20: Chunk_Types20
}
# no overlapping of formats yet so they're still merged
#
Chunk_Formats = dict(
[(e[2], e[0]) for e in chunk_types11] +
[(e[0], e[2]) for e in chunk_types11] +
[(e[2], e[0]) for e in chunk_types20] +
[(e[0], e[2]) for e in chunk_types20]
)
olly2cats = [
# used in DATA and NAME
#
('!', "UserLabel"),
('0', "UserComment"),
('1', "Import"),
('2', "APIArg"),
('3', "APICall"),
('4', "Member"),
('6', "Unk6"),
('*', "Struct"),
# only used in LSA ?
#
('`', 'mru_label'),
('a', 'mru_asm'),
('c', 'mru_comment'),
('d', 'watch'),
('e', 'mru_goto'),
('p', 'trace_condition1'),
('q', 'trace_condition2'),
('r', 'trace_condition3'),
('s', 'trace_condition4'),
('t', 'trace_command1'),
('u', 'trace_command2'),
('v', 'protocol_start'),
('w', 'protocol_end'),
('Q', 'log_explanation'),
('R', 'log_condition'),
('S', 'log_expression'),
('U', 'mem_explanation'),
('V', 'mem_condition'),
('W', 'mem_expression'),
('Y', 'hbplog_explanation'),
('Z', 'hbplog_condition'),
('[', 'hbplog_expression'),
]
Olly2Cats = dict(
[(e[1], e[0]) for e in olly2cats] +
olly2cats)
return Udd_Formats, F_, Chunk_Types, Chunk_Formats, Olly2Cats
UDD_FORMATS, F_, CHUNK_TYPES, CHUNK_FORMATS, OLLY2CATS = init_mappings()
def binstr(data):
"""return a stream as hex sequence"""
return " ".join(["%02X" % ord(c) for c in data])
def elbinstr(data):
"""return a stream as hex sequence, ellipsed if too long"""
if len(data) < 10:
return binstr(data)
return "(%i) %s ... %s" % (len(data), binstr(data[:10]), binstr(data[-10:]))
class Error(Exception):
"""custom error class"""
pass
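# crc32mpeg implements CRC-32/MPEG-2 bit by bit: polynomial 0x04C11DB7,
# initial value 0xFFFFFFFF, no bit reflection and no final XOR. As a
# commonly cited check value, crc32mpeg("123456789") should give 0x0376E6E7.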
def crc32mpeg(buffer_):
"""computes the CRC32 MPEG of a buffer"""
crc = 0xffffffff
for c in buffer_:
octet = ord(c)
for i in range(8):
topbit = crc & 0x80000000
if octet & (0x80 >> i):
topbit ^= 0x80000000
crc <<= 1
if topbit:
crc ^= 0x4c11db7
crc &= 0xffffffff
return crc
def getcrc(filename):
"""returns the UDD crc of a file, by its filename"""
# probably not always correct
import pefile
pe = pefile.PE(filename)
sec = pe.sections[0]
align = pe.OPTIONAL_HEADER.SectionAlignment
data = sec.get_data(sec.VirtualAddress)
ActualSize = max(sec.Misc_VirtualSize, sec.SizeOfRawData)
data += "\0" * (ActualSize - len(data))
rem = ActualSize % align
if rem:
data += "\0" * (align - rem)
return crc32mpeg(data)
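# getTimestamp returns the file's last-write time as a Windows FILETIME
# (100-nanosecond intervals since 1601-01-01) packed as 8 little-endian bytes,
# presumably the same layout the Timestamp chunk stores.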
def getTimestamp(filename):
"""read LastModified timestamp and return as a binary buffer"""
import ctypes
mtime = ctypes.c_ulonglong(0)
h = ctypes.windll.kernel32.CreateFileA(
ctypes.c_char_p(filename),
0, 3, 0, 3, 0x80, 0)
ctypes.windll.kernel32.GetFileTime(h, 0,0, ctypes.pointer(mtime))
ctypes.windll.kernel32.CloseHandle(h)
return struct.pack("<Q", mtime.value)
def getFileInfo(filename):
"""return file's timestamp, crc and size"""
import os
import stat
time_ = getTimestamp(filename)
crc = getcrc(filename)
size = os.stat(filename)[stat.ST_SIZE]
return time_, crc, size
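# On-disk UDD chunk layout: a 4-byte ASCII type tag, a little-endian uint32
# payload length, then the payload itself; files start with a "Mod\x00" header
# chunk and end with a "\nEnd" footer chunk.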
def read_next_chunk(f):
"""read next Udd chunk"""
ct = f.read(4)
size = struct.unpack("<I", f.read(4))[0]
cd = f.read(size)
return ct, cd
def write_chunk(f, ct, cd):
"""write a chunk"""
f.write(ct)
f.write(struct.pack("<I", len(cd)))
f.write(cd)
return
def make_chunk(ct, cd):
"""put together chunk types and data with a few checks"""
if len(ct) != 4:
raise Error("invalid chunk name length")
if len(cd) > 255:
raise Error("invalid chunk data length")
return [ct, cd]
def build_data(format_, info):
"""generate a chunk data depending on the format"""
if format_ == F_["DWORD"]:
return "%s" % (struct.pack("<I", info["dword"]))
if format_ in [F_["DDSTRING"], F_["MRUSTRING"]]:
return "%s%s\x00" % (struct.pack("<I", info["dword"]), info["text"])
else:
raise Error("format not supported for building")
#TODO: merge those into a real make_chunk or something - support format 2
#
def make_comment_chunk(info, format_):
"""generate a user comment chunk depending on the format"""
if format_ == 11:
return make_chunk(
CHUNK_TYPES[format_]["U_COMMENT"],
build_data(CHUNK_FORMATS["U_LABEL"], info)
)
else:
raise Error("Not supported")
def make_label_chunk(info, format_):
"""generate a user label chunk depending on the format"""
if format_ == 11:
return make_chunk(
CHUNK_TYPES[format_]["U_LABEL"],
build_data(CHUNK_FORMATS["U_LABEL"], info)
)
else:
raise Error("Not supported")
def expand_chunk(chunk, format_):
"""Extract information from the chunk data"""
ct, cd = chunk
if ct not in CHUNK_TYPES[format_]:
return cd
cf = CHUNK_FORMATS[CHUNK_TYPES[format_][ct]]
if cf == F_["STRING"]:
result = {"string": cd}
elif cf in [F_["DDSTRING"], F_["MRUSTRING"]]:
result = {
"dword": struct.unpack("<I", cd[:4])[0],
"text": cd[4:].rstrip("\x00").encode('string-escape')
}
elif cf == F_["NAME"]:
#name can be null, no 00 in that case
#if lptype is not present then no type
RVA = cd[:4]
buffer_ = cd[4:]
RVA = struct.unpack("<I", RVA)[0]
buffer_ = buffer_.rstrip("\x00")
result = {"RVA": RVA, "category": buffer_[0]}
buffer_ = buffer_[1:]
for i, c in enumerate(buffer_):
if ord(c) >= 0x80:
found = i
break
else:
name = buffer_
if buffer_:
result["name"] = buffer_
return result
name = buffer_[:found]
lptype = buffer_[found]
type_ = buffer_[found + 1:]
# should be in rendering ?
#
name = name.rstrip("\x00")
if name:
result["name"] = name
# should be in rendering ?
#
result["lptype"] = "*" if lptype == "\xa0" else "%i" % ord(lptype)
result["type_"] = type_
elif cf == F_["DD2STRING"]:
result = list(struct.unpack("<2I", cd[:8])) + [cd[8:].rstrip("\x00")]
elif cf == F_["EMPTY"]:
result = None
elif cf == F_["CRC2"]:
dwords = struct.unpack("<6I", cd)
result = {
"size":dwords[0],
"timestamp": " ".join("%08X" % e for e in (dwords[1:3])),
"unk": dwords[3],
"unk2": dwords[4],
"unk3": dwords[5],
}
elif cf == F_["VERSION"]:
result = {"version":struct.unpack("<4I", cd)}
elif cf == F_["DWORD"]:
result = {"dword": struct.unpack("<I", cd)}
elif cf == F_["DD2"]:
result = {"dwords": struct.unpack("<2I", cd)}
elif cf == F_["BIN"]:
result = {"binary": cd}
else:
result = cd
return result
def print_chunk(chunk, format_):
"""Pretty print chunk data after expansion"""
ct, cd = chunk
info = expand_chunk(chunk, format_)
if ct not in CHUNK_TYPES[format_]:
return elbinstr(info)
cf = CHUNK_FORMATS[CHUNK_TYPES[format_][ct]]
if cf == F_["STRING"]:
result = info["string"].rstrip("\x00")
elif cf == F_["DDSTRING"]:
result = "%(dword)08X %(text)s" % (info)
elif cf == F_["MRUSTRING"]:
result = "%(dword)i %(text)s" % (info)
elif cf == F_["NAME"]:
if info["category"] in OLLY2CATS:
info["category"] = OLLY2CATS[info["category"]]
result = ["%(RVA)08X (%(category)s)" % info]
if "name" in info:
result += ["%(name)s" % info]
if "type_" in info:
result += ["type:%(lptype)s %(type_)s" % info]
result = " ".join(result)
elif cf == F_["DD2STRING"]:
result = "%08X %08X %s" % tuple(info)
elif cf == F_["EMPTY"]:
result = ""
elif cf == F_["CRC2"]:
result = "Size: %(size)i Time:%(timestamp)s unk:%(unk)08X" % info
elif cf == F_["VERSION"]:
result = "%i.%i.%i.%i" % info["version"]
elif cf == F_["DWORD"]:
result = "%08X" % info["dword"]
elif cf == F_["DD2"]:
result = "%08X %08X" % info["dwords"]
elif cf == F_["BIN"]:
result = elbinstr(info["binary"])
else:
result = cd
return result
class Udd(object):
"""OllyDbg UDD file format class"""
def __init__(self, filename=None, format_=None):
"""initialization. load file if given"""
self.__data = {}
self.__chunks = []
self.__warnings = []
self.__format = 11 if format_ is None else format_
if filename is not None:
self.load(filename)
return
def load(self, filename):
"""load UDD file from filename"""
try:
f = open(filename, "rb")
ct, cd = read_next_chunk(f)
if not (ct == HDR_STRING and
cd in (e[1] for e in udd_formats)):
raise Error("Invalid HEADER chunk")
self.__format = UDD_FORMATS[cd]
self.__chunks.append([ct, cd])
while (True):
ct, cd = read_next_chunk(f)
if ct not in CHUNK_TYPES[self.__format]:
self.__warnings.append(
"Warning (offset %08X) unknown chunk type: '%s' %s" %
(f.tell(), ct.lstrip("\n"), elbinstr(cd))
)
self.__chunks.append([ct, cd])
if (ct, cd) == (CHUNK_TYPES[self.__format]["Footer"] , ""):
break
finally:
f.close()
return
def save(self, filename):
"""(over)writes UDD file to disk"""
f = open(filename, "wb")
for ct, cd in self.__chunks:
write_chunk(f, ct, cd)
f.close()
return
def set_chunk(self, pos, chunk):
"""give new values to a chunk"""
self.__chunks[pos] = chunk
return
def get_chunk(self, pos):
"""return chunk contents"""
return self.__chunks[pos]
def add_chunk(self, chunk):
"""append a chunk before the footer"""
if not self.find_chunk(chunk):
self.__chunks.insert(-1, chunk)
return
def append_chunk(self, chunk):
"""blindly append the chunk"""
self.__chunks.append(chunk)
return
def get_format(self):
"""return UDD file format"""
return self.__format
def find_by_type(self, type_):
"""return chunk indexes matching the given type"""
found = []
for i, c in enumerate(self.__chunks):
if c[0] == type_:
found += [i]
return found
def find_by_types(self, types):
"""return chunk indexes matching any of the given types"""
found = []
for i, c in enumerate(self.__chunks):
if c[0] in types:
found += [i]
return found
def find_chunk(self, chunk):
"""lookup chunk by its type and data"""
found = []
for i, c in enumerate(self.__chunks):
if c == chunk:
found += [i]
return found if found else None
def __repr__(self):
"""pretty print of a UDD"""
r = []
for i in self.__chunks:
if i[0] in CHUNK_TYPES[self.__format]:
s = ["%s:" % CHUNK_TYPES[self.__format][i[0]]]
else:
s = ["UNK[%s]:" % i[0][1:4]]
s += [print_chunk(i, self.__format)]
r += ["".join(s)]
return "\n".join(r)
|
foone/3dmmInternals
|
generate/lib/pyudd.py
|
Python
|
unlicense
| 17,726
| 0.007221
|
#!/usr/bin/python
import sys, os
import select, socket
import usbcomm
import usb
_default_host = 'localhost'
_default_port = 23200
_READ_ONLY = select.POLLIN | select.POLLPRI
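# Stream bridges a Bayer Contour USB meter to TCP: select.poll() watches the
# listening socket and every connected client, while each loop iteration also
# polls the USB endpoint and fans any meter data out to all clients.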
class Stream(object):
def __init__(self,
host=_default_host,
port=_default_port):
self.host = host
self.port = port
self.usb = usbcomm.USBComm(idVendor=usbcomm.ids.Bayer, idProduct=usbcomm.ids.Bayer.Contour)
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setblocking(0)
self.poller = select.poll()
self.fd_to_socket = {}
self.clients = []
def close(self):
print >>sys.stderr, '\nMUX > Closing...'
for client in self.clients:
client.close()
self.usb.close()
self.server.close()
print >>sys.stderr, 'MUX > Done! =)'
def add_client(self, client):
print >>sys.stderr, 'MUX > New connection from', client.getpeername()
client.setblocking(0)
self.fd_to_socket[client.fileno()] = client
self.clients.append(client)
self.poller.register(client, _READ_ONLY)
def remove_client(self, client, why='?'):
try:
name = client.getpeername()
except:
name = 'client %d' % client.fileno()
print >>sys.stderr, 'MUX > Closing %s: %s' % (name, why)
self.poller.unregister(client)
self.clients.remove(client)
client.close()
def read(self):
self.sink = None
try:
data = self.usb.read( )
self.sink = data
except usb.core.USBError, e:
if e.errno != 110:
print e, dir(e), e.backend_error_code, e.errno
raise
return self.sink is not None
def flush(self):
if self.sink is not None:
for client in self.clients:
client.send(self.sink)
self.sink = None
def run(self):
try:
# self.tty.setTimeout(0) # Non-blocking
# self.tty.flushInput()
# self.tty.flushOutput()
# self.poller.register(self.usb.epout.bEndpointAddress, _READ_ONLY)
# self.fd_to_socket[self.usb.epout.bEndpointAddress] = self.usb
# print >>sys.stderr, 'MUX > Serial port: %s @ %s' % (self.device, self.baudrate)
print >>sys.stderr, 'MUX > usb port: %s' % (self.usb)
self.server.bind((self.host, self.port))
self.server.listen(5)
self.poller.register(self.server, _READ_ONLY)
self.fd_to_socket[self.server.fileno()] = self.server
print >>sys.stderr, 'MUX > Server: %s:%d' % self.server.getsockname()
print >>sys.stderr, 'MUX > Use ctrl+c to stop...\n'
while True:
events = self.poller.poll(500)
if self.read( ):
self.flush( )
for fd, flag in events:
# Get socket from fd
s = self.fd_to_socket[fd]
print fd, flag, s
if flag & select.POLLHUP:
self.remove_client(s, 'HUP')
elif flag & select.POLLERR:
self.remove_client(s, 'Received error')
elif flag & (_READ_ONLY):
# A readable server socket is ready to accept a connection
if s is self.server:
connection, client_address = s.accept()
self.add_client(connection)
# Data from serial port
elif s is self.usb:
data = s.read( )
for client in self.clients:
client.send(data)
# Data from client
else:
data = s.recv(80)
# Client has data
print "send to usb"
if data: self.usb.write(data)
# Interpret empty result as closed connection
else: self.remove_client(s, 'Got no data')
except usb.core.USBError, e:
print >>sys.stderr, '\nMUX > USB error: "%s". Closing...' % e
except socket.error, e:
print >>sys.stderr, '\nMUX > Socket error: %s' % e.strerror
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.close()
if __name__ == '__main__':
s = Stream( )
s.run( )
|
bewest/glucodump
|
glucodump/stream.py
|
Python
|
gpl-2.0
| 3,981
| 0.017332
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-09 17:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scoping', '0047_auto_20170209_1626'),
]
operations = [
migrations.RemoveField(
model_name='query',
name='technology',
),
migrations.AddField(
model_name='query',
name='technology',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='scoping.Technology'),
),
]
|
mcallaghan/tmv
|
BasicBrowser/scoping/migrations/0048_auto_20170209_1708.py
|
Python
|
gpl-3.0
| 658
| 0.00152
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
This file contains various constants and helper code to generate constants
that are used in the Statistical Variable renaming.
"""
import pandas as pd
import collections
import re
def capitalizeFirst(word):
""" Capitalizes the first letter of a string. """
return word[0].upper() + word[1:]
def standard_name_remapper(orig_name):
""" General renaming function for long strings into Pascal case.
Text inbetween trailing parentheses is removed.
Commas, dashes, and "ands" are removed. Then string is converted into Pascal
case without and spaces present.
"""
# Remove any trailing parentheses.
# TODO(tjann): to check if this is safe.
paren_start = orig_name.find("(")
if paren_start != -1:
orig_name = orig_name[:paren_start]
# Removes separating words.
orig_name = orig_name.replace(",", " ")
orig_name = orig_name.replace("-", " ")
orig_name = orig_name.replace("and ", "")
return "".join([word.capitalize() for word in orig_name.split()])
def _create_naics_map():
""" Downloads all NAICS codes across long and short form codes. """
# Read in list of industry topics.
naics_codes = pd.read_excel(
"https://www.census.gov/eos/www/naics/2017NAICS/2-6%20digit_2017_Codes.xlsx"
)
naics_codes = naics_codes.iloc[:, [1, 2]]
naics_codes.columns = ['NAICSCode', 'Title']
# Replace all ranges with individual rows. E.g. 31-33 -> 31, 32, 33.
def range_to_array(read_code):
if isinstance(read_code, str) and "-" in read_code:
lower, upper = read_code.split("-")
return list(range(int(lower), int(upper) + 1))
return read_code
naics_codes = naics_codes.dropna()
naics_codes['NAICSCode'] = naics_codes['NAICSCode'].apply(range_to_array)
naics_codes = naics_codes.explode('NAICSCode')
# Add unclassified code which is used in some statistical variables.
naics_codes = naics_codes.append(
{
"NAICSCode": 99,
"Title": "Nonclassifiable"
}, ignore_index=True)
# Query for only two digit codes.
short_codes = naics_codes[naics_codes['NAICSCode'] < 100]
short_codes = short_codes.set_index("NAICSCode")
short_codes = short_codes['Title'].to_dict()
# Read in overview codes.
overview_codes = pd.read_csv(
"https://data.bls.gov/cew/doc/titles/industry/high_level_industries.csv"
)
overview_codes.columns = ["NAICSCode", "Title"]
overview_codes = overview_codes.set_index("NAICSCode")
overview_codes = overview_codes['Title'].to_dict()
# Combine the two sources of codes.
NAICS_MAP = {}
combined_codes = short_codes
combined_codes.update(overview_codes)
# Rename industries into Pascal case.
for code, orig_name in combined_codes.items():
NAICS_MAP[str(code)] = standard_name_remapper(orig_name)
# Other edge cases.
NAICS_MAP['00'] = 'Unclassified'
return NAICS_MAP
# TODO(iancostello): Consider adding function memoization.
NAICS_MAP = _create_naics_map()
### True Constants
# Template of Stat Var MCF.
TEMPLATE_STAT_VAR = """
Node: dcid:{human_readable_dcid}
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
{CONSTRAINTS}"""
# Main query for stat vars. Combines across populations and observations
# to create statistical variables.
QUERY_FOR_ALL_STAT_VARS = """
SELECT DISTINCT
SP.population_type as populationType,
{CONSTRAINTS}
{POPULATIONS}
O.measurement_qualifier AS measurementQualifier,
O.measurement_denominator as measurementDenominator,
O.measured_prop as measuredProp,
O.unit as unit,
O.scaling_factor as scalingFactor,
O.measurement_method as measurementMethod,
SP.num_constraints as numConstraints,
CASE
WHEN O.measured_value IS NOT NULL THEN "measuredValue"
WHEN O.sum_value IS NOT NULL THEN "sumValue"
WHEN O.mean_value IS NOT NULL THEN "meanValue"
WHEN O.min_value IS NOT NULL THEN "minValue"
WHEN O.max_value IS NOT NULL THEN "maxValue"
WHEN O.std_deviation_value IS NOT NULL THEN "stdDeviationValue"
WHEN O.growth_rate IS NOT NULL THEN "growthRate"
WHEN O.median_value IS NOT NULL THEN "medianValue"
ELSE "Unknown"
END AS statType
FROM
`google.com:datcom-store-dev.dc_v3_clustered.StatisticalPopulation`
AS SP JOIN
`google.com:datcom-store-dev.dc_v3_clustered.Observation`
AS O
ON (SP.id = O.observed_node_key)
WHERE
O.type <> "ComparativeObservation"
AND SP.is_public
AND SP.prov_id NOT IN ({comma_sep_prov_blacklist})
"""
# Dataset blacklist.
_BIO_DATASETS = frozenset([
'dc/p47rsv3', # UniProt
'dc/0cwj4g1', # FDA_Pharmacologic_Class
'dc/5vxrbh3', # SIDER
'dc/ff08ks', # Gene_NCBI
'dc/rhjyj31', # MedicalSubjectHeadings
'dc/jd648v2', # GeneticVariantClinVar
'dc/x8m41b1', # ChEMBL
'dc/vbyjkh3', # SPOKESymptoms
'dc/gpv9pl2', # DiseaseOntology
'dc/8nwtbj2', # GTExSample0
'dc/t5lx1e2', # GTExSample2
'dc/kz0q1c2', # GTExSample1
'dc/8xcvhx', # GenomeAssemblies
'dc/hgp9hn1', # Species
'dc/9llzsx1', # GeneticVariantUCSC
'dc/f1fxve1', # Gene_RNATranscript_UCSC
'dc/mjgrfc', # Chromosome
'dc/h2lkz1', # ENCODEProjectSample
])
_MISC_DATASETS = frozenset([
'dc/93qydx3', # NYBG
'dc/g3rq1f1', # DeepSolar
'dc/22t2hr3', # EIA_860
'dc/zkhvp12', # OpportunityInsightsOutcomes
'dc/89fk9x3', # CollegeScorecard
])
# List of constraint prefixes to remove from certain properties.
CONSTRAINT_PREFIXES_TO_STRIP = {
'nativity': 'USC',
'age': 'USC',
'institutionalization': 'USC',
'educationStatus': 'USC',
'povertyStatus': 'USC',
'workExperience': 'USC',
'race': ['USC', 'CDC', 'DAD'],
'employment': ['USC', 'BLS'],
'employmentStatus': ['USC', 'BLS'],
'schoolGradeLevel': 'NCES',
'patientRace': 'DAD'
}
# List of drug renamings. Note that some drugs are intentionally excluded.
DRUG_REMAPPINGS = {
'drug/dea/1100': 'Amphetamine',
'drug/dea/1105B': 'DlMethamphetamine',
'drug/dea/1105D': 'DMethamphetamine',
'drug/dea/1205': 'Lisdexamfetamine',
'drug/dea/1248': 'Mephedrone',
'drug/dea/1615': 'Phendimetrazine',
'drug/dea/1724': 'Methylphenidate',
'drug/dea/2010': 'GammaHydroxybutyricAcid',
'drug/dea/2012': 'FDAApprovedGammaHydroxybutyricAcidPreparations',
'drug/dea/2100': 'BarbituricAcidDerivativeOrSalt',
'drug/dea/2125': 'Amobarbital',
'drug/dea/2165': 'Butalbital',
'drug/dea/2270': 'Pentobarbital', # Intentionally duplicated
'drug/dea/2285': 'Phenobarbital', #
'drug/dea/2315': 'Secobarbital',
'drug/dea/2765': 'Diazepam',
'drug/dea/2783': 'Zolpidem',
'drug/dea/2885': 'Lorazepam',
'drug/dea/4000': 'AnabolicSteroids',
'drug/dea/4187': 'Testosterone',
'drug/dea/7285': 'Ketamine',
'drug/dea/7315D': 'Lysergide',
'drug/dea/7365': 'MarketableOralDronabinol',
'drug/dea/7369': 'DronabinolGelCapsule',
'drug/dea/7370': 'Tetrahydrocannabinol',
'drug/dea/7377': 'Cannabicyclol',
'drug/dea/7379': 'Nabilone',
'drug/dea/7381': 'Mescaline',
'drug/dea/7400': '34Methylenedioxyamphetamine',
'drug/dea/7431': '5MethoxyNNDimethyltryptamine',
'drug/dea/7433': 'Bufotenine',
'drug/dea/7437': 'Psilocybin',
'drug/dea/7438': 'Psilocin',
'drug/dea/7455': 'PCE',
'drug/dea/7471': 'Phencyclidine',
'drug/dea/7540': 'Methylone',
'drug/dea/9010': 'Alphaprodine',
'drug/dea/9020': 'Anileridine',
'drug/dea/9041L': 'Cocaine',
'drug/dea/9046': 'Norcocaine',
'drug/dea/9050': 'Codeine',
'drug/dea/9056': 'EtorphineExceptHCl',
'drug/dea/9064': 'Buprenorphine',
'drug/dea/9120': 'Dihydrocodeine',
'drug/dea/9143': 'Oxycodone',
'drug/dea/9150': 'Hydromorphone',
'drug/dea/9168': 'Difenoxin',
'drug/dea/9170': 'Diphenoxylate',
'drug/dea/9180L': 'Ecgonine',
'drug/dea/9190': 'Ethylmorphine',
'drug/dea/9193': 'Hydrocodone',
'drug/dea/9200': 'Heroin',
'drug/dea/9220L': 'Levorphanol',
'drug/dea/9230': 'Pethidine',
'drug/dea/9250B': 'Methadone',
'drug/dea/9273D': 'BulkDextropropoxyphene',
'drug/dea/9300': 'Morphine',
'drug/dea/9313': 'Normorphine',
'drug/dea/9333': 'Thebaine',
'drug/dea/9411': 'Naloxone',
'drug/dea/9600': 'RawOpium',
'drug/dea/9630': 'TincuredOpium',
'drug/dea/9639': 'PowderedOpium',
'drug/dea/9652': 'Oxymorphone',
'drug/dea/9655': 'Paregoric',
'drug/dea/9665': '14Hydroxycodeinone',
'drug/dea/9668': 'Noroxymorphone',
'drug/dea/9670': 'PoppyStrawConcentrate',
'drug/dea/9737': 'Alfentanil',
'drug/dea/9739': 'Remifentanil',
'drug/dea/9740': 'Sufentanil',
'drug/dea/9743': 'Carfentanil',
'drug/dea/9780': 'Tapentadol',
'drug/dea/9801': 'Fentanyl',
}
# Exceptionally long and confusing cause of death names are manually renamed.
MANUAL_CAUSE_OF_DEATH_RENAMINGS = {
'ICD10/D50-D89': 'DiseasesOfBloodAndBloodFormingOrgansAndImmuneDisorders',
'ICD10/R00-R99': 'AbnormalNotClassfied',
'ICD10/U00-U99': 'SpecialCases',
'ICD10/V01-Y89': 'ExternalCauses'
}
# List of properties to perform a numerical quantity remap on.
NUMERICAL_QUANTITY_PROPERTIES_TO_REMAP = [
'income', 'age', 'householderAge', 'homeValue', 'dateBuilt', 'grossRent',
'numberOfRooms', 'numberOfRooms', 'householdSize', 'numberOfVehicles',
'propertyTax'
]
# Regex rules to apply to numerical quantity remap.
REGEX_NUMERICAL_QUANTITY_RENAMINGS = [
# [A-Za-z]+[0-9]+Onwards -> [0-9]+OrMore[A-Za-z]+
(re.compile(r"^([A-Za-z]+)([0-9]+)Onwards$"),
lambda match: match.group(2) + "OrMore" + match.group(1)),
# [A-Za-z]+Upto[0-9]+ -> Upto[0-9]+[A-Za-z]+
(re.compile(r"^([A-Za-z]+)Upto([0-9]+)$"),
lambda match: "Upto" + match.group(2) + match.group(1)),
# [A-Za-z]+[0-9]+To[0-9]+-> [0-9]+To[0-9]+[A-Za-z]+
(re.compile(r"^([A-Za-z]+)([0-9]+)To([0-9]+)$"),
lambda match: match.group(2) + "To" + match.group(3) + match.group(1)),
# [A-Za-z]+[0-9]+ -> [0-9]+[A-Za-z]+
(re.compile(r"^([A-Za-z]+)([0-9]+)$"),
lambda match: match.group(2) + match.group(1))
]
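# Illustrative helper (added for this writeup; not part of the original
# module): one plausible way to apply the rules above. The first rule whose
# pattern matches rewrites the name, e.g. 'Years25To34' -> '25To34Years' and
# 'Years75Onwards' -> '75OrMoreYears'.
def _apply_numerical_quantity_renaming(name):
    for pattern, renamer in REGEX_NUMERICAL_QUANTITY_RENAMINGS:
        match = pattern.match(name)
        if match:
            return renamer(match)
    return name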
# Constants that power Statistical Variable documentation.
# Each tuple is defined as follows:
# (name of the vertical), (population types to include),
# (whether to subgroup by all population types in the vertical),
# (if subgrouping all, whether to only group population types that have more
# than one statistical variable).
SVPopGroup = (collections.namedtuple(
'STAT_VAR_DOCUMENTION_GROUPING',
'vertical popTypes subgroupAllPops subgroupIfMoreThanOne'))
STAT_VAR_POPULATION_GROUPINGS = [
SVPopGroup("Demographics",
['Person', 'Parent', 'Child', 'Student', 'Teacher'],
True, False),
SVPopGroup("Crime",
['CriminalActivities'],
False, False),
SVPopGroup("Health",
['Death', 'DrugDistribution', 'MedicalConditionIncident',
'MedicalTest', 'MedicareEnrollee'],
True, False),
SVPopGroup("Employment",
['Worker', 'Establishment', 'JobPosting',
'UnemploymentInsuranceClaim'],
True, False),
SVPopGroup("Economic",
['EconomicActivity', 'Consumption', 'Debt', 'TreasuryBill',
'TreasuryBond', 'TreasuryNote'],
True, False),
SVPopGroup("Environment",
['Emissions'],
False, False),
SVPopGroup("Household",
['Household'],
False, False),
SVPopGroup("HousingUnit",
['HousingUnit'],
False, False)
]
# HTML for statistical variable markdown.
DOCUMENTATION_BASE_MARKDOWN = \
"""---
layout: default
title: Statistical Variables
nav_order: 2
---
# Statistical Variables
Many of the Data Commons APIs deal with Data Commons nodes of the type
[StatisticalVariable](https://browser.datacommons.org/kg?dcid=StatisticalVariable).
The following list contains all Statistical Variables with human-readable identifiers,
grouped by domain and population type. Some verticals are grouped such that all
population types are a sub-level grouping, while others (like disasters) only
group by population types when there are multiple statistical variables for that
population type.
<style>
details details {
margin-left: 24px;
}
details details summary {
font-size: 16px;
}
li {
white-space: nowrap;
}
</style>
"""
DOCUMENTATION_HEADER_START = \
"""
<details>
<summary>{HEADER}</summary>
"""
DOCUMENTATION_DROPDOWN_START = \
"""
<details>
<summary>{POPULATION_TYPE}</summary>
<ul>
"""
|
datacommonsorg/tools
|
stat_var_renaming/stat_var_renaming_constants.py
|
Python
|
apache-2.0
| 13,060
| 0.001914
|
#!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
package : configd
function: commandline tool to send commands to configd (response to stdout)
"""
import argparse
import socket
import os.path
import traceback
import sys
import syslog
import time
from select import select
from modules import syslog_error, syslog_notice
__author__ = 'Ad Schellevis'
configd_socket_name = '/var/run/configd.socket'
configd_socket_wait = 20
def exec_config_cmd(exec_command):
""" execute command using configd socket
:param exec_command: command string
:return: string
"""
# Create and open unix domain socket
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(configd_socket_name)
except socket.error:
syslog_error('unable to connect to configd socket (@%s)'%configd_socket_name)
print('unable to connect to configd socket (@%s)'%configd_socket_name, file=sys.stderr)
return None
try:
sock.send(exec_command.encode())
data = []
while True:
line = sock.recv(65536).decode()
if line:
data.append(line)
else:
break
return ''.join(data)[:-3]
except:
syslog_error('error in configd communication \n%s'%traceback.format_exc())
        print('error in configd communication, see syslog for details', file=sys.stderr)
finally:
sock.close()
parser = argparse.ArgumentParser()
parser.add_argument("-m", help="execute multiple arguments at once", action="store_true")
parser.add_argument("-e", help="use as event handler, execute command on receiving input", action="store_true")
parser.add_argument("-d", help="detach the execution of the command and return immediately", action="store_true")
parser.add_argument("-q", help="run quietly by muting standard output", action="store_true")
parser.add_argument(
"-t",
help="threshold between events, wait this interval before executing commands, combine input into single events",
type=float
)
parser.add_argument("command", help="command(s) to execute", nargs="+")
args = parser.parse_args()
syslog.openlog("configctl")
# set a timeout to the socket
socket.setdefaulttimeout(120)
# check if configd socket exists
# (wait for a maximum of "configd_socket_wait" seconds for configd to start)
i=0
while not os.path.exists(configd_socket_name):
if i >= configd_socket_wait:
break
time.sleep(1)
i += 1
if not os.path.exists(configd_socket_name):
print('configd socket missing (@%s)'%configd_socket_name, file=sys.stderr)
sys.exit(-1)
# command(s) to execute
if args.m:
# execute multiple commands at once ( -m "action1 param .." "action2 param .." )
exec_commands=args.command
else:
# execute single command sequence
exec_commands=[' '.join(args.command)]
if args.e:
# use as event handler, execute configd command on every line on stdin
last_message_stamp = time.time()
stashed_lines = list()
while True:
rlist, _, _ = select([sys.stdin], [], [], args.t)
if rlist:
last_message_stamp = time.time()
r_line = sys.stdin.readline()
if len(r_line) == 0:
#EOFError. pipe broken?
sys.exit(-1)
stashed_lines.append(r_line)
if len(stashed_lines) >= 1 and (args.t is None or time.time() - last_message_stamp > args.t):
# emit event trigger(s) to syslog
for line in stashed_lines:
syslog_notice("event @ %.2f msg: %s" % (last_message_stamp, line))
# execute command(s)
for exec_command in exec_commands:
syslog_notice("event @ %.2f exec: %s" % (last_message_stamp, exec_command))
exec_config_cmd(exec_command=exec_command)
stashed_lines = list()
else:
# normal execution mode
for exec_command in exec_commands:
if args.d:
exec_command = '&' + exec_command
result=exec_config_cmd(exec_command=exec_command)
if result is None:
sys.exit(-1)
if not args.q:
print('%s' % (result.strip()))
|
opnsense/core
|
src/opnsense/service/configd_ctl.py
|
Python
|
bsd-2-clause
| 5,598
| 0.004287
|
from c2cgeoportal_admin.views.layertree import itemtypes_tables
itemtypes_tables.update({
'lu_int_wms': 'lux_layer_internal_wms',
'lu_ext_wms': 'lux_layer_external_wms',
})
|
Geoportail-Luxembourg/geoportailv3
|
geoportal/geoportailv3_geoportal/admin/admin.py
|
Python
|
mit
| 178
| 0
|
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dcm.run import dcm
def recipe_dcm_run(config, auth_read, account, report_id, report_name):
"""Trigger a CM report run
Args:
auth_read (authentication) - Credentials used for reading data.
account (integer) - CM network id.
report_id (integer) - CM report id, empty if using name.
report_name (string) - CM report name, empty if using id instead.
"""
dcm(config, {
'auth':auth_read,
'report_run_only':True,
'report':{
'account':account,
'report_id':report_id,
'name':report_name
}
})
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Trigger a CM report run
1. Specify an account id.
2. Specify either report name or report id to run.
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
parser.add_argument("-account", help="CM network id.", default='')
parser.add_argument("-report_id", help="CM report id, empty if using name.", default='')
parser.add_argument("-report_name", help="CM report name, empty if using id instead.", default='')
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_dcm_run(config, args.auth_read, args.account, args.report_id, args.report_name)
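  # Example invocation (added for illustration; all values are placeholders):
  #   python dcm_run_example.py -user user.json -account 1234 -report_id 5678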
|
google/starthinker
|
examples/dcm_run_example.py
|
Python
|
apache-2.0
| 3,107
| 0.011265
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.cephalopod.controllers import RHCephalopod, RHCephalopodSync, RHSystemInfo
from indico.web.flask.wrappers import IndicoBlueprint
cephalopod_blueprint = _bp = IndicoBlueprint('cephalopod', __name__, template_folder='templates',
virtual_template_folder='cephalopod')
_bp.add_url_rule('/admin/community-hub/', 'index', RHCephalopod, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/community-hub/sync', 'sync', RHCephalopodSync, methods=('POST',))
_bp.add_url_rule('/system-info', 'system-info', RHSystemInfo)
|
nop33/indico
|
indico/modules/cephalopod/blueprint.py
|
Python
|
gpl-3.0
| 1,345
| 0.003717
|
"""
@summary: Module contain matrix base classes
@author: CJ Grady
@version: 1.0
@status: alpha
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
class Grid(object):
"""
@summary: Base class for Lifemapper grids. This class can be used with
uncompressed grids.
"""
# ...........................
def __init__(self, griddedData=None):
if griddedData is not None:
self._initFromGrid(griddedData)
else:
self.ySize = None
self.xSize = None
self.data = []
self.classes = set([])
# ...........................
def _initFromGrid(self, griddedData):
self.ySize = len(griddedData)
self.xSize = len(griddedData[0])
self.data = griddedData
self.findClasses()
# ...........................
def findClasses(self):
"""
@summary: Finds all of the unique classes in the data
"""
self.classes = set([])
for row in self.data:
for col in row:
self.classes.add(col)
# ...........................
def query(self, x, y):
return self.data[y][x]
# ...........................
def read(self, fn):
self.data = []
with open(fn) as f:
for line in f:
self.data.append([int(i) for i in line.split(' ')])
# ...........................
def write(self, fn):
with open(fn, 'w') as f:
for row in self.data:
f.write('%s\n' % ' '.join([str(i) for i in row]))
# .............................................................................
class _CompressedGrid(Grid):
# ...........................
def __init__(self):
raise Exception, "init must be implemented in sub class"
# ...........................
def query(self, x, y):
raise Exception, "Query must be implemented in sub class"
# ...........................
def read(self, fn):
raise Exception, "Read must be implemented in sub class"
# ...........................
def write(self, fn):
raise Exception, "Write must be implemented in sub class"
|
cjgrady/compression
|
src/matrix/matrix.py
|
Python
|
gpl-2.0
| 3,131
| 0.023315
|
from pymuse.pipelinestages.pipeline_stage import PipelineStage
from pymuse.utils.stoppablequeue import StoppableQueue
from pymuse.signal import Signal
from pymuse.constants import PIPELINE_QUEUE_SIZE
class PipelineFork():
"""
    This class is used to fork a Pipeline. Ex.: PipelineFork([stage1, stage2], [stage3]) forks the pipeline
    into two paths and has two outputs (stage2 and stage3). It is used during the construction of a Pipeline.
"""
def __init__(self, *branches):
self.forked_branches: list = list(branches)
class Pipeline():
"""
    This class creates a multithreaded pipeline. It automatically links together all contiguous stages.
E.g.: Pipeline(Signal(), PipelineStage(), PipelineFork([PipelineStage(), PipelineStage()], [PipelineStage()] ))
"""
def __init__(self, input_signal: Signal, *stages):
self._output_queues = []
self._stages: list = list(stages)
self._link_stages(self._stages)
self._stages[0]._queue_in = input_signal.signal_queue
def get_output_queue(self, queue_index=0) -> StoppableQueue:
"""Return a ref to the queue given by queue_index"""
return self._output_queues[queue_index]
def read_output_queue(self, queue_index=0):
"""Wait to read a data in a queue given by queue_index"""
return self._output_queues[queue_index].get()
def start(self):
"""Start all pipelines stages."""
self._start(self._stages)
def shutdown(self):
""" shutdowns every child thread (PipelineStage)"""
self._shutdown(self._stages)
def join(self):
"""Ensure every thread (PipelineStage) of the pipeline are done"""
for stage in self._stages:
stage.join()
def _link_pipeline_fork(self, stages: list, index: int):
for fork in stages[index].forked_branches:
stages[index - 1].add_queue_out(fork[0].queue_in)
self._link_stages(fork)
def _link_stages(self, stages: list):
for i in range(1, len(stages)):
if type(stages[i]) == PipelineFork:
self._link_pipeline_fork(stages, i)
else:
stages[i - 1].add_queue_out(stages[i].queue_in)
if issubclass(type(stages[-1]), PipelineStage):
output_queue = StoppableQueue(PIPELINE_QUEUE_SIZE)
stages[-1].add_queue_out(output_queue)
self._output_queues.append(output_queue)
def _start(self, stages: list):
for stage in stages:
if type(stage) == PipelineFork:
for forked_branch in stage.forked_branches:
self._start(forked_branch)
else:
stage.start()
def _shutdown(self, stages: list):
for stage in stages:
if type(stage) == PipelineFork:
for forked_branch in stage.forked_branches:
self._shutdown(forked_branch)
else:
stage.shutdown()
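# Construction sketch (added for illustration; StageA, StageB and StageC are
# hypothetical PipelineStage subclasses, not defined in this module):
#
#   pipeline = Pipeline(signal, StageA(), PipelineFork([StageB()], [StageC()]))
#   pipeline.start()
#   item = pipeline.read_output_queue(0)  # output of the StageB branch
#   pipeline.shutdown()
#   pipeline.join()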
|
PolyCortex/pyMuse
|
pymuse/pipeline.py
|
Python
|
mit
| 2,987
| 0.001339
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import abc
import os
import re
from trove.common import cfg
from trove.common import pagination
from trove.common import utils
from trove.guestagent.common import operating_system
CONF = cfg.CONF
def update_dict(updates, target):
"""Recursively update a target dictionary with given updates.
Updates are provided as a dictionary of key-value pairs
where a value can also be a nested dictionary in which case
its key is treated as a sub-section of the outer key.
If a list value is encountered the update is applied
iteratively on all its items.
:returns: Will always return a dictionary of results (may be empty).
"""
if target is None:
target = {}
if isinstance(target, list):
for index, item in enumerate(target):
target[index] = update_dict(updates, item)
return target
if updates is not None:
for k, v in updates.items():
if isinstance(v, abc.Mapping):
target[k] = update_dict(v, target.get(k, {}))
else:
target[k] = updates[k]
return target
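# Worked example (added for illustration; not in the original module):
# nested sections are merged recursively instead of being replaced wholesale.
def _update_dict_example():
    target = {'a': {'b': 1, 'c': 3}, 'd': 4}
    return update_dict({'a': {'b': 2}}, target)
    # -> {'a': {'b': 2, 'c': 3}, 'd': 4}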
def expand_dict(target, namespace_sep='.'):
"""Expand a flat dict to a nested one.
This is an inverse of 'flatten_dict'.
:seealso: flatten_dict
"""
nested = {}
for k, v in target.items():
sub = nested
keys = k.split(namespace_sep)
for key in keys[:-1]:
sub = sub.setdefault(key, {})
sub[keys[-1]] = v
return nested
def flatten_dict(target, namespace_sep='.'):
"""Flatten a nested dict.
Return a one-level dict with all sub-level keys joined by a namespace
separator.
The following nested dict:
{'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}}
would be flattened to:
{'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10}
"""
def flatten(target, keys, namespace_sep):
flattened = {}
if isinstance(target, abc.Mapping):
for k, v in target.items():
flattened.update(
flatten(v, keys + [k], namespace_sep))
else:
ns = namespace_sep.join(keys)
flattened[ns] = target
return flattened
return flatten(target, [], namespace_sep)
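# Round-trip sketch (added for illustration; not in the original module):
# 'flatten_dict' and 'expand_dict' are inverses for purely nested mappings.
def _flatten_expand_example():
    nested = {'ns1': {'ns2a': {'ns3a': True}, 'ns2b': 10}}
    flat = flatten_dict(nested)  # {'ns1.ns2a.ns3a': True, 'ns1.ns2b': 10}
    return expand_dict(flat) == nested  # True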
def build_file_path(base_dir, base_name, *extensions):
"""Build a path to a file in a given directory.
The file may have an extension(s).
:returns: Path such as: 'base_dir/base_name.ext1.ext2.ext3'
"""
file_name = os.extsep.join([base_name] + list(extensions))
return os.path.expanduser(os.path.join(base_dir, file_name))
def to_bytes(value):
"""Convert numbers with a byte suffix to bytes.
"""
if isinstance(value, str):
        pattern = re.compile(r'^(\d+)([KMG])$')
match = pattern.match(value)
if match:
value = match.group(1)
suffix = match.group(2)
factor = {
'K': 1024,
'M': 1024 ** 2,
'G': 1024 ** 3,
}[suffix]
return int(round(factor * float(value)))
return value
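# Conversion sketch (added for illustration; not in the original module):
# only strings with a K/M/G suffix are converted, everything else is
# returned unchanged.
def _to_bytes_examples():
    return to_bytes('2K'), to_bytes('1G'), to_bytes(512), to_bytes('512')
    # -> (2048, 1073741824, 512, '512')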
def paginate_list(li, limit=None, marker=None, include_marker=False):
"""Paginate a list of objects based on the name attribute.
:returns: Page sublist and a marker (name of the last item).
"""
return pagination.paginate_object_list(
li, 'name', limit=limit, marker=marker, include_marker=include_marker)
def serialize_list(li, limit=None, marker=None, include_marker=False):
"""
Paginate (by name) and serialize a given object list.
:returns: A serialized and paginated version of a given list.
"""
page, next_name = paginate_list(li, limit=limit, marker=marker,
include_marker=include_marker)
return [item.serialize() for item in page], next_name
def get_filesystem_volume_stats(fs_path):
try:
stats = os.statvfs(fs_path)
except OSError:
raise RuntimeError("Filesystem not found (%s)" % fs_path)
total = stats.f_blocks * stats.f_bsize
free = stats.f_bfree * stats.f_bsize
    # convert total and used to GB ('free' is left in bytes)
used_gb = utils.to_gb(total - free)
total_gb = utils.to_gb(total)
output = {
'block_size': stats.f_bsize,
'total_blocks': stats.f_blocks,
'free_blocks': stats.f_bfree,
'total': total_gb,
'free': free,
'used': used_gb
}
return output
def get_conf_dir():
"""Get the config directory for the database related settings.
For now, the files inside the config dir are mainly for instance rebuild.
"""
mount_point = CONF.get(CONF.datastore_manager).mount_point
conf_dir = os.path.join(mount_point, 'conf.d')
if not operating_system.exists(conf_dir, is_directory=True, as_root=True):
operating_system.ensure_directory(conf_dir, as_root=True)
return conf_dir
|
openstack/trove
|
trove/guestagent/common/guestagent_utils.py
|
Python
|
apache-2.0
| 5,574
| 0
|
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import pytest
from numpy import isclose
from dolfin import (assemble, dx, FiniteElement, FunctionSpace, inner, MixedElement, split, TestFunction,
TrialFunction, UnitSquareMesh, VectorElement)
from dolfin_utils.test import fixture as module_fixture
from rbnics.backends.dolfin import evaluate as _evaluate, ParametrizedTensorFactory
from rbnics.backends.dolfin.export import tensor_save
from rbnics.backends.dolfin.import_ import tensor_load
from rbnics.eim.utils.decorators import add_to_map_from_parametrized_expression_to_problem
# Meshes
@module_fixture
def mesh():
return UnitSquareMesh(10, 10)
# Forms: elliptic case
def generate_elliptic_linear_form_space(mesh):
return (FunctionSpace(mesh, "Lagrange", 2), )
def generate_elliptic_linear_form(V):
v = TestFunction(V)
return v * dx
def generate_elliptic_bilinear_form_space(mesh):
return generate_elliptic_linear_form_space(mesh) + generate_elliptic_linear_form_space(mesh)
def generate_elliptic_bilinear_form(V1, V2):
assert V1.ufl_element() == V2.ufl_element()
u = TrialFunction(V1)
v = TestFunction(V2)
return u * v * dx
# Forms: mixed case
def generate_mixed_linear_form_space(mesh):
element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_0, element_1)
return (FunctionSpace(mesh, element), )
def generate_mixed_linear_form(V):
v = TestFunction(V)
(v_0, v_1) = split(v)
return v_0[0] * dx + v_0[1] * dx + v_1 * dx
def generate_mixed_bilinear_form_space(mesh):
return generate_mixed_linear_form_space(mesh) + generate_mixed_linear_form_space(mesh)
def generate_mixed_bilinear_form(V1, V2):
assert V1.ufl_element() == V2.ufl_element()
u = TrialFunction(V1)
v = TestFunction(V2)
(u_0, u_1) = split(u)
(v_0, v_1) = split(v)
return inner(u_0, v_0) * dx + u_1 * v_1 * dx + u_0[0] * v_1 * dx + u_1 * v_0[1] * dx
# Forms: collapsed case
def generate_collapsed_linear_form_space(mesh):
element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_0, element_1)
U = FunctionSpace(mesh, element)
V = U.sub(0).collapse()
return (V, )
def generate_collapsed_linear_form(V):
v = TestFunction(V)
return v[0] * dx + v[1] * dx
def generate_collapsed_bilinear_form_space(mesh):
element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_0, element_1)
U = FunctionSpace(mesh, element)
V = U.sub(0).collapse()
return (V, U)
def generate_collapsed_bilinear_form(V, U):
u = TrialFunction(U)
(u_0, u_1) = split(u)
v = TestFunction(V)
return inner(u_0, v) * dx + u_1 * v[0] * dx
# Forms decorator
generate_form_spaces_and_forms = pytest.mark.parametrize("generate_form_space, generate_form", [
(generate_elliptic_linear_form_space, generate_elliptic_linear_form),
(generate_elliptic_bilinear_form_space, generate_elliptic_bilinear_form),
(generate_mixed_linear_form_space, generate_mixed_linear_form),
(generate_mixed_bilinear_form_space, generate_mixed_bilinear_form),
(generate_collapsed_linear_form_space, generate_collapsed_linear_form),
(generate_collapsed_bilinear_form_space, generate_collapsed_bilinear_form)
])
# Mock problem to avoid triggering an assert
class Problem(object):
mu = None
def evaluate(tensor):
add_to_map_from_parametrized_expression_to_problem(tensor, Problem())
return _evaluate(tensor)
# Prepare tensor storage for load
class Generator(object):
def __init__(self, form):
self._form = form
def zero_for_load(form):
tensor = assemble(form, keep_diagonal=True)
tensor.zero()
tensor.generator = Generator(form)
return tensor
# Tests
@generate_form_spaces_and_forms
def test_tensor_save(mesh, generate_form_space, generate_form, save_tempdir):
space = generate_form_space(mesh)
form = generate_form(*space)
tensor = ParametrizedTensorFactory(form)
evaluated_tensor = evaluate(tensor)
tensor_save(evaluated_tensor, save_tempdir, "evaluated_tensor")
@generate_form_spaces_and_forms
def test_tensor_load(mesh, generate_form_space, generate_form, load_tempdir):
space = generate_form_space(mesh)
form = generate_form(*space)
tensor = ParametrizedTensorFactory(form)
expected_evaluated_tensor = evaluate(tensor)
loaded_evaluated_tensor = zero_for_load(form)
tensor_load(loaded_evaluated_tensor, load_tempdir, "evaluated_tensor")
assert len(space) in (1, 2)
if len(space) == 1:
assert isclose(loaded_evaluated_tensor.get_local(), expected_evaluated_tensor.get_local()).all()
elif len(space) == 2:
assert isclose(loaded_evaluated_tensor.array(), expected_evaluated_tensor.array()).all()
@generate_form_spaces_and_forms
def test_tensor_io(mesh, generate_form_space, generate_form, tempdir):
test_tensor_save(mesh, generate_form_space, generate_form, tempdir)
test_tensor_load(mesh, generate_form_space, generate_form, tempdir)
|
mathLab/RBniCS
|
tests/unit/backends/dolfin/test_tensor_io.py
|
Python
|
lgpl-3.0
| 5,343
| 0.001684
|
# Generated by Django 2.0.13 on 2019-08-10 20:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile_manager', '0004_auto_20190729_2101'),
]
operations = [
migrations.AddField(
model_name='profile',
name='get_messages_by_email',
            field=models.BooleanField(default=True, help_text='If your teacher sends you a message, get an instant email.'),
),
]
|
timberline-secondary/hackerspace
|
src/profile_manager/migrations/0005_profile_get_messages_by_email.py
|
Python
|
gpl-3.0
| 484
| 0.002066
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'RegisteredIndex.query_hash'
db.alter_column('djangodocument_registeredindex', 'query_hash', self.gf('django.db.models.fields.CharField')(max_length=128))
def backwards(self, orm):
# Changing field 'RegisteredIndex.query_hash'
db.alter_column('djangodocument_registeredindex', 'query_hash', self.gf('django.db.models.fields.BigIntegerField')())
models = {
'djangodocument.booleanindex': {
'Meta': {'object_name': 'BooleanIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'djangodocument.dateindex': {
'Meta': {'object_name': 'DateIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.DateField', [], {'null': 'True'})
},
'djangodocument.datetimeindex': {
'Meta': {'object_name': 'DateTimeIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'djangodocument.decimalindex': {
'Meta': {'object_name': 'DecimalIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10'})
},
'djangodocument.documentstore': {
'Meta': {'object_name': 'DocumentStore'},
'collection': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'djangodocument.floatindex': {
'Meta': {'object_name': 'FloatIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'djangodocument.integerindex': {
'Meta': {'object_name': 'IntegerIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'djangodocument.longindex': {
'Meta': {'object_name': 'LongIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'})
},
'djangodocument.registeredindex': {
'Meta': {'unique_together': "[('name', 'collection')]", 'object_name': 'RegisteredIndex'},
'collection': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'query_hash': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'djangodocument.registeredindexdocument': {
'Meta': {'object_name': 'RegisteredIndexDocument'},
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'doc_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': "orm['djangodocument.RegisteredIndex']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'djangodocument.stringindex': {
'Meta': {'object_name': 'StringIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True'})
},
'djangodocument.textindex': {
'Meta': {'object_name': 'TextIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'djangodocument.timeindex': {
'Meta': {'object_name': 'TimeIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.TimeField', [], {'null': 'True'})
}
}
complete_apps = ['djangodocument']
|
zbyte64/django-dockit
|
dockit/backends/djangodocument/migrations/0002_auto__chg_field_registeredindex_query_hash.py
|
Python
|
bsd-3-clause
| 7,475
| 0.007358
|
from .local import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'temp.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
OPBEAT['APP_ID'] = None
|
pkimber/kbsoftware_couk
|
settings/dev_test.py
|
Python
|
apache-2.0
| 247
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-23 11:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gestioneide', '0019_auto_20160517_2232'),
]
operations = [
migrations.AlterField(
model_name='festivo',
name='anotacion',
field=models.CharField(default=b'', max_length=50),
),
]
|
Etxea/gestioneide
|
gestioneide/migrations/0020_auto_20160523_1329.py
|
Python
|
gpl-3.0
| 471
| 0
|
# -*- coding: utf8 -*-
#
# Created by 'myth' on 2/19/16
import matplotlib as mpl
import settings
mpl.use('TkAgg')
|
myth/trashcan
|
it3708/project3/modules/__init__.py
|
Python
|
gpl-2.0
| 116
| 0
|
import json
from flask import g, jsonify, request, current_app, url_for
from ..models import User
from .. import db
from . import main
from .authentication import auth_user
from .errors import bad_request, unauthorized, forbidden, not_found
"""read all"""
@main.route('/<token>/users/', methods=['GET'])
def get_users(token):
if not auth_user(token):
return unauthorized("You have to be logged in to perform this action")
# get and return all:
users = User.query.all()
list_of_dicts = [json.loads(user.to_json()) for user in users]
return json.dumps(list_of_dicts)
"""read one"""
@main.route('/<token>/users/<int:id>/', methods=['GET'])
def get_user(token, id):
if not auth_user(token):
return unauthorized("You have to be logged in to perform this action")
# get and return one with id:
user = User.query.get(id)
    if user is None:
        return not_found("Resource not found")
return user.to_json()
"""create"""
@main.route('/users/', methods=['POST']) #sign-up
def new_user():
# create and commit user
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
if email in ("", None) or password in ("", None):
return bad_request("Invalid request format!")
user = User(username=username, email=email)
user.password = password
db.session.add(user)
db.session.commit()
# get auth_token for the user:
auth_token = user.generate_auth_token(3600*24)
# create and send response
response = {}
response["user"] = user.to_json()
response["auth_token"] = auth_token
response["status"] = "success"
return jsonify(response)
"""update"""
@main.route('/<token>/users/<int:id>/', methods=['PUT'])
def update_user(token, id):
if not auth_user(token):
return unauthorized("You have to be logged in to perform this action")
user = User.query.get(id)
    if not user:
        return not_found("Resource not found!")
# create and commit user
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
if email in ("", None) or password in ("", None):
return bad_request("Invalid request format!")
user.username = username
user.email = email
user.password = password
db.session.add(user)
db.session.commit()
# create and send response
response = {}
response["user"] = user.to_json()
response["status"] = "success"
return jsonify(response)
"""delete"""
@main.route('/<token>/users/<int:id>/', methods=["DELETE"])
def delete_user(token, id):
if not auth_user(token):
return unauthorized("You have to be logged in to perform this action")
user = User.query.get(id)
    if not user:
        return not_found("Resource not found!")
# delete and commit
db.session.delete(user)
db.session.commit()
    # ! delete associated watches and checks!
#
# create and send response
response = {}
response["status"] = "success"
return json.dumps(response)
"""login"""
@main.route('/users/login/', methods=['POST'])
def login():
# get credentials
email = request.form.get('email')
password = request.form.get('password')
if email in ("", None) or password in ("", None):
return bad_request("Invalid request format!")
# check for a user with matching credentials
user = User.query.filter_by(email=email).first()
    if user is None or not user.verify_password(password):
return bad_request("Invalid email or password!")
# set the global current_user
g.current_user = user
# get auth_token for the user
auth_token = user.generate_auth_token(3600*24) #1day
# create response
response = {}
response["user"] = user.to_json()
response["auth_token"] = auth_token
return jsonify(response)
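# Client-side sketch (hypothetical, added for illustration; 'base_url' and
# the 'requests' usage are assumptions, not part of this module):
#   resp = requests.post(base_url + '/users/login/',
#                        data={'email': 'a@b.co', 'password': 'secret'})
#   token = resp.json()['auth_token']
#   users = requests.get(base_url + '/%s/users/' % token).json()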
|
andela-bojengwa/team3
|
monitorbot_api/app/main/users.py
|
Python
|
mit
| 3,988
| 0.007773
|
import serial
import numpy as np
import json
from datetime import datetime
class ElectronicNose:
    def __init__(self, devAdd='/dev/ttyUSB0', baudrate=115200 // 3, \
tmax = 1000, outputFile = '', numSensors = 8):
## Creating the serial object
self.Sensor = serial.Serial(devAdd, baudrate)
self.memory = np.empty((0, numSensors + 2 + 1))
## File to store samples
if outputFile != '':
self.outfile = open(outputFile, 'a')
else:
self.outfile = []
## Writing the parameters
Vparam = '54'
if False: self.Sensor.write('P000' + 8*Vparam )
return
def save(self, filename):
np.save(filename, self.memory)
return
def closeConnection(self):
self.Sensor.close()
return
def forget(self):
self.memory = np.empty( (0, self.memory.shape[1] ) )
return
def refresh(self, nmax):
self.t[:self.tMax - nmax] = self.t[nmax:]
self.S[:self.tMax - nmax,:] = self.S[nmax:,:]
return
def sniff(self, nsamples=5):
# Flushing to ensure time precision
self.Sensor.flush()
# Possibly getting partial line -- this will be discarded
self.Sensor.readline()
avg = np.zeros( (1,11) )
nsamples_ = 0
for j in range(nsamples):
r = self.Sensor.readline()
if len(r) == 44:
nsamples_ += 1
avg[0,1:] += self.convert( r.split('\rV')[1].split('\n')[0][8:39] )
if nsamples_ > 0:
avg = avg/float(nsamples_)
now = datetime.now()
avg[0,0] = now.hour*3600 + now.minute*60 + now.second + now.microsecond/1.e6
self.memory = np.concatenate( (self.memory, np.reshape(avg, (1,11)) ), axis=0 )
return
def convert(self, string):
s = np.zeros(10)
# Converting 8 sensors
for j in range(8):
s[j] = int( string[j*3:j*3+3] , 16 )
# Converting temperature and humidity
s[8] = int( string[24:28] , 16)
s[9] = int( string[28:31] , 16)
return s
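# Layout note (added for clarity): 'convert' expects the 31-character hex
# payload extracted in 'sniff': positions 0-23 hold eight 3-digit sensor
# readings, 24-27 a 4-digit temperature word, and 28-30 a 3-digit humidity
# word.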
if __name__ == "__main__":
# Instantiating the class
EN = ElectronicNose()
# Acquiring some data
EN.sniff(1000)
# Closing connection
EN.closeConnection()
|
VandroiyLabs/FaroresWind
|
faroreswind/collector/ElectronicNose.py
|
Python
|
gpl-3.0
| 2,352
| 0.019133
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import widgets
from mox import IsA
from horizon import api
from horizon import test
class VolumeViewTests(test.TestCase):
@test.create_stubs({api: ('tenant_quota_usages', 'volume_create',
'volume_snapshot_list')})
def test_create_volume(self):
volume = self.volumes.first()
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50, 'snapshot_source': ''}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=None).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({api: ('tenant_quota_usages', 'volume_create',
'volume_snapshot_list'),
api.nova: ('volume_snapshot_get',)})
def test_create_volume_from_snapshot(self):
volume = self.volumes.first()
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
snapshot = self.volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50, 'snapshot_source': snapshot.id}
# first call- with url param
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=snapshot.id).\
AndReturn(volume)
# second call- with dropdown
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=snapshot.id).\
AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({api: ('tenant_quota_usages',),
api.nova: ('volume_snapshot_get',)})
def test_create_volume_from_snapshot_invalid_size(self):
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
snapshot = self.volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GB)")
@test.create_stubs({api: ('tenant_quota_usages', 'volume_snapshot_list')})
def test_create_volume_gb_used_over_alloted_quota(self):
usage = {'gigabytes': {'available': 100, 'used': 20}}
formData = {'name': u'This Volume Is Huge!',
'description': u'This is a volume that is just too big!',
'method': u'CreateForm',
'size': 5000}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'A volume of 5000GB cannot be created as you only'
' have 100GB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({api: ('tenant_quota_usages', 'volume_snapshot_list')})
def test_create_volume_number_over_alloted_quota(self):
usage = {'gigabytes': {'available': 100, 'used': 20},
'volumes': {'available': 0}}
formData = {'name': u'Too Many...',
'description': u'We have no volumes left!',
'method': u'CreateForm',
'size': 10}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({api: ('volume_list',
'volume_delete',
'server_list')})
def test_delete_volume(self):
volume = self.volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
api.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(self.volumes.list())
api.volume_delete(IsA(http.HttpRequest), volume.id)
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
api.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(self.volumes.list())
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:index')
res = self.client.post(url, formData, follow=True)
self.assertMessageCount(res, count=0)
@test.create_stubs({api: ('volume_list',
'volume_delete',
'server_list')})
def test_delete_volume_error_existing_snapshot(self):
volume = self.volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
exc = self.exceptions.cinder.__class__(400,
"error: dependent snapshots")
api.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(self.volumes.list())
api.volume_delete(IsA(http.HttpRequest), volume.id). \
AndRaise(exc)
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
api.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(self.volumes.list())
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:index')
res = self.client.post(url, formData, follow=True)
self.assertMessageCount(res, error=1)
self.assertEqual(list(res.context['messages'])[0].message,
u'Unable to delete volume "%s". '
u'One or more snapshots depend on it.' %
volume.display_name)
@test.create_stubs({api: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments(self):
volume = self.volumes.first()
servers = self.servers.list()
api.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn(servers)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:attach', args=[volume.id])
res = self.client.get(url)
# Asserting length of 2 accounts for the one instance option,
# and the one 'Choose Instance' option.
form = res.context['form']
self.assertEqual(len(form.fields['instance']._choices),
2)
self.assertEqual(res.status_code, 200)
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
@test.create_stubs({api: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments_cannot_set_mount_point(self):
PREV = settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point']
settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = False
volume = self.volumes.first()
servers = self.servers.list()
api.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn(servers)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:attach', args=[volume.id])
res = self.client.get(url)
# Assert the device field is hidden.
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.HiddenInput))
settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = PREV
@test.create_stubs({api: ('volume_get',),
api.nova: ('server_get', 'server_list',)})
def test_edit_attachments_attached_volume(self):
server = self.servers.first()
volume = self.volumes.list()[0]
api.volume_get(IsA(http.HttpRequest), volume.id) \
.AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
"Select an instance")
self.assertEqual(len(res.context['form'].fields['instance'].choices),
2)
self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
server.id)
self.assertEqual(res.status_code, 200)
@test.create_stubs({api.nova: ('volume_get', 'server_get',)})
def test_detail_view(self):
volume = self.volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
api.nova.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res, "<dd>Volume name</dd>", 1, 200)
self.assertContains(res,
"<dd>41023e92-8008-4c8b-8059-7f2293ff3775</dd>",
1,
200)
self.assertContains(res, "<dd>Available</dd>", 1, 200)
self.assertContains(res, "<dd>40 GB</dd>", 1, 200)
self.assertContains(res,
("<a href=\"/project/instances/1/\">%s</a>"
% server.name),
1,
200)
self.assertNoMessages()
|
1ukash/horizon
|
horizon/dashboards/project/volumes/tests.py
|
Python
|
apache-2.0
| 14,507
| 0.001103
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-20 01:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('forum_conversation', '0009_auto_20160925_2126'),
]
operations = [
migrations.AlterField(
model_name='topic',
name='first_post',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='forum_conversation.Post', verbose_name='First post'),
),
]
|
ellmetha/django-machina
|
tests/_testsite/apps/forum_conversation/migrations/0010_auto_20170120_0224.py
|
Python
|
bsd-3-clause
| 644
| 0.001553
|
"""
This GA code creates the gaModel with a circular island model
"""
from operator import attrgetter
# import sys
from deap import base, creator, tools
import numpy
from csep.loglikelihood import calcLogLikelihood as loglikelihood
from models.mathUtil import calcNumberBins
import models.model
import random
import array
import multiprocessing
from mpi4py import MPI
import time
def evaluationFunction(individual, modelOmega, mean):
"""
This function calculates the loglikelihood of a model (individual) with
the real data from the prior X years (modelOmega, with length X).
It selects the smallest loglikelihood value.
"""
logValue = float('Infinity')
genomeModel=type(modelOmega[0])
for i in range(len(modelOmega)):
genomeModel.bins=list(individual)
modelLambda=type(modelOmega[0])
modelLambda.bins=calcNumberBins(genomeModel.bins, mean, modelOmega[i].values4poisson)
tempValue=loglikelihood(modelLambda, modelOmega[i])
if tempValue < logValue:
logValue = tempValue
return logValue,
def gaModel(NGEN,CXPB,MUTPB,modelOmega,year,region, mean, FREQ = 10, n_aval=50000):
"""
    The main function. It evolves models, namely modelLambda (the individuals).
    This applies the gaModel with a circular island model.
    It uses two levels of parallelism: (1) a simple one that splits the GA
    evolution between cores, and (2) one that distributes the islands.
"""
start = time.clock()
# Attribute generator
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
toolbox.register("attr_float", random.random)
toolbox.register("evaluate", evaluationFunction, modelOmega=modelOmega, mean= mean)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxOnePoint)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of three individuals
# drawn randomly from the current generation.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
    # Calculate the population size from the requested number of evaluations
    y = int(n_aval / NGEN)
    x = n_aval - y * NGEN
    n = x + y
pop = toolbox.population(n)
# Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))  # pass two sets of model.bins: the real data and the generated model
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
    # Generations 1 to NGEN
    # Create the MPI communicator and the (rotating, not fixed) island ring
target = 0
info = MPI.Status()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
mpi_info = MPI.Info.Create()
logbook = tools.Logbook()
logbook.header = "rank","gen","min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
for g in range(NGEN):
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
        # The population is entirely replaced by the offspring, except that this island's best individual is preserved
#Elitism
best_pop = tools.selBest(pop, 1)[0]
        offspring = sorted(offspring, key=attrgetter("fitness"), reverse=True)
        offspring[-1] = best_pop
random.shuffle(offspring)
pop[:] = offspring
#migration
        if g % (FREQ-1) == 0 and g > 0:
            best_inds = tools.selBest(pop, 1)[0]
            data = comm.sendrecv(sendobj=best_inds, dest=dest, source=origin)
            # rotation
            target += 1
            origin = (rank - (target+1)) % size
            dest = (rank + ((target+1) + size)) % size
            # replace a random individual with the received migrant
            pop[random.randint(0, len(pop)-1)] = data
            del best_pop
            del data
#logBook
record = stats.compile(pop)
        logbook.record(rank=rank, gen=g, **record)
# choose the best value
if rank == 0:
        best_pop = tools.selBest(pop, 1)[0]
best_all_pop = list()
best_all_pop.append(best_pop)
        for thread in range(size):
            if (thread != 0):
                local_best = comm.recv(source=thread)
                best_all_pop.append(local_best)
        maximum = float('-inf')
        for local_best in best_all_pop:
            local_maximum = evaluationFunction(local_best, modelOmega, mean)
            if maximum < local_maximum[0]:
                maximum = local_maximum[0]
                best_pop = local_best
else:
        best_pop = tools.selBest(pop, 1)[0]
comm.send(best_pop, dest=0)
end = time.clock()
generatedModel = type(modelOmega[0])
generatedModel.prob = best_pop
generatedModel.bins = calcNumberBins(best_pop, modelOmega[0].bins)
generatedModel.loglikelihood = best_pop.fitness.values
generatedModel.definitions = modelOmega[0].definitions
    generatedModel.time = end - start
generatedModel.logbook = logbook
return generatedModel
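# A hedged launch sketch (assumed invocation, not documented in the original
# repo): something like `mpirun -n 4 python parallelGAModelP_AVR.py` would run
# one island per rank, with rank 0 gathering every island's best individual
# and keeping the one with the highest loglikelihood.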
if __name__ == "__main__":
gaModel()
|
PyQuake/earthquakemodels
|
code/gaModel/parallelGAModelP_AVR.py
|
Python
|
bsd-3-clause
| 6,277
| 0.035686
|
# -*- coding: utf-8 -*-
#
# Point Tracker documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 25 14:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import os.path
from glob import glob
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join('..', '..', 'src')))
possible_jsmath_paths = [ os.path.join(os.environ['HOME'],'apps', 'network', 'jsMath*'),
os.path.join(os.environ['HOME'], 'apps', 'science', 'jsMath*'), '/usr/share/jsmath' ]
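# Nested for/else search: the inner "else" runs only when the glob matched
# nothing (no break), so the next candidate path is tried; the outer "else"
# runs only when every candidate was exhausted without locating jsMath.js.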
for filt in possible_jsmath_paths:
for pth in glob(filt):
if os.path.exists(os.path.join(pth, 'jsMath.js')):
jsmath_path = pth
break
else:
continue
break
else:
print >> sys.stderr, "Error, couldn't find the path for jsmath, please edit the possible_jsmath_paths variable."
sys.exit(2)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.jsmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Point Tracker'
copyright = u'2010, Barbier de Reuille, Pierre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
release = '0.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PointTrackerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PointTracker.tex', u'Point Tracker Documentation',
u'Barbier de Reuille, Pierre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
PierreBdR/point_tracker
|
doc/source/conf.py
|
Python
|
gpl-2.0
| 7,170
| 0.006834
|
-
def __init__(self):
-
|
chris-j-tang/GLS
|
test/integration/ConstructorStart/simple.py
|
Python
|
mit
| 24
| 0.083333
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto
import lxml.html as LH
import lxml.html.clean as clean
import os
import re
import json
import requests
from requests.exceptions import ConnectionError
from requests import post
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
from subprocess32 import PIPE
from collections import defaultdict
from sketchy import db, app, celery
from sketchy.models.capture import Capture
from sketchy.models.static import Static
from sketchy.controllers.validators import grab_domain
import subprocess32
@celery.task(name='check_url', bind=True)
def check_url(self, capture_id=0, retries=0, model='capture'):
"""
Check if a URL exists without downloading the whole file.
We only check the URL header.
"""
capture_record = Capture.query.filter(Capture.id == capture_id).first()
capture_record.job_status = 'STARTED'
# Write the number of retries to the capture record
db.session.add(capture_record)
capture_record.retry = retries
db.session.commit()
    # Issue a GET (without following redirects) and record only the response status code
try:
response = ""
verify_ssl = app.config['SSL_HOST_VALIDATION']
response = requests.get(capture_record.url, verify=verify_ssl, allow_redirects=False, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:28.0) Gecko/20100101 Firefox/28.0"})
capture_record.url_response_code = response.status_code
if capture_record.status_only:
capture_record.job_status = 'COMPLETED'
capture_record.capture_status = '%s HTTP STATUS CODE' % (response.status_code)
if capture_record.callback:
finisher(capture_record)
else:
capture_record.capture_status = '%s HTTP STATUS CODE' % (response.status_code)
# If URL doesn't return a valid status code or times out, raise an exception
except Exception as err:
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.url_response_code = 0
check_url.retry(kwargs={'capture_id': capture_id, 'retries': capture_record.retry + 1}, exc=err, countdown=app.config['COOLDOWN'], max_retries=app.config['MAX_RETRIES'])
    # Always persist the record's latest status before returning
finally:
db.session.commit()
return str(response.status_code)
def do_capture(status_code, the_record, base_url, model='capture', phantomjs_timeout=app.config['PHANTOMJS_TIMEOUT']):
"""
Create a screenshot, text scrape, from a provided html file.
This depends on phantomjs and an associated javascript file to perform the captures.
In the event an error occurs, an exception is raised and handled by the celery task
or the controller that called this method.
"""
    # Make sure the_record is attached to the current database session
    db.session.add(the_record)
    # If the capture is for static content, use a different PhantomJS config file
if model == 'static':
capture_name = the_record.filename
service_args = [
app.config['PHANTOMJS'],
'--ssl-protocol=any',
'--ignore-ssl-errors=yes',
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/static.js',
app.config['LOCAL_STORAGE_FOLDER'],
capture_name]
content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)
else:
capture_name = grab_domain(the_record.url) + '_' + str(the_record.id)
service_args = [
app.config['PHANTOMJS'],
'--ssl-protocol=any',
'--ignore-ssl-errors=yes',
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/capture.js',
the_record.url,
os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)]
content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.html')
# Using subprocess32 backport, call phantom and if process hangs kill it
pid = subprocess32.Popen(service_args, stdout=PIPE, stderr=PIPE)
try:
stdout, stderr = pid.communicate(timeout=phantomjs_timeout)
except subprocess32.TimeoutExpired:
pid.kill()
stdout, stderr = pid.communicate()
app.logger.error('PhantomJS Capture timeout at {} seconds'.format(phantomjs_timeout))
raise subprocess32.TimeoutExpired('phantomjs capture',phantomjs_timeout)
    # PhantomJS is expected to be silent on success; any stdout or stderr output is treated as an error
if stderr or stdout:
raise Exception(stderr)
# Strip tags and parse out all text
ignore_tags = ('script', 'noscript', 'style')
with open(content_to_parse, 'r') as content_file:
content = content_file.read()
cleaner = clean.Cleaner()
content = cleaner.clean_html(content)
doc = LH.fromstring(content)
output = ""
for elt in doc.iterdescendants():
if elt.tag in ignore_tags:
continue
text = elt.text or ''
tail = elt.tail or ''
wordz = " ".join((text, tail)).strip('\t')
if wordz and len(wordz) >= 2 and not re.match("^[ \t\n]*$", wordz):
output += wordz.encode('utf-8')
# Since the filename format is different for static captures, update the filename
# This will ensure the URLs are pointing to the correct resources
if model == 'static':
capture_name = capture_name.split('.')[0]
    # Write the parsed text into our capture folder
parsed_text = open(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.txt'), 'wb')
parsed_text.write(output)
# Update the sketch record with the local URLs for the sketch, scrape, and html captures
the_record.sketch_url = base_url + '/files/' + capture_name + '.png'
the_record.scrape_url = base_url + '/files/' + capture_name + '.txt'
the_record.html_url = base_url + '/files/' + capture_name + '.html'
# Create a dict that contains what files may need to be written to S3
files_to_write = defaultdict(list)
files_to_write['sketch'] = capture_name + '.png'
files_to_write['scrape'] = capture_name + '.txt'
files_to_write['html'] = capture_name + '.html'
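    # e.g. for a hypothetical capture of example.com with id 12 this yields:
    #     {'sketch': 'example.com_12.png',
    #      'scrape': 'example.com_12.txt',
    #      'html': 'example.com_12.html'}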
# If we are not writing to S3, update the capture_status that we are completed.
if not app.config['USE_S3']:
the_record.job_status = "COMPLETED"
the_record.capture_status = "LOCAL_CAPTURES_CREATED"
else:
the_record.capture_status = "LOCAL_CAPTURES_CREATED"
db.session.commit()
return files_to_write
def s3_save(files_to_write, the_record):
"""
Write a sketch, scrape, and html file to S3
"""
db.session.add(the_record)
# These are the content-types for the files S3 will be serving up
    response_types = {'sketch': 'image/png', 'scrape': 'text/plain', 'html': 'text/html'}
# Iterate through each file we need to write to s3
for capture_type, file_name in files_to_write.items():
# Connect to S3, generate Key, set path based on capture_type, write file to S3
conn = boto.s3.connect_to_region(
region_name = app.config.get('S3_BUCKET_REGION_NAME'),
calling_format = boto.s3.connection.OrdinaryCallingFormat()
)
key = Key(conn.get_bucket(app.config.get('S3_BUCKET_PREFIX')))
path = "sketchy/{}/{}".format(capture_type, the_record.id)
key.key = path
key.set_contents_from_filename(app.config['LOCAL_STORAGE_FOLDER'] + '/' + file_name)
# Generate a URL for downloading the files
url = conn.generate_url(
app.config.get('S3_LINK_EXPIRATION'),
'GET',
bucket=app.config.get('S3_BUCKET_PREFIX'),
key=key.key,
response_headers={
                'response-content-type': response_types[capture_type],
'response-content-disposition': 'attachment; filename=' + file_name
})
# Generate appropriate url based on capture_type
if capture_type == 'sketch':
the_record.sketch_url = str(url)
if capture_type == 'scrape':
the_record.scrape_url = str(url)
if capture_type == 'html':
the_record.html_url = str(url)
# Remove local files if we are saving to S3
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['sketch']))
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['scrape']))
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['html']))
    # If a callback is set, the finisher task will mark the job complete; otherwise mark it complete now
if the_record.callback:
the_record.capture_status = 'S3_ITEMS_SAVED'
else:
the_record.capture_status = 'S3_ITEMS_SAVED'
the_record.job_status = 'COMPLETED'
db.session.commit()
def finisher(the_record):
"""
POST finished chain to a callback URL provided
"""
db.session.add(the_record)
verify_ssl = app.config['SSL_HOST_VALIDATION']
# Set the correct headers for the postback
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'Connection': 'close'}
#proxy = {"http": "127.0.0.1:8080"}
req = post(the_record.callback, verify=verify_ssl, data=json.dumps(the_record.as_dict()), headers=headers)
# If a 4xx or 5xx status is recived, raise an exception
req.raise_for_status()
# Update capture_record and save to database
the_record.job_status = 'COMPLETED'
the_record.capture_status = 'CALLBACK_SUCCEEDED'
db.session.add(the_record)
db.session.commit()
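# A hedged sketch of the callback POST issued above (keys follow the record's
# as_dict(), shown here with assumed values):
#     POST <callback-url>   Content-type: application/json
#     {"id": 12, "job_status": "STARTED", "capture_status": "S3_ITEMS_SAVED", ...}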
@celery.task(name='celery_static_capture', ignore_result=True, bind=True)
def celery_static_capture(self, base_url, capture_id=0, retries=0, model="static"):
"""
Celery task used to create a sketch and scrape with a provided static HTML file.
Task also writes files to S3 or posts a callback depending on configuration file.
"""
static_record = Static.query.filter(Static.id == capture_id).first()
# Write the number of retries to the capture record
db.session.add(static_record)
static_record.retry = retries
db.session.commit()
# First perform the captures, then either write to S3, perform a callback, or neither
try:
# call the main capture function to retrieve sketches and scrapes
files_to_write = do_capture(0, static_record, base_url, model='static')
        # Call the s3 save function if s3 is configured, and perform callback if configured.
if app.config['USE_S3']:
if static_record.callback:
s3_save(files_to_write, static_record)
finisher(static_record)
else:
s3_save(files_to_write, static_record)
elif static_record.callback:
finisher(static_record)
    # Only execute retries on ConnectionError exceptions, otherwise fail immediately
except ConnectionError as err:
app.logger.error(err)
static_record.job_status = 'RETRY'
static_record.capture_status = str(err)
static_record.retry = retries + 1
db.session.commit()
        raise celery_static_capture.retry(args=[base_url],
            kwargs={'capture_id': capture_id, 'retries': static_record.retry + 1, 'model': 'static'}, exc=err,
            countdown=app.config['COOLDOWN'],
            max_retries=app.config['MAX_RETRIES'])
# Catch exceptions raised by any functions called
except Exception as err:
app.logger.error(err)
static_record.job_status = 'FAILURE'
if str(err):
static_record.capture_status = str(err)
raise Exception
finally:
db.session.commit()
@celery.task(name='celery_capture', ignore_result=True, bind=True)
def celery_capture(self, status_code, base_url, capture_id=0, retries=0, model="capture", phantomjs_timeout=app.config['PHANTOMJS_TIMEOUT']):
"""
Celery task used to create sketch, scrape, html.
Task also writes files to S3 or posts a callback depending on configuration file.
"""
capture_record = Capture.query.filter(Capture.id == capture_id).first()
# Write the number of retries to the capture record
db.session.add(capture_record)
capture_record.retry = retries
db.session.commit()
try:
# Perform a callback or complete the task depending on error code and config
        if capture_record.url_response_code > 400 and not app.config['CAPTURE_ERRORS']:
if capture_record.callback:
finisher(capture_record)
else:
capture_record.job_status = 'COMPLETED'
return True
    # Only execute retries on ConnectionError exceptions, otherwise fail immediately
except ConnectionError as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
        raise celery_capture.retry(args=[status_code, base_url],
            kwargs={'capture_id': capture_id, 'retries': capture_record.retry + 1, 'model': 'capture'}, exc=err,
            countdown=app.config['COOLDOWN'],
            max_retries=app.config['MAX_RETRIES'])
except Exception as err:
app.logger.error(err)
capture_record.job_status = 'FAILURE'
        if str(err):
            capture_record.capture_status = str(err)
finally:
db.session.commit()
# First perform the captures, then either write to S3, perform a callback, or neither
try:
# call the main capture function to retrieve sketches, scrapes, and html
files_to_write = do_capture(status_code, capture_record, base_url, model='capture', phantomjs_timeout=phantomjs_timeout)
        # Call the s3 save function if s3 is configured, and perform callback if configured.
if app.config['USE_S3']:
if capture_record.callback:
s3_save(files_to_write, capture_record)
finisher(capture_record)
else:
s3_save(files_to_write, capture_record)
elif capture_record.callback:
finisher(capture_record)
# If the screenshot generation timed out, try to render again
except subprocess32.TimeoutExpired as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
raise celery_capture.retry(args=[status_code, base_url],
            kwargs={'capture_id': capture_id, 'retries': capture_record.retry, 'model': 'capture', 'phantomjs_timeout': (capture_record.retry * 5) + phantomjs_timeout}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
# Retry on connection error exceptions
except ConnectionError as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
raise celery_capture.retry(args=[status_code, base_url],
            kwargs={'capture_id': capture_id, 'retries': capture_record.retry, 'model': 'capture'}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
# For all other exceptions, fail immediately
except Exception as err:
app.logger.error(err)
if str(err):
capture_record.capture_status = str(err)
capture_record.job_status = 'FAILURE'
raise Exception
finally:
db.session.commit()
|
odin1314/sketchy
|
sketchy/controllers/tasks.py
|
Python
|
apache-2.0
| 16,270
| 0.004856
|
import json
import unittest2
from google.appengine.ext import testbed
from consts.media_type import MediaType
from helpers.media_helper import MediaParser
from helpers.webcast_helper import WebcastParser
class TestMediaUrlParser(unittest2.TestCase):
def setUp(cls):
cls.testbed = testbed.Testbed()
cls.testbed.activate()
cls.testbed.init_urlfetch_stub()
def tearDown(cls):
cls.testbed.deactivate()
def test_youtube_parse(self):
yt_long = MediaParser.partial_media_dict_from_url("http://www.youtube.com/watch?v=I-IrVbsl_K8")
self.assertEqual(yt_long['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_long['foreign_key'], "I-IrVbsl_K8")
yt_short = MediaParser.partial_media_dict_from_url("http://youtu.be/I-IrVbsl_K8")
self.assertEqual(yt_short['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_short['foreign_key'], "I-IrVbsl_K8")
yt_from_playlist = MediaParser.partial_media_dict_from_url("https://www.youtube.com/watch?v=VP992UKFbko&index=1&list=PLZT9pIgNOV6ZE0EgstWeoRWGWT3uoaszm")
self.assertEqual(yt_from_playlist['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_from_playlist['foreign_key'], 'VP992UKFbko')
    # def test_cdphotothread_parse(self):
# cd = MediaParser.partial_media_dict_from_url(
# "https://www.chiefdelphi.com/media/photos/41999")
# self.assertEqual(cd['media_type_enum'], MediaType.CD_PHOTO_THREAD)
# self.assertEqual(cd['foreign_key'], "41999")
# self.assertTrue(cd['details_json'])
# details = json.loads(cd['details_json'])
# self.assertEqual(details['image_partial'], "a88/a880fa0d65c6b49ddb93323bc7d2e901_l.jpg")
def test_imgur_parse(self):
imgur_img = MediaParser.partial_media_dict_from_url("http://imgur.com/zYqWbBh")
self.assertEqual(imgur_img['media_type_enum'], MediaType.IMGUR)
self.assertEqual(imgur_img['foreign_key'], "zYqWbBh")
imgur_img = MediaParser.partial_media_dict_from_url("http://i.imgur.com/zYqWbBh.png")
self.assertEqual(imgur_img['media_type_enum'], MediaType.IMGUR)
self.assertEqual(imgur_img['foreign_key'], "zYqWbBh")
self.assertEqual(MediaParser.partial_media_dict_from_url("http://imgur.com/r/aww"), None)
self.assertEqual(MediaParser.partial_media_dict_from_url("http://imgur.com/a/album"), None)
def test_fb_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("http://facebook.com/theuberbots")
self.assertEqual(result['media_type_enum'], MediaType.FACEBOOK_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'theuberbots')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.FACEBOOK_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.facebook.com/theuberbots')
def test_twitter_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://twitter.com/team1124")
self.assertEqual(result['media_type_enum'], MediaType.TWITTER_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'team1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.TWITTER_PROFILE])
self.assertEqual(result['profile_url'], 'https://twitter.com/team1124')
def test_youtube_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/Uberbots1124")
self.assertEqual(result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'uberbots1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(result['profile_url'], 'https://www.youtube.com/uberbots1124')
short_result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/Uberbots1124")
self.assertEqual(short_result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(short_result['is_social'], True)
self.assertEqual(short_result['foreign_key'], 'uberbots1124')
self.assertEqual(short_result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(short_result['profile_url'], 'https://www.youtube.com/uberbots1124')
gapps_result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/c/tnt3102org")
self.assertEqual(gapps_result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(gapps_result['is_social'], True)
self.assertEqual(gapps_result['foreign_key'], 'tnt3102org')
self.assertEqual(gapps_result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(gapps_result['profile_url'], 'https://www.youtube.com/tnt3102org')
def test_github_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://github.com/frc1124")
self.assertEqual(result['media_type_enum'], MediaType.GITHUB_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'frc1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.GITHUB_PROFILE])
self.assertEqual(result['profile_url'], 'https://github.com/frc1124')
def test_instagram_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.instagram.com/4hteamneutrino")
self.assertEqual(result['media_type_enum'], MediaType.INSTAGRAM_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], '4hteamneutrino')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.INSTAGRAM_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.instagram.com/4hteamneutrino')
def test_periscope_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.periscope.tv/evolution2626")
self.assertEqual(result['media_type_enum'], MediaType.PERISCOPE_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'evolution2626')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.PERISCOPE_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.periscope.tv/evolution2626')
def test_grabcad_link(self):
result = MediaParser.partial_media_dict_from_url("https://grabcad.com/library/2016-148-robowranglers-1")
self.assertEqual(result['media_type_enum'], MediaType.GRABCAD)
self.assertEqual(result['is_social'], False)
self.assertEqual(result['foreign_key'], '2016-148-robowranglers-1')
details = json.loads(result['details_json'])
self.assertEqual(details['model_name'], '2016 | 148 - Robowranglers')
self.assertEqual(details['model_description'], 'Renegade')
self.assertEqual(details['model_image'], 'https://d2t1xqejof9utc.cloudfront.net/screenshots/pics/96268d5c5e6c1b7fe8892f713813bb40/card.jpg')
self.assertEqual(details['model_created'], '2016-09-19T11:52:23Z')
def test_instagram_image(self):
result = MediaParser.partial_media_dict_from_url("https://www.instagram.com/p/BUnZiriBYre/")
self.assertEqual(result['media_type_enum'], MediaType.INSTAGRAM_IMAGE)
self.assertEqual(result['foreign_key'], "BUnZiriBYre")
details = json.loads(result['details_json'])
self.assertEqual(details['title'], "FRC 195 @ 2017 Battlecry @ WPI")
self.assertEqual(details['author_name'], '1stroboticsrocks')
self.assertIsNotNone(details.get('thumbnail_url', None))
def test_unsupported_url_parse(self):
self.assertEqual(MediaParser.partial_media_dict_from_url("http://foo.bar"), None)
class TestWebcastUrlParser(unittest2.TestCase):
@classmethod
def setUpClass(cls):
cls.testbed = testbed.Testbed()
cls.testbed.activate()
cls.testbed.init_urlfetch_stub()
@classmethod
def tearDownClass(cls):
cls.testbed.deactivate()
def testTwitchUrl(self):
res = WebcastParser.webcast_dict_from_url("http://twitch.tv/frcgamesense")
self.assertIsNotNone(res)
self.assertEqual(res['type'], 'twitch')
self.assertEqual(res['channel'], 'frcgamesense')
unknown = WebcastParser.webcast_dict_from_url("http://twitch.tv/")
self.assertIsNone(unknown)
def testYouTubeUrl(self):
yt_long = WebcastParser.webcast_dict_from_url("http://www.youtube.com/watch?v=I-IrVbsl_K8")
self.assertIsNotNone(yt_long)
self.assertEqual(yt_long['type'], 'youtube')
self.assertEqual(yt_long['channel'], 'I-IrVbsl_K8')
yt_short = WebcastParser.webcast_dict_from_url("http://youtu.be/I-IrVbsl_K8")
self.assertIsNotNone(yt_short)
self.assertEqual(yt_short['type'], 'youtube')
self.assertEqual(yt_short['channel'], 'I-IrVbsl_K8')
bad_long = WebcastParser.webcast_dict_from_url('"http://www.youtube.com/')
self.assertIsNone(bad_long)
bad_short = WebcastParser.webcast_dict_from_url("http://youtu.be/")
self.assertIsNone(bad_short)
def testUstream(self):
res = WebcastParser.webcast_dict_from_url('http://www.ustream.tv/decoraheagles')
self.assertIsNotNone(res)
self.assertEqual(res['type'], 'ustream')
self.assertEqual(res['channel'], '3064708')
bad = WebcastParser.webcast_dict_from_url('http://ustream.tv/')
self.assertIsNone(bad)
def testUnknownUrl(self):
bad = WebcastParser.webcast_dict_from_url("http://mywebsite.somewebcast")
self.assertIsNone(bad)
|
jaredhasenklein/the-blue-alliance
|
tests/suggestions/test_media_url_parse.py
|
Python
|
mit
| 9,927
| 0.004533
|
import numpy as np
from scipy.special import iv
def tapering_window(time,D,mywindow):
""" tapering_window returns the window for tapering a WOSA segment.
Inputs:
- time [1-dim numpy array of floats]: times along the WOSA segment.
- D [float]: Temporal length of the WOSA segment.
- mywindow [int]: Choice of tapering window:
-> 1: Square window
-> 2: Triangular window
-> 3: sin window
-> 4: sin**2 (Hanning) window
-> 5: sin**3 window
-> 6: sin**4 window
-> 7: Hamming window, defined as 0.54-0.46*np.cos(2.0*np.pi*time/D)
-> 8: 4-term Blackman-Harris window, with a0=0.35875 and a1=0.48829 and a2=0.14128 and a3=0.01168
-> 9: Kaiser-Bessel window, with parameter alpha=2.5
-> 10: Gaussian window, with standard dev. sigma=D/6.0
The terminology and formulas come from:
F. Harris. On the use of windows for harmonic analysis with the discrete fourier transform. Proceedings of the IEEE, 66(1):51-83, January 1978.
WARNING: Provide the vector 'time' such that for all k=0,...,time.size-1, we have time[k]>=0 and time[k]<=D
Outputs:
- tapering_window [1-dim numpy array of floats - size=time.size]: the tapering window.
-----------------------------
This is part of WAVEPAL
(C) 2016 G. Lenoir"""
T=time.size
if mywindow==1:
tapering_window=np.ones(T)
elif mywindow==2:
tapering_window=1.0-np.absolute(time-D/2.0)/(D/2.0)
elif mywindow==3:
tapering_window=np.sin(np.pi*time/D)
elif mywindow==4:
tapering_window=(np.sin(np.pi*time/D))**2
elif mywindow==5:
tapering_window=(np.sin(np.pi*time/D))**3
elif mywindow==6:
tapering_window=(np.sin(np.pi*time/D))**4
elif mywindow==7:
tapering_window=0.54-0.46*np.cos(2.0*np.pi*time/D)
elif mywindow==8:
a0=0.35875
a1=0.48829
a2=0.14128
a3=0.01168
tapering_window=a0-a1*np.cos(2.0*np.pi*time/D)+a2*np.cos(4.0*np.pi*time/D)-a3*np.cos(6.0*np.pi*time/D)
elif mywindow==9:
alpha=2.5
tapering_window=iv(0,np.pi*alpha*np.sqrt(1.0-((time-D/2.0)/(D/2.0))**2))
elif mywindow==10:
sig=D/6.0
tapering_window=np.exp(-(time-D/2.0)**2/2.0/sig**2)
else:
print "Error: The window number you entered is not valid. Check input variable 'mywindow'."
return
return tapering_window
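# A minimal usage sketch (not part of the original module), following the
# conventions documented above: a Hanning window (mywindow=4) over a D=10
# segment sampled at 101 points.
if __name__ == "__main__":
    D = 10.0
    t = np.linspace(0.0, D, 101)  # satisfies 0 <= t[k] <= D, as required
    w = tapering_window(t, D, 4)  # sin**2 (Hanning) tapering window
    print(w[0], w[50], w[-1])  # expect 0.0, 1.0 and ~0.0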
|
guillaumelenoir/WAVEPAL
|
wavepal/tapering_window.py
|
Python
|
mit
| 2,207
| 0.043045
|
from ga_starters import *
|
Drob-AI/music-queue-rec
|
src/playlistsRecomender/gaPlaylistGenerator/__init__.py
|
Python
|
mit
| 25
| 0.04
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
progress test (count to 1000)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from ...utils.timing import TimedTestCase
from ..progress import together
class test_progress(TimedTestCase):
def test_prog(self):
self.threshold = 0.1
together(1000)
|
Thetoxicarcade/ac
|
congredi/utils/test/test_progress.py
|
Python
|
gpl-3.0
| 391
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from . import models
class ServiceFabricClientAPIsConfiguration(Configuration):
"""Configuration for ServiceFabricClientAPIs
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'http://localhost:19080'
super(ServiceFabricClientAPIsConfiguration, self).__init__(base_url)
self.add_user_agent('azure-servicefabric/{}'.format(VERSION))
self.credentials = credentials
class ServiceFabricClientAPIs(object):
"""Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services.
:ivar config: Configuration for client.
:vartype config: ServiceFabricClientAPIsConfiguration
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
self.config = ServiceFabricClientAPIsConfiguration(credentials, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '6.1.2'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
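    # A hedged usage sketch (assumed values, not part of the generated code):
    #     client = ServiceFabricClientAPIs(my_credentials)
    #     manifest = client.get_cluster_manifest(timeout=120)
    #     # Health-state filters are flag enumerations combined with bitwise
    #     # OR, so Ok (2) | Warning (4) == 6:
    #     health = client.get_cluster_health(nodes_health_state_filter=2 | 4)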
def get_cluster_manifest(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Get the Service Fabric cluster manifest.
        Get the Service Fabric cluster manifest. The cluster manifest contains
        properties of the cluster that include different node types on the
        cluster, security configurations, fault and upgrade domain topologies,
        etc.
        These properties are specified as part of the ClusterConfig.JSON file
        while deploying a stand-alone cluster. However, most of the information
        in the cluster manifest is generated internally by Service Fabric
        during cluster deployment in other deployment scenarios (e.g. when
        using the Azure portal).
The contents of the cluster manifest are for informational purposes
only and users are not expected to take a dependency on the format of
the file contents or its interpretation.
.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterManifest or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ClusterManifest or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/GetClusterManifest'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterManifest', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_cluster_health(
self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric cluster.
Gets the health of a Service Fabric cluster.
Use EventsHealthStateFilter to filter the collection of health events
reported on the cluster based on the health state.
Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter
to filter the collection of nodes and applications returned based on
their aggregated health state.
.
:param nodes_health_state_filter: Allows filtering of the node health
state objects returned in the result of cluster health query
based on their health state. The possible values for this parameter
include integer value of one of the
following health states. Only nodes that match the filter are
returned. All nodes are used to evaluate the aggregated health state.
If not specified, all entries are returned.
The state values are flag based enumeration, so the value could be a
combination of these values obtained using bitwise 'OR' operator.
For example, if the provided value is 6 then health state of nodes
with HealthState value of OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type nodes_health_state_filter: int
:param applications_health_state_filter: Allows filtering of the
application health state objects returned in the result of cluster
health
query based on their health state.
The possible values for this parameter include integer value obtained
from members or bitwise operations
on members of HealthStateFilter enumeration. Only applications that
match the filter are returned.
All applications are used to evaluate the aggregated health state. If
not specified, all entries are returned.
The state values are flag based enumeration, so the value could be a
combination of these values obtained using bitwise 'OR' operator.
For example, if the provided value is 6 then health state of
applications with HealthState value of OK (2) and Warning (4) are
returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type applications_health_state_filter: int
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include integer value of one of
the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are flag
based enumeration, so the value could be a combination of these value
obtained using bitwise 'OR' operator. For example, If the provided
value is 6 then all of the events with HealthState value of OK (2) and
Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
The statistics show the number of children entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param include_system_application_health_statistics: Indicates whether
the health statistics should include the fabric:/System application
health statistics. False by default.
If IncludeSystemApplicationHealthStatistics is set to true, the health
statistics include the entities that belong to the fabric:/System
application.
Otherwise, the query result includes health statistics only for user
applications.
The health statistics must be included in the query result for this
parameter to be applied.
:type include_system_application_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ClusterHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/GetClusterHealth'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if nodes_health_state_filter is not None:
query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int')
if applications_health_state_filter is not None:
query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if include_system_application_health_statistics is not None:
query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_cluster_health_using_policy(
self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, application_health_policy_map=None, cluster_health_policy=None, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric cluster using the specified policy.
Gets the health of a Service Fabric cluster.
Use EventsHealthStateFilter to filter the collection of health events
reported on the cluster based on the health state.
Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter
to filter the collection of nodes and applications returned based on
their aggregated health state.
Use ClusterHealthPolicies to override the health policies used to
evaluate the health.
.
:param nodes_health_state_filter: Allows filtering of the node health
state objects returned in the result of cluster health query
based on their health state. The possible values for this parameter
include integer value of one of the
following health states. Only nodes that match the filter are
returned. All nodes are used to evaluate the aggregated health state.
If not specified, all entries are returned.
The state values are flag based enumeration, so the value could be a
combination of these values obtained using bitwise 'OR' operator.
For example, if the provided value is 6 then health state of nodes
with HealthState value of OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type nodes_health_state_filter: int
:param applications_health_state_filter: Allows filtering of the
application health state objects returned in the result of cluster
health
query based on their health state.
The possible values for this parameter include integer value obtained
from members or bitwise operations
on members of HealthStateFilter enumeration. Only applications that
match the filter are returned.
All applications are used to evaluate the aggregated health state. If
not specified, all entries are returned.
The state values are flag based enumeration, so the value could be a
combination of these values obtained using bitwise 'OR' operator.
For example, if the provided value is 6 then health state of
applications with HealthState value of OK (2) and Warning (4) are
returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type applications_health_state_filter: int
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include integer value of one of
the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are flag
based enumeration, so the value could be a combination of these value
obtained using bitwise 'OR' operator. For example, If the provided
value is 6 then all of the events with HealthState value of OK (2) and
Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
The statistics show the number of children entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param include_system_application_health_statistics: Indicates whether
the health statistics should include the fabric:/System application
health statistics. False by default.
If IncludeSystemApplicationHealthStatistics is set to true, the health
statistics include the entities that belong to the fabric:/System
application.
Otherwise, the query result includes health statistics only for user
applications.
The health statistics must be included in the query result for this
parameter to be applied.
:type include_system_application_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param application_health_policy_map: Defines a map that contains
specific application health policies for different applications.
Each entry specifies as key the application name and as value an
ApplicationHealthPolicy used to evaluate the application health.
If an application is not specified in the map, the application health
evaluation uses the ApplicationHealthPolicy found in its application
manifest or the default application health policy (if no health policy
is defined in the manifest).
The map is empty by default.
:type application_health_policy_map:
list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem]
:param cluster_health_policy: Defines a health policy used to evaluate
the health of the cluster or of a cluster node.
:type cluster_health_policy:
~azure.servicefabric.models.ClusterHealthPolicy
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ClusterHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
cluster_health_policies = None
if application_health_policy_map is not None or cluster_health_policy is not None:
cluster_health_policies = models.ClusterHealthPolicies(application_health_policy_map=application_health_policy_map, cluster_health_policy=cluster_health_policy)
api_version = "6.0"
# Construct URL
url = '/$/GetClusterHealth'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if nodes_health_state_filter is not None:
query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int')
if applications_health_state_filter is not None:
query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if include_system_application_health_statistics is not None:
query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if cluster_health_policies is not None:
body_content = self._serialize.body(cluster_health_policies, 'ClusterHealthPolicies')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_cluster_health_chunk(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric cluster using health chunks.
Gets the health of a Service Fabric cluster using health chunks.
Includes the aggregated health state of the cluster, but none of the
cluster entities.
To expand the cluster health and get the health state of all or some of
the entities, use the POST URI and specify the cluster health chunk
query description.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterHealthChunk or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ClusterHealthChunk or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
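Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class and that the returned ``ClusterHealthChunk``
exposes a ``health_state`` attribute::

    chunk = client.get_cluster_health_chunk()
    print(chunk.health_state)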
"""
api_version = "6.0"
# Construct URL
url = '/$/GetClusterHealthChunk'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterHealthChunk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_cluster_health_chunk_using_policy_and_advanced_filters(
self, cluster_health_chunk_query_description=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric cluster using health chunks.
Gets the health of a Service Fabric cluster using health chunks. The
health evaluation is done based on the input cluster health chunk query
description.
The query description allows users to specify health policies for
evaluating the cluster and its children.
Users can specify very flexible filters to select which cluster
entities to return. The selection can be done based on the entities
health state and based on the hierarchy.
The query can return multi-level children of the entities based on the
specified filters. For example, it can return one application with a
specified name, and for this application, return
only services that are in Error or Warning, and all partitions and
replicas for one of these services.
:param cluster_health_chunk_query_description: Describes the cluster
and application health policies used to evaluate the cluster health
and the filters to select which cluster entities to be returned.
If the cluster health policy is present, it is used to evaluate the
cluster events and the cluster nodes. If not present, the health
evaluation uses the cluster health policy defined in the cluster
manifest or the default cluster health policy.
By default, each application is evaluated using its specific
application health policy, defined in the application manifest, or the
default health policy, if no policy is defined in the manifest.
If the application health policy map is specified, and it has an entry
for an application, the specified application health policy
is used to evaluate the application health.
Users can specify very flexible filters to select which cluster
entities to include in response. The selection can be done based on
the entities health state and based on the hierarchy.
The query can return multi-level children of the entities based on the
specified filters. For example, it can return one application with a
specified name, and for this application, return
only services that are in Error or Warning, and all partitions and
replicas for one of these services.
:type cluster_health_chunk_query_description:
~azure.servicefabric.models.ClusterHealthChunkQueryDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterHealthChunk or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ClusterHealthChunk or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
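Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class; the model field names shown
(``cluster_health_policy``, ``consider_warning_as_error``) are
assumptions based on the REST model::

    query = models.ClusterHealthChunkQueryDescription(
        cluster_health_policy=models.ClusterHealthPolicy(
            consider_warning_as_error=True))
    chunk = client.get_cluster_health_chunk_using_policy_and_advanced_filters(
        cluster_health_chunk_query_description=query)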
"""
api_version = "6.0"
# Construct URL
url = '/$/GetClusterHealthChunk'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if cluster_health_chunk_query_description is not None:
body_content = self._serialize.body(cluster_health_chunk_query_description, 'ClusterHealthChunkQueryDescription')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterHealthChunk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def report_cluster_health(
self, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Sends a health report on the Service Fabric cluster.
Sends a health report on a Service Fabric cluster. The report must
contain the information about the source of the health report and
property on which it is reported.
The report is sent to a Service Fabric gateway node, which forwards it
to the health store.
The report may be accepted by the gateway, but rejected by the health
store after extra validation.
For example, the health store may reject the report because of an
invalid parameter, like a stale sequence number.
To see whether the report was applied in the health store, run
GetClusterHealth and check that the report appears in the HealthEvents
section.
:param health_information: Describes the health information for the
health report. This information needs to be present in all of the
health reports sent to the health manager.
:type health_information:
~azure.servicefabric.models.HealthInformation
:param immediate: A flag which indicates whether the report should be
sent immediately.
A health report is sent to a Service Fabric gateway Application, which
forwards it to the health store.
If Immediate is set to true, the report is sent immediately from Http
Gateway to the health store, regardless of the fabric client settings
that the Http Gateway Application is using.
This is useful for critical reports that should be sent as soon as
possible.
Depending on timing and other conditions, sending the report may still
fail, for example if the Http Gateway is closed or the message doesn't
reach the Gateway.
If Immediate is set to false, the report is sent based on the health
client settings from the Http Gateway. Therefore, it will be batched
according to the HealthReportSendInterval configuration.
This is the recommended setting because it allows the health client to
optimize health reporting messages to health store as well as health
report processing.
By default, reports are not sent immediately.
:type immediate: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
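Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class; the ``HealthInformation`` field names are
assumptions based on the REST model::

    info = models.HealthInformation(
        source_id='MyWatchdog',   # identity of the reporter
        property='Availability',  # property the report applies to
        health_state='Warning')
    client.report_cluster_health(info, immediate=True)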
"""
api_version = "6.0"
# Construct URL
url = '/$/ReportClusterHealth'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if immediate is not None:
query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(health_information, 'HealthInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_provisioned_fabric_code_version_info_list(
self, code_version=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets a list of fabric code versions that are provisioned in a Service
Fabric cluster.
Gets a list of information about fabric code versions that are
provisioned in the cluster. The parameter CodeVersion can be used to
optionally filter the output to only that particular version.
:param code_version: The product version of Service Fabric.
:type code_version: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.FabricCodeVersionInfo] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
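Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class and that each ``FabricCodeVersionInfo`` item
exposes a ``code_version`` attribute::

    for info in client.get_provisioned_fabric_code_version_info_list():
        print(info.code_version)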
"""
api_version = "6.0"
# Construct URL
url = '/$/GetProvisionedCodeVersions'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if code_version is not None:
query_parameters['CodeVersion'] = self._serialize.query("code_version", code_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[FabricCodeVersionInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_provisioned_fabric_config_version_info_list(
self, config_version=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets a list of fabric config versions that are provisioned in a Service
Fabric cluster.
Gets a list of information about fabric config versions that are
provisioned in the cluster. The parameter ConfigVersion can be used to
optionally filter the output to only that particular version.
:param config_version: The config version of Service Fabric.
:type config_version: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.FabricConfigVersionInfo] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/GetProvisionedConfigVersions'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if config_version is not None:
query_parameters['ConfigVersion'] = self._serialize.query("config_version", config_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[FabricConfigVersionInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_cluster_upgrade_progress(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the progress of the current cluster upgrade.
Gets the current progress of the ongoing cluster upgrade. If no upgrade
is currently in progress, gets the last state of the previous cluster
upgrade.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterUpgradeProgressObject or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ClusterUpgradeProgressObject or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
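Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class; the ``upgrade_state`` attribute is an
assumption based on the REST model::

    progress = client.get_cluster_upgrade_progress()
    print(progress.upgrade_state)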
"""
api_version = "6.0"
# Construct URL
url = '/$/GetUpgradeProgress'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterUpgradeProgressObject', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_cluster_configuration(
self, configuration_api_version, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Get the Service Fabric standalone cluster configuration.
Get the Service Fabric standalone cluster configuration. The cluster
configuration contains properties of the cluster, including the
different node types in the cluster, security configurations, and
fault and upgrade domain topologies.
:param configuration_api_version: The API version of the Standalone
cluster json configuration.
:type configuration_api_version: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterConfiguration or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ClusterConfiguration or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
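Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class connected to a standalone cluster; the
configuration API version string and the ``cluster_configuration``
attribute are illustrative assumptions::

    config = client.get_cluster_configuration(
        configuration_api_version='10-2017')
    print(config.cluster_configuration)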
"""
api_version = "6.0"
# Construct URL
url = '/$/GetClusterConfiguration'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['ConfigurationApiVersion'] = self._serialize.query("configuration_api_version", configuration_api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterConfiguration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_cluster_configuration_upgrade_status(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Get the cluster configuration upgrade status of a Service Fabric
standalone cluster.
Get the cluster configuration upgrade status details of a Service
Fabric standalone cluster.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterConfigurationUpgradeStatusInfo or ClientRawResponse if
raw=true
:rtype:
~azure.servicefabric.models.ClusterConfigurationUpgradeStatusInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/GetClusterConfigurationUpgradeStatus'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterConfigurationUpgradeStatusInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_upgrade_orchestration_service_state(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Get the service state of Service Fabric Upgrade Orchestration Service.
Get the service state of Service Fabric Upgrade Orchestration Service.
This API is used internally for support purposes.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: UpgradeOrchestrationServiceState or ClientRawResponse if
raw=true
:rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceState
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/GetUpgradeOrchestrationServiceState'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('UpgradeOrchestrationServiceState', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def set_upgrade_orchestration_service_state(
self, timeout=60, service_state=None, custom_headers=None, raw=False, **operation_config):
"""Update the service state of Service Fabric Upgrade Orchestration
Service.
Update the service state of Service Fabric Upgrade Orchestration
Service. This API is used internally for support purposes.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param service_state: The state of Service Fabric Upgrade
Orchestration Service.
:type service_state: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: UpgradeOrchestrationServiceStateSummary or ClientRawResponse
if raw=true
:rtype:
~azure.servicefabric.models.UpgradeOrchestrationServiceStateSummary or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
upgrade_orchestration_service_state = models.UpgradeOrchestrationServiceState(service_state=service_state)
api_version = "6.0"
# Construct URL
url = '/$/SetUpgradeOrchestrationServiceState'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(upgrade_orchestration_service_state, 'UpgradeOrchestrationServiceState')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('UpgradeOrchestrationServiceStateSummary', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def provision_cluster(
self, timeout=60, code_file_path=None, cluster_manifest_file_path=None, custom_headers=None, raw=False, **operation_config):
"""Provision the code or configuration packages of a Service Fabric
cluster.
Validate and provision the code or configuration packages of a Service
Fabric cluster.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param code_file_path: The cluster code package file path.
:type code_file_path: str
:param cluster_manifest_file_path: The cluster manifest file path.
:type cluster_manifest_file_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
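Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class; the image store paths are hypothetical and
must refer to packages already uploaded to the image store::

    client.provision_cluster(
        code_file_path='incoming/MicrosoftAzureServiceFabric.cab',
        cluster_manifest_file_path='incoming/ClusterManifest.xml')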
"""
provision_fabric_description = models.ProvisionFabricDescription(code_file_path=code_file_path, cluster_manifest_file_path=cluster_manifest_file_path)
api_version = "6.0"
# Construct URL
url = '/$/Provision'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(provision_fabric_description, 'ProvisionFabricDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def unprovision_cluster(
self, timeout=60, code_version=None, config_version=None, custom_headers=None, raw=False, **operation_config):
"""Unprovision the code or configuration packages of a Service Fabric
cluster.
Unprovision the code or configuration packages of a Service Fabric
cluster. Code and configuration can be unprovisioned separately.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param code_version: The cluster code package version.
:type code_version: str
:param config_version: The cluster manifest version.
:type config_version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
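Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class; the version string is hypothetical::

    client.unprovision_cluster(code_version='6.0.232.9494')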
"""
unprovision_fabric_description = models.UnprovisionFabricDescription(code_version=code_version, config_version=config_version)
api_version = "6.0"
# Construct URL
url = '/$/Unprovision'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(unprovision_fabric_description, 'UnprovisionFabricDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def rollback_cluster_upgrade(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Rollback the upgrade of a Service Fabric cluster.
Rollback the code or configuration upgrade of a Service Fabric cluster.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/RollbackUpgrade'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def resume_cluster_upgrade(
self, upgrade_domain, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Make the cluster upgrade move on to the next upgrade domain.
Make the cluster code or configuration upgrade move on to the next
upgrade domain if appropriate.
:param upgrade_domain: The next upgrade domain for this cluster
upgrade.
:type upgrade_domain: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
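Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class and that ``'UD1'`` is the pending upgrade
domain reported by get_cluster_upgrade_progress::

    client.resume_cluster_upgrade(upgrade_domain='UD1')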
"""
resume_cluster_upgrade_description = models.ResumeClusterUpgradeDescription(upgrade_domain=upgrade_domain)
api_version = "6.0"
# Construct URL
url = '/$/MoveToNextUpgradeDomain'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(resume_cluster_upgrade_description, 'ResumeClusterUpgradeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def start_cluster_upgrade(
self, start_cluster_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Start upgrading the code or configuration version of a Service Fabric
cluster.
Validate the supplied upgrade parameters and start upgrading the code
or configuration version of a Service Fabric cluster if the parameters
are valid.
:param start_cluster_upgrade_description: Describes the parameters for
starting a cluster upgrade.
:type start_cluster_upgrade_description:
~azure.servicefabric.models.StartClusterUpgradeDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
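Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class; the ``StartClusterUpgradeDescription`` field
names and version strings are assumptions for illustration::

    desc = models.StartClusterUpgradeDescription(
        code_version='6.0.232.9494',
        config_version='2')
    client.start_cluster_upgrade(desc)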
"""
api_version = "6.0"
# Construct URL
url = '/$/Upgrade'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(start_cluster_upgrade_description, 'StartClusterUpgradeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def start_cluster_configuration_upgrade(
self, cluster_configuration_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Start upgrading the configuration of a Service Fabric standalone
cluster.
Validate the supplied configuration upgrade parameters and start
upgrading the cluster configuration if the parameters are valid.
:param cluster_configuration_upgrade_description: Parameters for a
standalone cluster configuration upgrade.
:type cluster_configuration_upgrade_description:
~azure.servicefabric.models.ClusterConfigurationUpgradeDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/StartClusterConfigurationUpgrade'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(cluster_configuration_upgrade_description, 'ClusterConfigurationUpgradeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def update_cluster_upgrade(
self, update_cluster_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Update the upgrade parameters of a Service Fabric cluster upgrade.
Update the upgrade parameters used during a Service Fabric cluster
upgrade.
:param update_cluster_upgrade_description: Parameters for updating a
cluster upgrade.
:type update_cluster_upgrade_description:
~azure.servicefabric.models.UpdateClusterUpgradeDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/UpdateUpgrade'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(update_cluster_upgrade_description, 'UpdateClusterUpgradeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_aad_metadata(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the Azure Active Directory metadata used for secured connection to
cluster.
Gets the Azure Active Directory metadata used for secured connection to
cluster.
This API is not meant to be called separately. It provides
information needed to set up an Azure Active Directory secured
connection with a Service Fabric cluster.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AadMetadataObject or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.AadMetadataObject or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/GetAadMetadata'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AadMetadataObject', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_node_info_list(
self, continuation_token=None, node_status_filter="default", timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of nodes in the Service Fabric cluster.
Gets the list of nodes in the Service Fabric cluster. The response
includes the name, status, ID, health, uptime, and other details about
each node.
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there
are no further results, the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param node_status_filter: Allows filtering the nodes based on the
NodeStatus. Only the nodes that match the specified filter value are
returned. The filter value can be one of the following.
Possible values include: 'default', 'all', 'up', 'down', 'enabling',
'disabling', 'disabled', 'unknown', 'removed'
:type node_status_filter: str or
~azure.servicefabric.models.NodeStatusFilterOptionalQueryParam
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedNodeInfoList or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PagedNodeInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
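Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class; the ``items`` and ``continuation_token``
attributes of the paged result are assumptions based on the REST
model::

    nodes = client.get_node_info_list(node_status_filter='up')
    while True:
        for node in nodes.items:
            print(node.name, node.health_state)
        if not nodes.continuation_token:
            break
        nodes = client.get_node_info_list(
            continuation_token=nodes.continuation_token,
            node_status_filter='up')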
"""
api_version = "6.0"
# Construct URL
url = '/Nodes'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if node_status_filter is not None:
query_parameters['NodeStatusFilter'] = self._serialize.query("node_status_filter", node_status_filter, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedNodeInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_node_info(
self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about a specific node in the Service Fabric
cluster.
Gets the information about a specific node in the Service Fabric
cluster. The response includes the name, status, ID, health, uptime,
and other details about the node.
:param node_name: The name of the node.
:type node_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NodeInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.NodeInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NodeInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_node_health(
self, node_name, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric node.
Gets the health of a Service Fabric node. Use EventsHealthStateFilter
to filter the collection of health events reported on the node based on
the health state. If the node that you specify by name does not exist
in the health store, this returns an error.
:param node_name: The name of the node.
:type node_name: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of
one of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value can be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then all of the events with HealthState value of
Ok (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used to
return no results on a given collection of states. The value is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NodeHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.NodeHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
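Example: a minimal sketch, assuming ``client`` is an authenticated
instance of this class; ``health_events`` and the event attributes
are assumptions based on the REST model::

    # 8 selects only events in Error state.
    node_health = client.get_node_health(
        'Node1', events_health_state_filter=8)
    for event in node_health.health_events:
        print(event.source_id, event.health_state)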
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NodeHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_node_health_using_policy(
self, node_name, events_health_state_filter=0, cluster_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric node, by using the specified health
policy.
Gets the health of a Service Fabric node. Use EventsHealthStateFilter
to filter the collection of health events reported on the node based on
the health state. Use ClusterHealthPolicy in the POST body to override
the health policies used to evaluate the health. If the node that you
specify by name does not exist in the health store, this returns an
error.
:param node_name: The name of the node.
:type node_name: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of one
of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6, then all of the events with HealthState value of
OK (2) and
Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param cluster_health_policy: Describes the health policies used to
evaluate the health of a cluster or node. If not present, the health
evaluation uses the health policy from the cluster manifest or the
default health policy.
:type cluster_health_policy:
~azure.servicefabric.models.ClusterHealthPolicy
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NodeHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.NodeHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if cluster_health_policy is not None:
body_content = self._serialize.body(cluster_health_policy, 'ClusterHealthPolicy')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NodeHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
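# Example usage (sketch, hypothetical names): overriding the evaluation policy
# with a ClusterHealthPolicy body so warnings are treated as errors; the
# policy field name follows the REST schema and is an assumption here:
#
#     policy = models.ClusterHealthPolicy(consider_warning_as_error=True)
#     health = client.get_node_health_using_policy(
#         "Node01", cluster_health_policy=policy)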
def report_node_health(
self, node_name, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Sends a health report on the Service Fabric node.
Reports health state of the specified Service Fabric node. The report
must contain the information about the source of the health report and
property on which it is reported.
The report is sent to a Service Fabric gateway node, which forwards it
to the health store.
The report may be accepted by the gateway, but rejected by the health
store after extra validation.
For example, the health store may reject the report because of an
invalid parameter, like a stale sequence number.
To see whether the report was applied in the health store, run
GetNodeHealth and check that the report appears in the HealthEvents
section.
:param node_name: The name of the node.
:type node_name: str
:param health_information: Describes the health information for the
health report. This information needs to be present in all of the
health reports sent to the health manager.
:type health_information:
~azure.servicefabric.models.HealthInformation
:param immediate: A flag which indicates whether the report should be
sent immediately.
A health report is sent to a Service Fabric gateway Application, which
forwards it to the health store.
If Immediate is set to true, the report is sent immediately from Http
Gateway to the health store, regardless of the fabric client settings
that the Http Gateway Application is using.
This is useful for critical reports that should be sent as soon as
possible.
Depending on timing and other conditions, sending the report may still
fail, for example if the Http Gateway is closed or the message doesn't
reach the Gateway.
If Immediate is set to false, the report is sent based on the health
client settings from the Http Gateway. Therefore, it will be batched
according to the HealthReportSendInterval configuration.
This is the recommended setting because it allows the health client to
optimize health reporting messages to the health store as well as health
report processing.
By default, reports are not sent immediately.
:type immediate: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/ReportHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if immediate is not None:
query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(health_information, 'HealthInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
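# Example usage (sketch): reporting a custom health event on a node. source_id,
# property, and health_state are assumed to be the required HealthInformation
# fields, matching the "source" and "property" requirement in the docstring;
# immediate=True bypasses client-side batching:
#
#     info = models.HealthInformation(
#         source_id="MyWatchdog",    # hypothetical reporter name
#         property="DiskSpace",      # hypothetical property name
#         health_state="Warning")
#     client.report_node_health("Node01", info, immediate=True)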
def get_node_load_info(
self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the load information of a Service Fabric node.
Retrieves the load information of a Service Fabric node for all the
metrics that have load or capacity defined.
:param node_name: The name of the node.
:type node_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NodeLoadInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.NodeLoadInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetLoadInformation'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NodeLoadInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
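# Example usage (sketch): reading per-metric load for a node. The NodeLoadInfo
# attribute names below follow the REST schema and are assumptions here:
#
#     load = client.get_node_load_info("Node01")
#     for metric in load.node_load_metric_information or []:
#         print(metric.name, metric.node_load)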
def disable_node(
self, node_name, timeout=60, deactivation_intent=None, custom_headers=None, raw=False, **operation_config):
"""Deactivate a Service Fabric cluster node with the specified
deactivation intent.
Deactivate a Service Fabric cluster node with the specified
deactivation intent. Once the deactivation is in progress, the
deactivation intent can be increased, but not decreased (for example, a
node that was deactivated with the Pause intent can be deactivated
further with Restart, but not the other way around). Nodes may be
reactivated using the Activate a node operation any time after they are
deactivated. If the deactivation is not complete, this will cancel the
deactivation. A node which goes down and comes back up while
deactivated will still need to be reactivated before services will be
placed on that node.
:param node_name: The name of the node.
:type node_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param deactivation_intent: Describes the intent or reason for
deactivating the node. Possible values include: 'Pause', 'Restart',
'RemoveData'
:type deactivation_intent: str or
~azure.servicefabric.models.DeactivationIntent
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
deactivation_intent_description = models.DeactivationIntentDescription(deactivation_intent=deactivation_intent)
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/Deactivate'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(deactivation_intent_description, 'DeactivationIntentDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
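# Example usage (sketch): deactivating a node with the Restart intent; the
# intent string is wrapped into a DeactivationIntentDescription body by this
# method:
#
#     client.disable_node("Node01", deactivation_intent="Restart")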
def enable_node(
self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Activate a Service Fabric cluster node which is currently deactivated.
Activates a Service Fabric cluster node which is currently deactivated.
Once activated, the node will again become a viable target for placing
new replicas, and any deactivated replicas remaining on the node will
be reactivated.
:param node_name: The name of the node.
:type node_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/Activate'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def remove_node_state(
self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Notifies Service Fabric that the persisted state on a node has been
permanently removed or lost.
Notifies Service Fabric that the persisted state on a node has been
permanently removed or lost. This implies that it is not possible to
recover the persisted state of that node. This generally happens if a
hard disk has been wiped clean, or if a hard disk crashes. The node has
to be down for this operation to be successful. This operation lets
Service Fabric know that the replicas on that node no longer exist, and
that Service Fabric should stop waiting for those replicas to come back
up. Do not run this operation if the state on the node has not been
removed and the node can come back up with its state intact.
:param node_name: The name of the node.
:type node_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/RemoveNodeState'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def restart_node(
self, node_name, node_instance_id="0", timeout=60, create_fabric_dump="False", custom_headers=None, raw=False, **operation_config):
"""Restarts a Service Fabric cluster node.
Restarts a Service Fabric cluster node that is already started.
:param node_name: The name of the node.
:type node_name: str
:param node_instance_id: The instance id of the target node. If an
instance id is specified, the node is restarted only if it matches
the current instance of the node. A default value of "0" would match
any instance id. The instance id can be obtained using the get node query.
:type node_instance_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param create_fabric_dump: Specify True to create a dump of the fabric
node process. This is case sensitive. Possible values include:
'False', 'True'
:type create_fabric_dump: str or
~azure.servicefabric.models.CreateFabricDump
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
restart_node_description = models.RestartNodeDescription(node_instance_id=node_instance_id, create_fabric_dump=create_fabric_dump)
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/Restart'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(restart_node_description, 'RestartNodeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
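# Example usage (sketch): restarting a node and asking for a dump of the
# fabric node process. Note that create_fabric_dump is a case-sensitive
# string, not a bool:
#
#     client.restart_node("Node01", node_instance_id="0",
#                         create_fabric_dump="True")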
def get_application_type_info_list(
self, application_type_definition_kind_filter=0, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of application types in the Service Fabric cluster.
Returns the information about the application types that are
provisioned or in the process of being provisioned in the Service
Fabric cluster. Each version of an application type is returned as one
application type. The response includes the name, version, status and
other details about the application type. This is a paged query,
meaning that if not all of the application types fit in a page, one
page of results is returned as well as a continuation token which can
be used to get the next page. For example, if there are 10 application
types but a page only fits the first 3 application types, or if max
results is set to 3, then 3 are returned. To access the rest of the
results, retrieve subsequent pages by using the returned continuation
token in the next query. An empty continuation token is returned if
there are no subsequent pages.
:param application_type_definition_kind_filter: Used to filter on
ApplicationTypeDefinitionKind which is the mechanism used to define a
Service Fabric application type.
- Default - Default value, which performs the same function as
selecting "All". The value is 0.
- All - Filter that matches input with any
ApplicationTypeDefinitionKind value. The value is 65535.
- ServiceFabricApplicationPackage - Filter that matches input with
ApplicationTypeDefinitionKind value ServiceFabricApplicationPackage.
The value is 1.
- Compose - Filter that matches input with
ApplicationTypeDefinitionKind value Compose. The value is 2.
:type application_type_definition_kind_filter: int
:param exclude_application_parameters: The flag that specifies whether
application parameters will be excluded from the result.
:type exclude_application_parameters: bool
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there are
no further results, then the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param max_results: The maximum number of results to be returned as
part of the paged queries. This parameter defines the upper bound on
the number of results returned. The results returned can be less than
the specified maximum results if they do not fit in the message as per
the max message size restrictions defined in the configuration. If
this parameter is zero or not specified, the paged query includes as
many results as possible that fit in the return message.
:type max_results: long
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedApplicationTypeInfoList or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ApplicationTypes'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if application_type_definition_kind_filter is not None:
query_parameters['ApplicationTypeDefinitionKindFilter'] = self._serialize.query("application_type_definition_kind_filter", application_type_definition_kind_filter, 'int')
if exclude_application_parameters is not None:
query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if max_results is not None:
query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedApplicationTypeInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
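# Example usage (sketch): draining all pages via the continuation token, as
# described in the docstring above. The paged model is assumed to expose
# `items` and `continuation_token`:
#
#     token = None
#     while True:
#         page = client.get_application_type_info_list(
#             max_results=3, continuation_token=token)
#         for app_type in page.items or []:
#             print(app_type.name, app_type.version)
#         token = page.continuation_token
#         if not token:
#             break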
def get_application_type_info_list_by_name(
self, application_type_name, application_type_version=None, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of application types in the Service Fabric cluster
matching exactly the specified name.
Returns the information about the application types that are
provisioned or in the process of being provisioned in the Service
Fabric cluster. These results are of application types whose name matches
exactly the one specified as the parameter, and which comply with the
given query parameters. All versions of the application type matching
the application type name are returned, with each version returned as
one application type. The response includes the name, version, status
and other details about the application type. This is a paged query,
meaning that if not all of the application types fit in a page, one
page of results is returned as well as a continuation token which can
be used to get the next page. For example, if there are 10 application
types but a page only fits the first 3 application types, or if max
results is set to 3, then 3 are returned. To access the rest of the
results, retrieve subsequent pages by using the returned continuation
token in the next query. An empty continuation token is returned if
there are no subsequent pages.
:param application_type_name: The name of the application type.
:type application_type_name: str
:param application_type_version: The version of the application type.
:type application_type_version: str
:param exclude_application_parameters: The flag that specifies whether
application parameters will be excluded from the result.
:type exclude_application_parameters: bool
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there are
no further results, then the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param max_results: The maximum number of results to be returned as
part of the paged queries. This parameter defines the upper bound on
the number of results returned. The results returned can be less than
the specified maximum results if they do not fit in the message as per
the max message size restrictions defined in the configuration. If
this parameter is zero or not specified, the paged query includes as
many results as possible that fit in the return message.
:type max_results: long
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedApplicationTypeInfoList or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ApplicationTypes/{applicationTypeName}'
path_format_arguments = {
'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if application_type_version is not None:
query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str')
if exclude_application_parameters is not None:
query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if max_results is not None:
query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedApplicationTypeInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def provision_application_type(
self, provision_application_type_description_base_required_body_param, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Provisions or registers a Service Fabric application type with the
cluster using the .sfpkg package in the external store or using the
application package in the image store.
Provisions a Service Fabric application type with the cluster. This is
required before any new applications can be instantiated.
The provision operation can be performed either on the application
package specified by the relativePathInImageStore, or by using the URI
of the external .sfpkg.
:param
provision_application_type_description_base_required_body_param: The
base type of provision application type description which supports
either image store based provision or external store based provision.
:type provision_application_type_description_base_required_body_param:
~azure.servicefabric.models.ProvisionApplicationTypeDescriptionBase
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.1"
# Construct URL
url = '/ApplicationTypes/$/Provision'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(provision_application_type_description_base_required_body_param, 'ProvisionApplicationTypeDescriptionBase')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
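# Example usage (sketch): provisioning from the image store. The request body
# is polymorphic; the image-store variant and its field names below follow
# the REST schema and are assumptions here:
#
#     description = models.ProvisionApplicationTypeDescription(
#         async_property=False,
#         application_type_build_path="MyAppTypePkg")  # hypothetical path
#     client.provision_application_type(description)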
def unprovision_application_type(
self, application_type_name, application_type_version, timeout=60, async_parameter=None, custom_headers=None, raw=False, **operation_config):
"""Removes or unregisters a Service Fabric application type from the
cluster.
Removes or unregisters a Service Fabric application type from the
cluster. This operation can only be performed if all application
instances of the application type have been deleted. Once the
application type is unregistered, no new application instances can be
created for this particular application type.
:param application_type_name: The name of the application type.
:type application_type_name: str
:param application_type_version: The version of the application type
as defined in the application manifest.
:type application_type_version: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param async_parameter: The flag indicating whether or not unprovision
should occur asynchronously. When set to true, the unprovision
operation returns when the request is accepted by the system, and the
unprovision operation continues without any timeout limit. The default
value is false. However, we recommend setting it to true for large
application packages that were provisioned.
:type async_parameter: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
# Pass the async flag through to the request body; 'async' is a Python
# keyword, so the generated model is assumed to expose it as async_property.
unprovision_application_type_description_info = models.UnprovisionApplicationTypeDescriptionInfo(application_type_version=application_type_version, async_property=async_parameter)
api_version = "6.0"
# Construct URL
url = '/ApplicationTypes/{applicationTypeName}/$/Unprovision'
path_format_arguments = {
'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(unprovision_application_type_description_info, 'UnprovisionApplicationTypeDescriptionInfo')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
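# Example usage (sketch): unregistering one version of an application type;
# async_parameter=True returns as soon as the request is accepted, which the
# docstring above recommends for large packages:
#
#     client.unprovision_application_type(
#         "MyAppType", "1.0.0", async_parameter=True)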
def get_service_type_info_list(
self, application_type_name, application_type_version, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list containing the information about service types that are
supported by a provisioned application type in a Service Fabric
cluster.
Gets the list containing the information about service types that are
supported by a provisioned application type in a Service Fabric
cluster. The response includes the name of the service type, the name
and version of the service manifest the type is defined in, kind
(stateless or stateful) of the service type, and other information
about it.
:param application_type_name: The name of the application type.
:type application_type_name: str
:param application_type_version: The version of the application type.
:type application_type_version: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.ServiceTypeInfo] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes'
path_format_arguments = {
'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[ServiceTypeInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_service_manifest(
self, application_type_name, application_type_version, service_manifest_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the manifest describing a service type.
Gets the manifest describing a service type. The response contains the
service manifest XML as a string.
:param application_type_name: The name of the application type.
:type application_type_name: str
:param application_type_version: The version of the application type.
:type application_type_version: str
:param service_manifest_name: The name of a service manifest
registered as part of an application type in a Service Fabric cluster.
:type service_manifest_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ServiceTypeManifest or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ServiceTypeManifest or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest'
path_format_arguments = {
'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str')
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServiceTypeManifest', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
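# Example usage (sketch): fetching a service manifest. The manifest XML is
# assumed to come back as a string attribute named `manifest`, per the REST
# schema:
#
#     result = client.get_service_manifest(
#         "MyAppType", "1.0.0", "MyServiceManifestName")
#     print(result.manifest)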
def get_deployed_service_type_info_list(
self, node_name, application_id, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list containing the information about service types from the
applications deployed on a node in a Service Fabric cluster.
Gets the list containing the information about service types from the
applications deployed on a node in a Service Fabric cluster. The
response includes the name of the service type, its registration
status, the code package that registered it, and the activation id of the
service package.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_manifest_name: The name of the service manifest to
filter the list of deployed service type information. If specified,
the response will only contain the information about service types
that are defined in this service manifest.
:type service_manifest_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if service_manifest_name is not None:
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DeployedServiceTypeInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
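# Example usage (sketch): note the application identity encoding described in
# the docstring -- "fabric:/myapp/app1" becomes "myapp~app1" in 6.0+:
#
#     infos = client.get_deployed_service_type_info_list("Node01", "myapp~app1")
#     for info in infos or []:
#         print(info.service_type_name, info.status)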
def get_deployed_service_type_info_by_name(
self, node_name, application_id, service_type_name, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about a specified service type of the application
deployed on a node in a Service Fabric cluster.
Gets the list containing the information about a specific service type
from the applications deployed on a node in a Service Fabric cluster.
The response includes the name of the service type, its registration
status, the code package that registered it, and the activation id of the
service package. Each entry represents one activation of a service
type, differentiated by the activation id.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_type_name: Specifies the name of a Service Fabric
service type.
:type service_type_name: str
:param service_manifest_name: The name of the service manifest to
filter the list of deployed service type information. If specified,
the response will only contain the information about service types
that are defined in this service manifest.
:type service_manifest_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if service_manifest_name is not None:
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DeployedServiceTypeInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_application(
self, application_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Creates a Service Fabric application.
Creates a Service Fabric application using the specified description.
:param application_description: Description for creating an
application.
:type application_description:
~azure.servicefabric.models.ApplicationDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/$/Create'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(application_description, 'ApplicationDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [201]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
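# Example usage (sketch): creating an application from a provisioned type.
# Only the required ApplicationDescription fields are shown; the field names
# follow the REST schema and are assumptions here:
#
#     description = models.ApplicationDescription(
#         name="fabric:/myapp/app1",
#         type_name="MyAppType",
#         type_version="1.0.0")
#     client.create_application(description)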
def delete_application(
self, application_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Deletes an existing Service Fabric application.
Deletes an existing Service Fabric application. An application must be
created before it can be deleted. Deleting an application will delete
all services that are part of that application. By default, Service
Fabric will try to close service replicas in a graceful manner and then
delete the service. However, if a service is having issues closing the
replica gracefully, the delete operation may take a long time or get
stuck. Use the optional ForceRemove flag to skip the graceful close
sequence and forcefully delete the application and all of its services.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param force_remove: Remove a Service Fabric application or service
forcefully without going through the graceful shutdown sequence. This
parameter can be used to forcefully delete an application or service
for which delete is timing out due to issues in the service code that
prevents graceful close of replicas.
:type force_remove: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/Delete'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if force_remove is not None:
query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
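# Example usage (sketch): deleting an application; force_remove skips the
# graceful close sequence, per the docstring above:
#
#     client.delete_application("myapp~app1", force_remove=True)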
def get_application_load_info(
self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets load information about a Service Fabric application.
Returns the load information about the application that was created or
is in the process of being created in the Service Fabric cluster and whose
name matches the one specified as the parameter. The response includes
the name, minimum nodes, maximum nodes, the number of nodes the
application currently occupies, and application load metric information
about the application.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationLoadInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ApplicationLoadInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/GetLoadInformation'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationLoadInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
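# Usage sketch (illustrative): reading the load of an application. Assumes
# `client` is a configured instance of this class and that the returned
# ApplicationLoadInfo model exposes `minimum_nodes` and `maximum_nodes`
# attributes, as suggested by the docstring above; verify against the
# installed package version.
#
#     load_info = client.get_application_load_info("myapp~app1")
#     print(load_info.minimum_nodes, load_info.maximum_nodes)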
def get_application_info_list(
self, application_definition_kind_filter=0, application_type_name=None, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of applications created in the Service Fabric cluster
that match filters specified as the parameter.
Gets the information about the applications that were created or in the
process of being created in the Service Fabric cluster and that match
the filters specified as the parameter. The response includes the name,
type, status, parameters, and other details about the application. If
the applications do not fit in a page, one page of results is returned,
along with a continuation token that can be used to get the next page.
Filters ApplicationTypeName and ApplicationDefinitionKindFilter cannot
be specified at the same time.
:param application_definition_kind_filter: Used to filter on
ApplicationDefinitionKind which is the mechanism used to define a
Service Fabric application.
- Default - Default value, which performs the same function as
selecting "All". The value is 0.
- All - Filter that matches input with any ApplicationDefinitionKind
value. The value is 65535.
- ServiceFabricApplicationDescription - Filter that matches input with
ApplicationDefinitionKind value ServiceFabricApplicationDescription.
The value is 1.
- Compose - Filter that matches input with ApplicationDefinitionKind
value Compose. The value is 2.
:type application_definition_kind_filter: int
:param application_type_name: The application type name used to filter
the applications to query for. This value should not contain the
application type version.
:type application_type_name: str
:param exclude_application_parameters: The flag that specifies whether
application parameters will be excluded from the result.
:type exclude_application_parameters: bool
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there
are no further results, the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param max_results: The maximum number of results to be returned as
part of the paged queries. This parameter defines the upper bound on
the number of results returned. The results returned can be fewer than
the specified maximum results if they do not fit in the message as per
the max message size restrictions defined in the configuration. If
this parameter is zero or not specified, the paged query includes as
many results as possible that fit in the return message.
:type max_results: long
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedApplicationInfoList or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PagedApplicationInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.1"
# Construct URL
url = '/Applications'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if application_definition_kind_filter is not None:
query_parameters['ApplicationDefinitionKindFilter'] = self._serialize.query("application_definition_kind_filter", application_definition_kind_filter, 'int')
if application_type_name is not None:
query_parameters['ApplicationTypeName'] = self._serialize.query("application_type_name", application_type_name, 'str')
if exclude_application_parameters is not None:
query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if max_results is not None:
query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedApplicationInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
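# Usage sketch (illustrative): draining all pages of application info using
# the continuation token, per the paging description above. Assumes
# `client` is a configured instance of this class and that
# PagedApplicationInfoList exposes `items` and `continuation_token`
# attributes; verify against the installed package version.
#
#     token = None
#     while True:
#         page = client.get_application_info_list(continuation_token=token)
#         for app in page.items:
#             print(app.name)
#         token = page.continuation_token
#         if not token:
#             break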
def get_application_info(
self, application_id, exclude_application_parameters=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets information about a Service Fabric application.
Returns the information about the application that was created or in
the process of being created in the Service Fabric cluster and whose
name matches the one specified as the parameter. The response includes
the name, type, status, parameters and other details about the
application.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param exclude_application_parameters: The flag that specifies whether
application parameters will be excluded from the result.
:type exclude_application_parameters: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ApplicationInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if exclude_application_parameters is not None:
query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
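# Usage sketch (illustrative): fetching a single application without its
# parameters. Assumes `client` is a configured instance of this class and
# that ApplicationInfo exposes a `status` attribute; verify against the
# installed package version.
#
#     info = client.get_application_info(
#         "myapp~app1", exclude_application_parameters=True)
#     if info is not None:  # a 204 response deserializes to None
#         print(info.status)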
def get_application_health(
self, application_id, events_health_state_filter=0, deployed_applications_health_state_filter=0, services_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of the service fabric application.
Returns the heath state of the service fabric application. The response
reports either Ok, Error or Warning health state. If the entity is not
found in the health store, it will return Error.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include integer value of one of
the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value can be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6, then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param deployed_applications_health_state_filter: Allows filtering of
the deployed applications health state objects returned in the result
of application health query based on their health state.
The possible values for this parameter include integer value of one of
the following health states. Only deployed applications that match the
filter will be returned.
All deployed applications are used to evaluate the aggregated health
state. If not specified, all entries are returned.
The state values are a flag-based enumeration, so the value can be a
combination of these values obtained using the bitwise 'OR' operator.
For example, if the provided value is 6, then the health state of
deployed applications with HealthState value of OK (2) and Warning (4)
is returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type deployed_applications_health_state_filter: int
:param services_health_state_filter: Allows filtering of the services
health state objects returned in the result of services health query
based on their health state.
The possible values for this parameter include integer value of one of
the following health states.
Only services that match the filter are returned. All services are
used to evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value can be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6, then the health state of services with HealthState
value of OK (2) and Warning (4) is returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type services_health_state_filter: int
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
The statistics show the number of child entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ApplicationHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/GetHealth'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if deployed_applications_health_state_filter is not None:
query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int')
if services_health_state_filter is not None:
query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
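# Usage sketch (illustrative): combining flag values with the bitwise 'OR'
# operator to request only Ok (2) and Warning (4) events, as described
# above. Assumes `client` is a configured instance of this class and that
# ApplicationHealth exposes an `aggregated_health_state` attribute; verify
# against the installed package version.
#
#     health = client.get_application_health(
#         "myapp~app1", events_health_state_filter=2 | 4)  # == 6
#     print(health.aggregated_health_state)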
def get_application_health_using_policy(
self, application_id, events_health_state_filter=0, deployed_applications_health_state_filter=0, services_health_state_filter=0, exclude_health_statistics=False, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric application using the specified
policy.
Gets the health of a Service Fabric application. Use
EventsHealthStateFilter to filter the collection of health events
reported on the application based on the health state. Use the
ApplicationHealthPolicy in the request body to override the health
policies used to evaluate the health.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include integer value of one of
the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value can be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6, then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param deployed_applications_health_state_filter: Allows filtering of
the deployed applications health state objects returned in the result
of application health query based on their health state.
The possible values for this parameter include integer value of one of
the following health states. Only deployed applications that match the
filter will be returned.
All deployed applications are used to evaluate the aggregated health
state. If not specified, all entries are returned.
The state values are a flag-based enumeration, so the value can be a
combination of these values obtained using the bitwise 'OR' operator.
For example, if the provided value is 6, then the health state of
deployed applications with HealthState value of OK (2) and Warning (4)
is returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type deployed_applications_health_state_filter: int
:param services_health_state_filter: Allows filtering of the services
health state objects returned in the result of services health query
based on their health state.
The possible values for this parameter include integer value of one of
the following health states.
Only services that match the filter are returned. All services are
used to evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value can be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6, then the health state of services with HealthState
value of OK (2) and Warning (4) is returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type services_health_state_filter: int
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
The statistics show the number of child entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param application_health_policy: Describes the health policies used
to evaluate the health of an application or one of its children.
If not present, the health evaluation uses the health policy from
application manifest or the default health policy.
:type application_health_policy:
~azure.servicefabric.models.ApplicationHealthPolicy
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ApplicationHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/GetHealth'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if deployed_applications_health_state_filter is not None:
query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int')
if services_health_state_filter is not None:
query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if application_health_policy is not None:
body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
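# Usage sketch (illustrative): evaluating application health under a custom
# policy. Assumes `client` is a configured instance of this class; the
# ApplicationHealthPolicy field name below is taken from the models module
# and should be verified against the installed package version.
#
#     from azure.servicefabric import models
#     policy = models.ApplicationHealthPolicy(consider_warning_as_error=True)
#     health = client.get_application_health_using_policy(
#         "myapp~app1", application_health_policy=policy)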
def report_application_health(
self, application_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Sends a health report on the Service Fabric application.
Reports the health state of the specified Service Fabric application.
The report must contain the information about the source of the health
report and the property on which it is reported.
The report is sent to a Service Fabric gateway Application, which
forwards it to the health store.
The report may be accepted by the gateway, but rejected by the health
store after extra validation.
For example, the health store may reject the report because of an
invalid parameter, like a stale sequence number.
To see whether the report was applied in the health store, get
application health and check that the report appears in the
HealthEvents section.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param health_information: Describes the health information for the
health report. This information needs to be present in all of the
health reports sent to the health manager.
:type health_information:
~azure.servicefabric.models.HealthInformation
:param immediate: A flag which indicates whether the report should be
sent immediately.
A health report is sent to a Service Fabric gateway Application, which
forwards it to the health store.
If Immediate is set to true, the report is sent immediately from Http
Gateway to the health store, regardless of the fabric client settings
that the Http Gateway Application is using.
This is useful for critical reports that should be sent as soon as
possible.
Depending on timing and other conditions, sending the report may still
fail, for example if the Http Gateway is closed or the message doesn't
reach the Gateway.
If Immediate is set to false, the report is sent based on the health
client settings from the Http Gateway. Therefore, it will be batched
according to the HealthReportSendInterval configuration.
This is the recommended setting because it allows the health client to
optimize health reporting messages to health store as well as health
report processing.
By default, reports are not sent immediately.
:type immediate: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/ReportHealth'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if immediate is not None:
query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(health_information, 'HealthInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
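# Usage sketch (illustrative): sending an immediate health report. Assumes
# `client` is a configured instance of this class; the HealthInformation
# field names below are taken from the models module and should be verified
# against the installed package version.
#
#     from azure.servicefabric import models
#     info = models.HealthInformation(
#         source_id="MyWatchdog",
#         property="Connectivity",
#         health_state="Warning")
#     client.report_application_health("myapp~app1", info, immediate=True)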
def start_application_upgrade(
self, application_id, application_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Starts upgrading an application in the Service Fabric cluster.
Validates the supplied application upgrade parameters and starts
upgrading the application if the parameters are valid.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param application_upgrade_description: Parameters for an application
upgrade.
:type application_upgrade_description:
~azure.servicefabric.models.ApplicationUpgradeDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/Upgrade'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(application_upgrade_description, 'ApplicationUpgradeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
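# Usage sketch (illustrative): starting a rolling, unmonitored-manual
# upgrade. Assumes `client` is a configured instance of this class; the
# ApplicationUpgradeDescription field names and enum strings below are
# taken from the models module and should be verified against the
# installed package version.
#
#     from azure.servicefabric import models
#     desc = models.ApplicationUpgradeDescription(
#         name="fabric:/myapp/app1",
#         target_application_type_version="2.0.0",
#         upgrade_kind="Rolling",
#         rolling_upgrade_mode="UnmonitoredManual")
#     client.start_application_upgrade("myapp~app1", desc)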
def get_application_upgrade(
self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets details for the latest upgrade performed on this application.
Returns information about the state of the latest application upgrade
along with details to aid debugging application health issues.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationUpgradeProgressInfo or ClientRawResponse if
raw=true
:rtype: ~azure.servicefabric.models.ApplicationUpgradeProgressInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/GetUpgradeProgress'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationUpgradeProgressInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
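# Usage sketch (illustrative): polling upgrade progress until a terminal
# state. Assumes `client` is a configured instance of this class and that
# ApplicationUpgradeProgressInfo exposes an `upgrade_state` attribute with
# the state strings shown; verify against the installed package version.
#
#     import time
#     progress = client.get_application_upgrade("myapp~app1")
#     while progress.upgrade_state not in (
#             "RollingForwardCompleted", "RollingBackCompleted", "Failed"):
#         time.sleep(10)
#         progress = client.get_application_upgrade("myapp~app1")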
def update_application_upgrade(
self, application_id, application_upgrade_update_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Updates an ongoing application upgrade in the Service Fabric cluster.
Updates the parameters of an ongoing application upgrade from the ones
specified at the time of starting the application upgrade. This may be
required to mitigate stuck application upgrades due to incorrect
parameters or issues in the application, allowing the upgrade to make
progress.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param application_upgrade_update_description: Parameters for updating
an existing application upgrade.
:type application_upgrade_update_description:
~azure.servicefabric.models.ApplicationUpgradeUpdateDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/UpdateUpgrade'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(application_upgrade_update_description, 'ApplicationUpgradeUpdateDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def resume_application_upgrade(
self, application_id, upgrade_domain_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Resumes upgrading an application in the Service Fabric cluster.
Resumes an unmonitored manual Service Fabric application upgrade.
Service Fabric upgrades one upgrade domain at a time. For unmonitored
manual upgrades, after Service Fabric finishes an upgrade domain, it
waits for you to call this API before proceeding to the next upgrade
domain.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param upgrade_domain_name: The name of the upgrade domain in which to
resume the upgrade.
:type upgrade_domain_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
resume_application_upgrade_description = models.ResumeApplicationUpgradeDescription(upgrade_domain_name=upgrade_domain_name)
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/MoveToNextUpgradeDomain'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(resume_application_upgrade_description, 'ResumeApplicationUpgradeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
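# Usage sketch (illustrative): walking an unmonitored manual upgrade one
# upgrade domain at a time, as described above. Assumes `client` is a
# configured instance of this class and that
# ApplicationUpgradeProgressInfo exposes `upgrade_state` and
# `next_upgrade_domain` attributes; verify against the installed package.
#
#     progress = client.get_application_upgrade("myapp~app1")
#     while progress.upgrade_state == "RollingForwardPending":
#         client.resume_application_upgrade(
#             "myapp~app1", progress.next_upgrade_domain)
#         progress = client.get_application_upgrade("myapp~app1")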
def rollback_application_upgrade(
self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Starts rolling back the currently on-going upgrade of an application in
the Service Fabric cluster.
Starts rolling back the current application upgrade to the previous
version. This API can only be used to roll back the current in-progress
upgrade that is rolling forward to a new version. If the application is
not currently being upgraded, use the StartApplicationUpgrade API to
upgrade it to the desired version, including rolling back to a previous
version.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/RollbackUpgrade'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
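# Usage sketch (illustrative): aborting an in-progress upgrade by rolling
# it back. Assumes `client` is a configured instance of this class.
#
#     client.rollback_application_upgrade("myapp~app1")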
def get_deployed_application_info_list(
self, node_name, timeout=60, include_health_state=False, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config):
"""Gets the list of applications deployed on a Service Fabric node.
Gets the list of applications deployed on a Service Fabric node. The
results do not include information about deployed system applications
unless explicitly queried for by ID. Results encompass deployed
applications in active, activating, and downloading states. This query
requires that the node name corresponds to a node on the cluster. The
query fails if the provided node name does not point to any active
Service Fabric nodes on the cluster.
:param node_name: The name of the node.
:type node_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param include_health_state: Include the health state of an entity.
If this parameter is false or not specified, then the health state
returned is "Unknown".
When set to true, the query goes in parallel to the node and the
health system service before the results are merged.
As a result, the query is more expensive and may take a longer time.
:type include_health_state: bool
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there
are no further results, the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param max_results: The maximum number of results to be returned as
part of the paged queries. This parameter defines the upper bound on
the number of results returned. The results returned can be fewer than
the specified maximum results if they do not fit in the message as per
the max message size restrictions defined in the configuration. If
this parameter is zero or not specified, the paged query includes as
many results as possible that fit in the return message.
:type max_results: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedDeployedApplicationInfoList or ClientRawResponse if
raw=true
:rtype: ~azure.servicefabric.models.PagedDeployedApplicationInfoList
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.1"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
if include_health_state is not None:
query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if max_results is not None:
query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedDeployedApplicationInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
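# Usage sketch (illustrative): listing deployed applications on one node,
# including health state, and draining all pages. Assumes `client` is a
# configured instance of this class, the node name is hypothetical, and
# PagedDeployedApplicationInfoList exposes `items` and
# `continuation_token` attributes; verify against the installed package.
#
#     token = None
#     while True:
#         page = client.get_deployed_application_info_list(
#             "_Node_0", include_health_state=True,
#             continuation_token=token)
#         for deployed in page.items:
#             print(deployed.name, deployed.health_state)
#         token = page.continuation_token
#         if not token:
#             break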
def get_deployed_application_info(
self, node_name, application_id, timeout=60, include_health_state=False, custom_headers=None, raw=False, **operation_config):
"""Gets the information about an application deployed on a Service Fabric
node.
Gets the information about an application deployed on a Service Fabric
node. This query returns system application information if the
application ID provided is for a system application. Results encompass
deployed applications in active, activating, and downloading states.
This query requires that the node name corresponds to a node on the
cluster. The query fails if the provided node name does not point to
any active Service Fabric nodes on the cluster.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param include_health_state: Include the health state of an entity.
If this parameter is false or not specified, then the health state
returned is "Unknown".
When set to true, the query goes in parallel to the node and the
health system service before the results are merged.
As a result, the query is more expensive and may take a longer time.
:type include_health_state: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeployedApplicationInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.DeployedApplicationInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.1"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
if include_health_state is not None:
query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeployedApplicationInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_deployed_application_health(
self, node_name, application_id, events_health_state_filter=0, deployed_service_packages_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about health of an application deployed on a
Service Fabric node.
Gets the information about health of an application deployed on a
Service Fabric node. Use EventsHealthStateFilter to optionally filter
for the collection of HealthEvent objects reported on the deployed
application based on health state. Use
DeployedServicePackagesHealthStateFilter to optionally filter for
DeployedServicePackageHealth children based on health state.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include integer value of one of
the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value can be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6, then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param deployed_service_packages_health_state_filter: Allows filtering
of the deployed service package health state objects returned in the
result of deployed application health query based on their health
state.
The possible values for this parameter include the integer value of one
of the following health states.
Only deployed service packages that match the filter are returned. All
deployed service packages are used to evaluate the aggregated health
state of the deployed application.
If not specified, all entries are returned.
The state values are a flag-based enumeration, so the value can be a
combination of these values obtained using the bitwise 'OR' operator.
For example, if the provided value is 6 then the health state of
service packages with HealthState value of OK (2) and Warning (4) is
returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type deployed_service_packages_health_state_filter: int
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
The statistics show the number of children entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeployedApplicationHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.DeployedApplicationHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if deployed_service_packages_health_state_filter is not None:
query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeployedApplicationHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
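# Usage sketch (not part of the generated client): the health state filters
# above are flag-based, so individual states can be combined with bitwise OR.
# Assumes `client` is an already-configured ServiceFabricClientAPIs instance;
# the node and application identifiers are hypothetical.
#
#     OK, WARNING = 2, 4
#     health = client.get_deployed_application_health(
#         node_name="Node01",
#         application_id="myapp~app1",
#         events_health_state_filter=OK | WARNING)
#     print(health.aggregated_health_state)  # attribute name assumed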
def get_deployed_application_health_using_policy(
self, node_name, application_id, events_health_state_filter=0, deployed_service_packages_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about health of an application deployed on a
Service Fabric node. using the specified policy.
Gets the information about health of an application deployed on a
Service Fabric node using the specified policy. Use
EventsHealthStateFilter to optionally filter for the collection of
HealthEvent objects reported on the deployed application based on
health state. Use DeployedServicePackagesHealthStateFilter to
optionally filter for DeployedServicePackageHealth children based on
health state. Use ApplicationHealthPolicy to optionally override the
health policies used to evaluate the health. This API only uses
'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest
of the fields are ignored while evaluating the health of the deployed
application.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of one
of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param deployed_service_packages_health_state_filter: Allows filtering
of the deployed service package health state objects returned in the
result of deployed application health query based on their health
state.
The possible values for this parameter include the integer value of one
of the following health states.
Only deployed service packages that match the filter are returned. All
deployed service packages are used to evaluate the aggregated health
state of the deployed application.
If not specified, all entries are returned.
The state values are a flag-based enumeration, so the value can be a
combination of these values obtained using the bitwise 'OR' operator.
For example, if the provided value is 6 then the health state of
service packages with HealthState value of OK (2) and Warning (4) is
returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type deployed_service_packages_health_state_filter: int
:param application_health_policy: Describes the health policies used
to evaluate the health of an application or one of its children.
If not present, the health evaluation uses the health policy from
application manifest or the default health policy.
:type application_health_policy:
~azure.servicefabric.models.ApplicationHealthPolicy
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
The statistics show the number of children entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeployedApplicationHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.DeployedApplicationHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if deployed_service_packages_health_state_filter is not None:
query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if application_health_policy is not None:
body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeployedApplicationHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
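# Usage sketch (hypothetical identifiers): override the health evaluation so
# that warnings count as errors. Only the 'ConsiderWarningAsError' field of
# the policy is honored by this API; the Python attribute name
# `consider_warning_as_error` is assumed from the REST field name.
#
#     policy = models.ApplicationHealthPolicy(consider_warning_as_error=True)
#     health = client.get_deployed_application_health_using_policy(
#         "Node01", "myapp~app1", application_health_policy=policy)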
def report_deployed_application_health(
self, node_name, application_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Sends a health report on the Service Fabric application deployed on a
Service Fabric node.
Reports health state of the application deployed on a Service Fabric
node. The report must contain the information about the source of the
health report and property on which it is reported.
The report is sent to a Service Fabric gateway Service, which forwards
to the health store.
The report may be accepted by the gateway, but rejected by the health
store after extra validation.
For example, the health store may reject the report because of an
invalid parameter, like a stale sequence number.
To see whether the report was applied in the health store, get deployed
application health and check that the report appears in the
HealthEvents section.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param health_information: Describes the health information for the
health report. This information needs to be present in all of the
health reports sent to the health manager.
:type health_information:
~azure.servicefabric.models.HealthInformation
:param immediate: A flag which indicates whether the report should be
sent immediately.
A health report is sent to a Service Fabric gateway Application, which
forwards to the health store.
If Immediate is set to true, the report is sent immediately from Http
Gateway to the health store, regardless of the fabric client settings
that the Http Gateway Application is using.
This is useful for critical reports that should be sent as soon as
possible.
Depending on timing and other conditions, sending the report may still
fail, for example if the Http Gateway is closed or the message doesn't
reach the Gateway.
If Immediate is set to false, the report is sent based on the health
client settings from the Http Gateway. Therefore, it will be batched
according to the HealthReportSendInterval configuration.
This is the recommended setting because it allows the health client to
optimize health reporting messages to health store as well as health
report processing.
By default, reports are not sent immediately.
:type immediate: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if immediate is not None:
query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(health_information, 'HealthInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
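# Usage sketch: report a custom health event against a deployed application.
# The HealthInformation attribute names below (`source_id`, `property`,
# `health_state`) are assumed from the REST schema; identifiers are
# hypothetical.
#
#     info = models.HealthInformation(
#         source_id="MyWatchdog",
#         property="DiskUsage",
#         health_state="Warning")
#     client.report_deployed_application_health(
#         "Node01", "myapp~app1", info, immediate=True)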
def get_application_manifest(
self, application_type_name, application_type_version, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the manifest describing an application type.
Gets the manifest describing an application type. The response contains
the application manifest XML as a string.
:param application_type_name: The name of the application type.
:type application_type_name: str
:param application_type_version: The version of the application type.
:type application_type_version: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationTypeManifest or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ApplicationTypeManifest or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest'
path_format_arguments = {
'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationTypeManifest', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
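# Usage sketch: the manifest comes back as an XML string, so it can be fed
# straight into the standard library parser. The `manifest` attribute name on
# ApplicationTypeManifest is assumed; the type name and version are
# hypothetical.
#
#     import xml.etree.ElementTree as ET
#     result = client.get_application_manifest("MyAppType", "1.0.0")
#     root = ET.fromstring(result.manifest)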
def get_service_info_list(
self, application_id, service_type_name=None, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about all services belonging to the application
specified by the application id.
Returns the information about all services belonging to the application
specified by the application id.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_type_name: The service type name used to filter the
services to query for.
:type service_type_name: str
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there
are no further results, then the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedServiceInfoList or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PagedServiceInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/GetServices'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if service_type_name is not None:
query_parameters['ServiceTypeName'] = self._serialize.query("service_type_name", service_type_name, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedServiceInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
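# Usage sketch: drain a paged result set by passing the continuation token
# back until it comes back empty. The `continuation_token` and `items`
# attribute names on PagedServiceInfoList are assumed; the application
# identifier is hypothetical.
#
#     services, token = [], None
#     while True:
#         page = client.get_service_info_list("myapp~app1",
#                                             continuation_token=token)
#         services.extend(page.items)
#         token = page.continuation_token
#         if not token:
#             break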
def get_service_info(
self, application_id, service_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about the specific service belonging to a Service
Fabric application.
Returns the information about the specified service belonging to the
specified Service Fabric application.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ServiceInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ServiceInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/GetServices/{serviceId}'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServiceInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
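# Usage sketch: deriving the 6.0+ service identity from a fabric URI, per the
# naming rules described in the docstring above (hypothetical names).
#
#     service_name = "fabric:/myapp/app1/svc1"
#     service_id = service_name[len("fabric:/"):].replace("/", "~")
#     info = client.get_service_info("myapp~app1", service_id)  # "myapp~app1~svc1"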
def get_application_name_info(
self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the name of the Service Fabric application for a service.
The GetApplicationName endpoint returns the name of the application for
the specified service.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationNameInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ApplicationNameInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/GetApplicationName'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationNameInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_service(
self, application_id, service_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Creates the specified Service Fabric service.
This API allows creating a new Service Fabric stateless or stateful
service under a specified Service Fabric application. The description
for creating the service includes partitioning information and optional
properties for placement and load balancing. Some of the properties can
later be modified using `UpdateService` API.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_description: The information necessary to create a
service.
:type service_description:
~azure.servicefabric.models.ServiceDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/GetServices/$/Create'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(service_description, 'ServiceDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
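# Usage sketch: create a singleton stateless service. The model names
# (StatelessServiceDescription, SingletonPartitionSchemeDescription) and
# their attribute names are assumed from the REST schema; identifiers are
# hypothetical.
#
#     desc = models.StatelessServiceDescription(
#         service_name="fabric:/myapp/app1/svc1",
#         service_type_name="Svc1Type",
#         partition_description=models.SingletonPartitionSchemeDescription(),
#         instance_count=3)
#     client.create_service("myapp~app1", desc)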
def create_service_from_template(
self, application_id, service_from_template_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Creates a Service Fabric service from the service template.
Creates a Service Fabric service from the service template defined in
the application manifest. A service template contains the properties
that will be the same for service instances of the same type. The API
allows overriding the properties that are usually different for
different services of the same service type.
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_from_template_description: Describes the service that
needs to be created from the template defined in the application
manifest.
:type service_from_template_description:
~azure.servicefabric.models.ServiceFromTemplateDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate'
path_format_arguments = {
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(service_from_template_description, 'ServiceFromTemplateDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
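# Usage sketch: instantiate a service from a template in the application
# manifest. The ServiceFromTemplateDescription attribute names are assumed;
# identifiers are hypothetical.
#
#     tmpl = models.ServiceFromTemplateDescription(
#         application_name="fabric:/myapp/app1",
#         service_name="fabric:/myapp/app1/svc2",
#         service_type_name="Svc2Type")
#     client.create_service_from_template("myapp~app1", tmpl)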
def delete_service(
self, service_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Deletes an existing Service Fabric service.
Deletes an existing Service Fabric service. A service must be created
before it can be deleted. By default, Service Fabric will try to close
service replicas in a graceful manner and then delete the service.
However, if the service has issues closing its replicas gracefully, the
delete operation may take a long time or get stuck. Use the optional
ForceRemove flag to skip the graceful close sequence and forcefully
delete the service.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param force_remove: Remove a Service Fabric application or service
forcefully without going through the graceful shutdown sequence. This
parameter can be used to forcefully delete an application or service
for which delete is timing out due to issues in the service code that
prevents graceful close of replicas.
:type force_remove: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/Delete'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if force_remove is not None:
query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
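# Usage sketch: try a graceful delete first and fall back to ForceRemove only
# if the graceful close sequence gets stuck (hypothetical identifier; whether
# a stuck delete surfaces as FabricErrorException is an assumption).
#
#     try:
#         client.delete_service("myapp~app1~svc1", timeout=120)
#     except models.FabricErrorException:
#         client.delete_service("myapp~app1~svc1", force_remove=True)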
def update_service(
self, service_id, service_update_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Updates a Service Fabric service using the specified update
description.
This API allows updating properties of a running Service Fabric
service. The set of properties that can be updated are a subset of the
properties that were specified at the time of creating the service. The
current set of properties can be obtained using `GetServiceDescription`
API. Please note that updating the properties of a running service is
different from upgrading your application using
`StartApplicationUpgrade` API. The upgrade is a long running background
operation that involves moving the application from one version to
another, one upgrade domain at a time, whereas update applies the new
properties immediately to the service.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param service_update_description: The information necessary to update
a service.
:type service_update_description:
~azure.servicefabric.models.ServiceUpdateDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/Update'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(service_update_description, 'ServiceUpdateDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
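# Usage sketch: the read-modify-write pattern suggested by the docstring
# above. StatelessServiceUpdateDescription and its `flags`/`instance_count`
# fields are assumed from the REST schema, as is "1" marking InstanceCount as
# the updated property.
#
#     current = client.get_service_description("myapp~app1~svc1")
#     update = models.StatelessServiceUpdateDescription(
#         flags="1", instance_count=5)
#     client.update_service("myapp~app1~svc1", update)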
def get_service_description(
self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the description of an existing Service Fabric service.
Gets the description of an existing Service Fabric service. A service
must be created before its description can be obtained.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ServiceDescription or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ServiceDescription or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/GetDescription'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServiceDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_service_health(
self, service_id, events_health_state_filter=0, partitions_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of the specified Service Fabric service.
Gets the health information of the specified service.
Use EventsHealthStateFilter to filter the collection of health events
reported on the service based on the health state.
Use PartitionsHealthStateFilter to filter the collection of partitions
returned.
If you specify a service that does not exist in the health store, this
request returns an error.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of one
of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param partitions_health_state_filter: Allows filtering of the
partitions health state objects returned in the result of service
health query based on their health state.
The possible values for this parameter include the integer value of one
of the following health states.
Only partitions that match the filter are returned. All partitions are
used to evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then the health state of partitions with
HealthState value of OK (2) and Warning (4) will be returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type partitions_health_state_filter: int
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
The statistics show the number of children entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ServiceHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ServiceHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/GetHealth'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if partitions_health_state_filter is not None:
query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServiceHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
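# Usage sketch: fetch service health trimmed down to Error-state partitions
# only, skipping the health statistics (hypothetical identifier).
#
#     ERROR = 8
#     svc_health = client.get_service_health(
#         "myapp~app1~svc1",
#         partitions_health_state_filter=ERROR,
#         exclude_health_statistics=True)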
def get_service_health_using_policy(
self, service_id, events_health_state_filter=0, partitions_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of the specified Service Fabric service, by using the
specified health policy.
Gets the health information of the specified service.
If the application health policy is specified, the health evaluation
uses it to get the aggregated health state.
If the policy is not specified, the health evaluation uses the
application health policy defined in the application manifest, or the
default health policy, if no policy is defined in the manifest.
Use EventsHealthStateFilter to filter the collection of health events
reported on the service based on the health state.
Use PartitionsHealthStateFilter to filter the collection of partitions
returned.
If you specify a service that does not exist in the health store, this
request returns an error.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of one
of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param partitions_health_state_filter: Allows filtering of the
partitions health state objects returned in the result of service
health query based on their health state.
The possible values for this parameter include the integer value of one
of the following health states.
Only partitions that match the filter are returned. All partitions are
used to evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then the health state of partitions with
HealthState value of OK (2) and Warning (4) will be returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type partitions_health_state_filter: int
:param application_health_policy: Describes the health policies used
to evaluate the health of an application or one of its children.
If not present, the health evaluation uses the health policy from
application manifest or the default health policy.
:type application_health_policy:
~azure.servicefabric.models.ApplicationHealthPolicy
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
The statistics show the number of children entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ServiceHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ServiceHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/GetHealth'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if partitions_health_state_filter is not None:
query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if application_health_policy is not None:
body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServiceHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def report_service_health(
self, service_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Sends a health report on the Service Fabric service.
Reports health state of the specified Service Fabric service. The
report must contain the information about the source of the health
report and property on which it is reported.
        The report is sent to a Service Fabric gateway Service, which
        forwards it to the health store.
The report may be accepted by the gateway, but rejected by the health
store after extra validation.
For example, the health store may reject the report because of an
invalid parameter, like a stale sequence number.
To see whether the report was applied in the health store, run
GetServiceHealth and check that the report appears in the HealthEvents
section.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param health_information: Describes the health information for the
health report. This information needs to be present in all of the
health reports sent to the health manager.
:type health_information:
~azure.servicefabric.models.HealthInformation
:param immediate: A flag which indicates whether the report should be
sent immediately.
        A health report is sent to a Service Fabric gateway Application,
        which forwards it to the health store.
If Immediate is set to true, the report is sent immediately from Http
Gateway to the health store, regardless of the fabric client settings
that the Http Gateway Application is using.
This is useful for critical reports that should be sent as soon as
possible.
Depending on timing and other conditions, sending the report may still
fail, for example if the Http Gateway is closed or the message doesn't
reach the Gateway.
If Immediate is set to false, the report is sent based on the health
client settings from the Http Gateway. Therefore, it will be batched
according to the HealthReportSendInterval configuration.
This is the recommended setting because it allows the health client to
optimize health reporting messages to health store as well as health
report processing.
By default, reports are not sent immediately.
:type immediate: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
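        Example (a minimal sketch; ``client`` is assumed to be an
        initialized instance of this class, ``models`` is
        ``azure.servicefabric.models``, and the source and property names
        are illustrative)::

            info = models.HealthInformation(
                source_id="MyWatchdog",
                property="Availability",
                health_state="Warning")
            client.report_service_health("myapp~app1~svc1", info)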
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/ReportHealth'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if immediate is not None:
query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(health_information, 'HealthInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def resolve_service(
self, service_id, partition_key_type=None, partition_key_value=None, previous_rsp_version=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Resolve a Service Fabric partition.
Resolve a Service Fabric service partition, to get the endpoints of the
service replicas.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param partition_key_type: Key type for the partition. This parameter
is required if the partition scheme for the service is Int64Range or
Named. The possible values are following.
        - None (1) - Indicates that the PartitionKeyValue parameter is not
        specified. This is valid for partitions with the Singleton
        partitioning scheme. This is the default value. The value is 1.
        - Int64Range (2) - Indicates that the PartitionKeyValue parameter
        is an int64 partition key. This is valid for partitions with the
        Int64Range partitioning scheme. The value is 2.
        - Named (3) - Indicates that the PartitionKeyValue parameter is the
        name of the partition. This is valid for partitions with the Named
        partitioning scheme. The value is 3.
:type partition_key_type: int
:param partition_key_value: Partition key. This is required if the
partition scheme for the service is Int64Range or Named.
:type partition_key_value: str
        :param previous_rsp_version: The value in the Version field of the
        response that was received previously. This is required if the user
        knows that the previously obtained result is stale.
:type previous_rsp_version: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ResolvedServicePartition or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ResolvedServicePartition or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
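        Example (a sketch for an Int64Range-partitioned service; ``client``
        is assumed to be an initialized instance of this class)::

            rsp = client.resolve_service(
                "myapp~app1~svc1",
                partition_key_type=2,       # Int64Range
                partition_key_value="42")
            for endpoint in rsp.endpoints:
                print(endpoint.address)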
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/ResolvePartition'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if partition_key_type is not None:
query_parameters['PartitionKeyType'] = self._serialize.query("partition_key_type", partition_key_type, 'int')
if partition_key_value is not None:
query_parameters['PartitionKeyValue'] = self._serialize.query("partition_key_value", partition_key_value, 'str', skip_quote=True)
if previous_rsp_version is not None:
query_parameters['PreviousRspVersion'] = self._serialize.query("previous_rsp_version", previous_rsp_version, 'str', skip_quote=True)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResolvedServicePartition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_partition_info_list(
self, service_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of partitions of a Service Fabric service.
        Gets the list of partitions of a Service Fabric service. The
        response includes the partition ID, partitioning scheme
        information, keys supported by the partition, status, health, and
        other details about the partition.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
        :param continuation_token: The continuation token parameter is used
        to obtain the next set of results. A continuation token with a
        non-empty value is included in the response of the API when the
        results from the system do not fit in a single response. When this
        value is passed to the next API call, the API returns the next set
        of results. If there are no further results, the continuation token
        does not contain a value. The value of this parameter should not be
        URL encoded.
:type continuation_token: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedServicePartitionInfoList or ClientRawResponse if
raw=true
:rtype: ~azure.servicefabric.models.PagedServicePartitionInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
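        Example (a sketch showing how the continuation token drives paging;
        ``client`` is assumed to be an initialized instance of this class)::

            page = client.get_partition_info_list("myapp~app1~svc1")
            while True:
                for partition in page.items:
                    print(partition.partition_information.id)
                if not page.continuation_token:
                    break
                page = client.get_partition_info_list(
                    "myapp~app1~svc1",
                    continuation_token=page.continuation_token)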
"""
api_version = "6.0"
# Construct URL
url = '/Services/{serviceId}/$/GetPartitions'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedServicePartitionInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_partition_info(
self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about a Service Fabric partition.
        The Partitions endpoint returns information about the specified
        partition. The response includes the partition ID, partitioning
        scheme information, keys supported by the partition, status,
        health, and other details about the partition.
:param partition_id: The identity of the partition.
:type partition_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ServicePartitionInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ServicePartitionInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
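        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class and the partition ID is illustrative)::

            info = client.get_partition_info(
                "1daae3f5-7fd6-42e9-b1ba-8c05f873994d")
            if info is not None:  # a 204 response deserializes to None
                print(info.partition_status)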
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServicePartitionInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_service_name_info(
self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the name of the Service Fabric service for a partition.
The GetServiceName endpoint returns the name of the service for the
specified partition.
:param partition_id: The identity of the partition.
:type partition_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ServiceNameInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ServiceNameInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
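        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class and the partition ID is illustrative)::

            name_info = client.get_service_name_info(
                "1daae3f5-7fd6-42e9-b1ba-8c05f873994d")
            print(name_info.name)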
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetServiceName'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServiceNameInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_partition_health(
self, partition_id, events_health_state_filter=0, replicas_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of the specified Service Fabric partition.
Gets the health information of the specified partition.
Use EventsHealthStateFilter to filter the collection of health events
reported on the service based on the health state.
Use ReplicasHealthStateFilter to filter the collection of
ReplicaHealthState objects on the partition.
        If you specify a partition that does not exist in the health store,
        this request returns an error.
:param partition_id: The identity of the partition.
:type partition_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include integer value of one of
the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
        If not specified, all entries are returned. The state values are a
        flag-based enumeration, so the value can be a combination of these
        values, obtained using the bitwise 'OR' operator. For example, if
        the provided value is 6 then all of the events with a HealthState
        value of OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
        :param replicas_health_state_filter: Allows filtering the
        collection of ReplicaHealthState objects on the partition. The
        value can be obtained from members or bitwise operations on members
        of HealthStateFilter. Only replicas that match the filter are
        returned. All replicas are used to evaluate the aggregated health
        state. If not specified, all entries are returned. The state values
        are a flag-based enumeration, so the value can be a combination of
        these values, obtained using the bitwise 'OR' operator. For
        example, if the provided value is 6 then all of the replicas with a
        HealthState value of OK (2) and Warning (4) are returned. The
        possible values for this parameter include the integer value of one
        of the following health states.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type replicas_health_state_filter: int
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
        The statistics show the number of child entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PartitionHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PartitionHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
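        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class; the filter combines Ok (2) and Warning (4)
        with a bitwise OR)::

            health = client.get_partition_health(
                "1daae3f5-7fd6-42e9-b1ba-8c05f873994d",
                events_health_state_filter=2 | 4)  # Ok | Warning
            print(health.aggregated_health_state)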
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetHealth'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if replicas_health_state_filter is not None:
query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PartitionHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_partition_health_using_policy(
self, partition_id, events_health_state_filter=0, replicas_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of the specified Service Fabric partition, by using the
specified health policy.
Gets the health information of the specified partition.
If the application health policy is specified, the health evaluation
uses it to get the aggregated health state.
If the policy is not specified, the health evaluation uses the
application health policy defined in the application manifest, or the
default health policy, if no policy is defined in the manifest.
Use EventsHealthStateFilter to filter the collection of health events
reported on the partition based on the health state.
Use ReplicasHealthStateFilter to filter the collection of
ReplicaHealthState objects on the partition. Use
ApplicationHealthPolicy in the POST body to override the health
policies used to evaluate the health.
        If you specify a partition that does not exist in the health store,
        this request returns an error.
:param partition_id: The identity of the partition.
:type partition_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include integer value of one of
the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
        If not specified, all entries are returned. The state values are a
        flag-based enumeration, so the value can be a combination of these
        values, obtained using the bitwise 'OR' operator. For example, if
        the provided value is 6 then all of the events with a HealthState
        value of OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
        :param replicas_health_state_filter: Allows filtering the
        collection of ReplicaHealthState objects on the partition. The
        value can be obtained from members or bitwise operations on members
        of HealthStateFilter. Only replicas that match the filter are
        returned. All replicas are used to evaluate the aggregated health
        state. If not specified, all entries are returned. The state values
        are a flag-based enumeration, so the value can be a combination of
        these values, obtained using the bitwise 'OR' operator. For
        example, if the provided value is 6 then all of the replicas with a
        HealthState value of OK (2) and Warning (4) are returned. The
        possible values for this parameter include the integer value of one
        of the following health states.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type replicas_health_state_filter: int
:param application_health_policy: Describes the health policies used
to evaluate the health of an application or one of its children.
If not present, the health evaluation uses the health policy from
application manifest or the default health policy.
:type application_health_policy:
~azure.servicefabric.models.ApplicationHealthPolicy
:param exclude_health_statistics: Indicates whether the health
statistics should be returned as part of the query result. False by
default.
        The statistics show the number of child entities in health state
Ok, Warning, and Error.
:type exclude_health_statistics: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PartitionHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PartitionHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
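        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class and ``models`` is
        ``azure.servicefabric.models``)::

            policy = models.ApplicationHealthPolicy(
                consider_warning_as_error=True)
            health = client.get_partition_health_using_policy(
                "1daae3f5-7fd6-42e9-b1ba-8c05f873994d",
                application_health_policy=policy)
            print(health.aggregated_health_state)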
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetHealth'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if replicas_health_state_filter is not None:
query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int')
if exclude_health_statistics is not None:
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if application_health_policy is not None:
body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PartitionHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def report_partition_health(
self, partition_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Sends a health report on the Service Fabric partition.
Reports health state of the specified Service Fabric partition. The
report must contain the information about the source of the health
report and property on which it is reported.
        The report is sent to a Service Fabric gateway Partition, which
        forwards it to the health store.
The report may be accepted by the gateway, but rejected by the health
store after extra validation.
For example, the health store may reject the report because of an
invalid parameter, like a stale sequence number.
To see whether the report was applied in the health store, run
GetPartitionHealth and check that the report appears in the
HealthEvents section.
:param partition_id: The identity of the partition.
:type partition_id: str
:param health_information: Describes the health information for the
health report. This information needs to be present in all of the
health reports sent to the health manager.
:type health_information:
~azure.servicefabric.models.HealthInformation
:param immediate: A flag which indicates whether the report should be
sent immediately.
        A health report is sent to a Service Fabric gateway Application,
        which forwards it to the health store.
If Immediate is set to true, the report is sent immediately from Http
Gateway to the health store, regardless of the fabric client settings
that the Http Gateway Application is using.
This is useful for critical reports that should be sent as soon as
possible.
Depending on timing and other conditions, sending the report may still
fail, for example if the Http Gateway is closed or the message doesn't
reach the Gateway.
If Immediate is set to false, the report is sent based on the health
client settings from the Http Gateway. Therefore, it will be batched
according to the HealthReportSendInterval configuration.
This is the recommended setting because it allows the health client to
optimize health reporting messages to health store as well as health
report processing.
By default, reports are not sent immediately.
:type immediate: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
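        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class, ``models`` is ``azure.servicefabric.models``,
        and the report is flushed immediately rather than batched)::

            info = models.HealthInformation(
                source_id="MyWatchdog",
                property="DiskSpace",
                health_state="Error")
            client.report_partition_health(
                "1daae3f5-7fd6-42e9-b1ba-8c05f873994d", info,
                immediate=True)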
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/ReportHealth'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if immediate is not None:
query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(health_information, 'HealthInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_partition_load_information(
self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the load of the specified Service Fabric partition.
        Returns information about the specified partition.
        The response includes a list of load information entries. Each
        entry includes the load metric name, value, and last reported time
        in UTC.
:param partition_id: The identity of the partition.
:type partition_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PartitionLoadInformation or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PartitionLoadInformation or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
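        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class and the partition ID is illustrative)::

            load = client.get_partition_load_information(
                "1daae3f5-7fd6-42e9-b1ba-8c05f873994d")
            for report in load.primary_load_metric_reports:
                print(report.name, report.value)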
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetLoadInformation'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PartitionLoadInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reset_partition_load(
self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Resets the current load of a Service Fabric partition.
Resets the current load of a Service Fabric partition to the default
load for the service.
:param partition_id: The identity of the partition.
:type partition_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
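        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class; ``raw=True`` exposes the underlying HTTP
        response)::

            raw_response = client.reset_partition_load(
                "1daae3f5-7fd6-42e9-b1ba-8c05f873994d", raw=True)
            print(raw_response.response.status_code)  # 200 on success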
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/ResetLoad'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def recover_partition(
self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Indicates to the Service Fabric cluster that it should attempt to
recover a specific partition which is currently stuck in quorum loss.
Indicates to the Service Fabric cluster that it should attempt to
recover a specific partition which is currently stuck in quorum loss.
This operation should only be performed if it is known that the
replicas that are down cannot be recovered. Incorrect use of this API
can cause potential data loss.
:param partition_id: The identity of the partition.
:type partition_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
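        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class; only call this once the down replicas are
        known to be unrecoverable)::

            client.recover_partition(
                "1daae3f5-7fd6-42e9-b1ba-8c05f873994d")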
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/Recover'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def recover_service_partitions(
self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Indicates to the Service Fabric cluster that it should attempt to
recover the specified service which is currently stuck in quorum loss.
Indicates to the Service Fabric cluster that it should attempt to
recover the specified service which is currently stuck in quorum loss.
This operation should only be performed if it is known that the
replicas that are down cannot be recovered. Incorrect use of this API
can cause potential data loss.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Services/$/{serviceId}/$/GetPartitions/$/Recover'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def recover_system_partitions(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Indicates to the Service Fabric cluster that it should attempt to
recover the system services which are currently stuck in quorum loss.
Indicates to the Service Fabric cluster that it should attempt to
recover the system services which are currently stuck in quorum loss.
This operation should only be performed if it is known that the
replicas that are down cannot be recovered. Incorrect use of this API
can cause potential data loss.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/RecoverSystemPartitions'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def recover_all_partitions(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Indicates to the Service Fabric cluster that it should attempt to
recover any services (including system services) which are currently
stuck in quorum loss.
Indicates to the Service Fabric cluster that it should attempt to
recover any services (including system services) which are currently
stuck in quorum loss. This operation should only be performed if it is
known that the replicas that are down cannot be recovered. Incorrect
use of this API can cause potential data loss.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/RecoverAllPartitions'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def create_repair_task(
self, repair_task, custom_headers=None, raw=False, **operation_config):
"""Creates a new repair task.
        For clusters that have the Repair Manager Service configured, this
        API provides a way to create repair tasks that run automatically or
        manually.
        For repair tasks that run automatically, an appropriate repair
        executor must be running for each repair action to run
        automatically. These are currently only available in
        specially-configured Azure Cloud Services.
        To create a manual repair task, provide the set of impacted node
        names and the expected impact. When the state of the created repair
        task changes to approved, you can safely perform repair actions on
        those nodes.
        This API supports the Service Fabric platform; it is not meant to
        be used directly from your code.
:param repair_task: Describes the repair task to be created or
updated.
:type repair_task: ~azure.servicefabric.models.RepairTask
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RepairTaskUpdateInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
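        Example (a sketch of a manual repair task; ``client`` is assumed to
        be an initialized instance of this class, ``models`` is
        ``azure.servicefabric.models``, and the task ID, executor, and node
        name are illustrative)::

            task = models.RepairTask(
                task_id="MyRepair-001",
                state="Claimed",
                action="System.Manual",
                executor="MyExecutor",
                target=models.NodeRepairTargetDescription(
                    node_names=["Node01"]))
            update_info = client.create_repair_task(task)
            print(update_info.version)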
"""
api_version = "6.0"
# Construct URL
url = '/$/CreateRepairTask'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(repair_task, 'RepairTask')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RepairTaskUpdateInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def cancel_repair_task(
self, repair_task_cancel_description, custom_headers=None, raw=False, **operation_config):
"""Requests the cancellation of the given repair task.
This API supports the Service Fabric platform; it is not meant to be
used directly from your code.
:param repair_task_cancel_description: Describes the repair task to be
cancelled.
:type repair_task_cancel_description:
~azure.servicefabric.models.RepairTaskCancelDescription
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RepairTaskUpdateInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
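        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class and ``models`` is
        ``azure.servicefabric.models``)::

            cancel = models.RepairTaskCancelDescription(
                task_id="MyRepair-001", version="0")
            client.cancel_repair_task(cancel)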
"""
api_version = "6.0"
# Construct URL
url = '/$/CancelRepairTask'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(repair_task_cancel_description, 'RepairTaskCancelDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RepairTaskUpdateInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_repair_task(
self, task_id, version=None, custom_headers=None, raw=False, **operation_config):
"""Deletes a completed repair task.
This API supports the Service Fabric platform; it is not meant to be
used directly from your code.
:param task_id: The ID of the completed repair task to be deleted.
:type task_id: str
:param version: The current version number of the repair task. If
non-zero, then the request will only succeed if this value matches the
actual current version of the repair task. If zero, then no version
check is performed.
:type version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
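        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class; passing version="0" skips the version
        check)::

            client.delete_repair_task("MyRepair-001", version="0")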
"""
repair_task_delete_description = models.RepairTaskDeleteDescription(task_id=task_id, version=version)
api_version = "6.0"
# Construct URL
url = '/$/DeleteRepairTask'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(repair_task_delete_description, 'RepairTaskDeleteDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_repair_task_list(
self, task_id_filter=None, state_filter=None, executor_filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of repair tasks matching the given filters.
This API supports the Service Fabric platform; it is not meant to be
used directly from your code.
:param task_id_filter: The repair task ID prefix to be matched.
:type task_id_filter: str
:param state_filter: A bitwise-OR of the following values, specifying
which task states should be included in the result list.
- 1 - Created
- 2 - Claimed
- 4 - Preparing
- 8 - Approved
- 16 - Executing
- 32 - Restoring
- 64 - Completed
:type state_filter: int
:param executor_filter: The name of the repair executor whose claimed
tasks should be included in the list.
:type executor_filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.RepairTask] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
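        Example (a sketch; ``client`` is assumed to be an initialized
        instance of this class; state values combine with a bitwise OR)::

            tasks = client.get_repair_task_list(
                state_filter=16 | 64)  # Executing | Completed
            for task in tasks:
                print(task.task_id, task.state)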
"""
api_version = "6.0"
# Construct URL
url = '/$/GetRepairTaskList'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if task_id_filter is not None:
query_parameters['TaskIdFilter'] = self._serialize.query("task_id_filter", task_id_filter, 'str')
if state_filter is not None:
query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int')
if executor_filter is not None:
query_parameters['ExecutorFilter'] = self._serialize.query("executor_filter", executor_filter, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[RepairTask]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
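# Usage sketch (illustrative): state_filter is a bit mask, so states combine
# with bitwise OR. Assuming `client` is an instance of this class, listing
# only Created (1), Claimed (2), and Preparing (4) tasks:
#
#     tasks = client.get_repair_task_list(state_filter=1 | 2 | 4)
#     for task in tasks:
#         print(task.task_id, task.state)  # assumed RepairTask attributes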
def force_approve_repair_task(
self, task_id, version=None, custom_headers=None, raw=False, **operation_config):
"""Forces the approval of the given repair task.
This API supports the Service Fabric platform; it is not meant to be
used directly from your code.
:param task_id: The ID of the repair task.
:type task_id: str
:param version: The current version number of the repair task. If
non-zero, then the request will only succeed if this value matches the
actual current version of the repair task. If zero, then no version
check is performed.
:type version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RepairTaskUpdateInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
repair_task_approve_description = models.RepairTaskApproveDescription(task_id=task_id, version=version)
api_version = "6.0"
# Construct URL
url = '/$/ForceApproveRepairTask'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(repair_task_approve_description, 'RepairTaskApproveDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RepairTaskUpdateInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
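# Usage sketch (illustrative): force approval is typically driven from the
# list call so the current version can be supplied. Assuming `client` is an
# instance of this class and `version` is an attribute on RepairTask:
#
#     for task in client.get_repair_task_list(task_id_filter='MyRepair/'):
#         client.force_approve_repair_task(task.task_id, version=task.version)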
def update_repair_task_health_policy(
self, repair_task_update_health_policy_description, custom_headers=None, raw=False, **operation_config):
"""Updates the health policy of the given repair task.
This API supports the Service Fabric platform; it is not meant to be
used directly from your code.
:param repair_task_update_health_policy_description: Describes the
repair task health policy to be updated.
:type repair_task_update_health_policy_description:
~azure.servicefabric.models.RepairTaskUpdateHealthPolicyDescription
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RepairTaskUpdateInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/UpdateRepairTaskHealthPolicy'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(repair_task_update_health_policy_description, 'RepairTaskUpdateHealthPolicyDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RepairTaskUpdateInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
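# Usage sketch (illustrative): the request body is a
# RepairTaskUpdateHealthPolicyDescription model. The field names below are
# assumptions inferred from the model name, not confirmed from this file:
#
#     description = models.RepairTaskUpdateHealthPolicyDescription(
#         task_id='MyRepair/0001',
#         version=task.version,
#         perform_preparing_health_check=True,
#         perform_restoring_health_check=False)
#     client.update_repair_task_health_policy(description)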
def update_repair_execution_state(
self, repair_task, custom_headers=None, raw=False, **operation_config):
"""Updates the execution state of a repair task.
This API supports the Service Fabric platform; it is not meant to be
used directly from your code.
:param repair_task: Describes the repair task to be created or
updated.
:type repair_task: ~azure.servicefabric.models.RepairTask
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RepairTaskUpdateInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/UpdateRepairExecutionState'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(repair_task, 'RepairTask')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RepairTaskUpdateInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_replica_info_list(
self, partition_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about replicas of a Service Fabric service
partition.
The GetReplicas endpoint returns information about the replicas of the
specified partition. The response includes the ID, role, status, health,
node name, uptime, and other details about the replica.
:param partition_id: The identity of the partition.
:type partition_id: str
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there
are no further results, the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedReplicaInfoList or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PagedReplicaInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetReplicas'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedReplicaInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
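# Usage sketch (illustrative): draining all pages with the continuation
# token described above. `items` and `continuation_token` are assumed
# attribute names on the PagedReplicaInfoList model:
#
#     continuation = None
#     while True:
#         page = client.get_replica_info_list(
#             partition_id, continuation_token=continuation)
#         for replica in page.items:
#             print(replica.replica_status)  # assumed attribute
#         continuation = page.continuation_token
#         if not continuation:
#             break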
def get_replica_info(
self, partition_id, replica_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about a replica of a Service Fabric partition.
The response includes the ID, role, status, health, node name, uptime,
and other details about the replica.
:param partition_id: The identity of the partition.
:type partition_id: str
:param replica_id: The identifier of the replica.
:type replica_id: str
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there
are no further results, the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ReplicaInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ReplicaInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetReplicas/{replicaId}'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicaInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_replica_health(
self, partition_id, replica_id, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric stateful service replica or
stateless service instance.
Gets the health of a Service Fabric replica.
Use EventsHealthStateFilter to filter the collection of health events
reported on the replica based on the health state.
:param partition_id: The identity of the partition.
:type partition_id: str
:param replica_id: The identifier of the replica.
:type replica_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of
one of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values, obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ReplicaHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ReplicaHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicaHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
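# Usage sketch (illustrative): the events filter is a flag enumeration, so
# 2 | 4 == 6 returns only Ok and Warning events, as the docstring explains.
# Assuming `client` is an instance of this class:
#
#     health = client.get_replica_health(
#         partition_id, replica_id, events_health_state_filter=2 | 4)
#     print(health.aggregated_health_state)  # assumed ReplicaHealth attribute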
def get_replica_health_using_policy(
self, partition_id, replica_id, events_health_state_filter=0, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric stateful service replica or
stateless service instance using the specified policy.
Gets the health of a Service Fabric stateful service replica or
stateless service instance.
Use EventsHealthStateFilter to filter the collection of health events
reported on the replica based on the health state.
Use ApplicationHealthPolicy to optionally override the health policies
used to evaluate the health. This API only uses the
'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest
of the fields are ignored while evaluating the health of the replica.
:param partition_id: The identity of the partition.
:type partition_id: str
:param replica_id: The identifier of the replica.
:type replica_id: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of
one of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values, obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param application_health_policy: Describes the health policies used
to evaluate the health of an application or one of its children.
If not present, the health evaluation uses the health policy from
application manifest or the default health policy.
:type application_health_policy:
~azure.servicefabric.models.ApplicationHealthPolicy
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ReplicaHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ReplicaHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if application_health_policy is not None:
body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicaHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
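# Usage sketch (illustrative): only ConsiderWarningAsError is honored by
# this API, so a minimal override that treats warnings as errors could look
# like this (`consider_warning_as_error` is an assumed field name on the
# ApplicationHealthPolicy model):
#
#     policy = models.ApplicationHealthPolicy(consider_warning_as_error=True)
#     health = client.get_replica_health_using_policy(
#         partition_id, replica_id, application_health_policy=policy)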
def report_replica_health(
self, partition_id, replica_id, health_information, service_kind="Stateful", immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Sends a health report on the Service Fabric replica.
Reports health state of the specified Service Fabric replica. The
report must contain the information about the source of the health
report and property on which it is reported.
The report is sent to a Service Fabric gateway Replica, which forwards
it to the health store.
The report may be accepted by the gateway, but rejected by the health
store after extra validation.
For example, the health store may reject the report because of an
invalid parameter, like a stale sequence number.
To see whether the report was applied in the health store, run
GetReplicaHealth and check that the report appears in the HealthEvents
section.
:param partition_id: The identity of the partition.
:type partition_id: str
:param replica_id: The identifier of the replica.
:type replica_id: str
:param service_kind: The kind of service replica (Stateless or
Stateful) for which the health is being reported. Possible values
include: 'Stateless', 'Stateful'
:type service_kind: str or
~azure.servicefabric.models.ReplicaHealthReportServiceKindRequiredQueryParam
:param health_information: Describes the health information for the
health report. This information needs to be present in all of the
health reports sent to the health manager.
:type health_information:
~azure.servicefabric.models.HealthInformation
:param immediate: A flag which indicates whether the report should be
sent immediately.
A health report is sent to a Service Fabric gateway Application, which
forwards it to the health store.
If Immediate is set to true, the report is sent immediately from Http
Gateway to the health store, regardless of the fabric client settings
that the Http Gateway Application is using.
This is useful for critical reports that should be sent as soon as
possible.
Depending on timing and other conditions, sending the report may still
fail, for example if the Http Gateway is closed or the message doesn't
reach the Gateway.
If Immediate is set to false, the report is sent based on the health
client settings from the Http Gateway. Therefore, it will be batched
according to the HealthReportSendInterval configuration.
This is the recommended setting because it allows the health client to
optimize health reporting messages to health store as well as health
report processing.
By default, reports are not sent immediately.
:type immediate: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['ServiceKind'] = self._serialize.query("service_kind", service_kind, 'str')
if immediate is not None:
query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(health_information, 'HealthInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
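# Usage sketch (illustrative): a health report needs at least a source, a
# property, and a health state; the HealthInformation field names below are
# assumptions based on the model name. immediate=True bypasses client-side
# batching, as described in the docstring above:
#
#     info = models.HealthInformation(source_id='Watchdog',
#                                     property='Connectivity',
#                                     health_state='Warning')
#     client.report_replica_health(partition_id, replica_id, info,
#                                  service_kind='Stateful', immediate=True)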
def get_deployed_service_replica_info_list(
self, node_name, application_id, partition_id=None, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of replicas deployed on a Service Fabric node.
Gets the list containing the information about replicas deployed on a
Service Fabric node. The information includes the partition ID, replica
ID, status of the replica, name of the service, name of the service
type, and other information. Use the PartitionId or ServiceManifestName
query parameters to return information about the deployed replicas
matching the specified values for those parameters.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param service_manifest_name: The name of a service manifest
registered as part of an application type in a Service Fabric cluster.
:type service_manifest_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.DeployedServiceReplicaInfo]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if partition_id is not None:
query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str')
if service_manifest_name is not None:
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DeployedServiceReplicaInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
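# Usage sketch (illustrative): per the docstring above, hierarchical
# application names drop the 'fabric:' scheme and use '~' from version 6.0
# onwards, so an application named fabric:/myapp/app1 on node Node01 is
# queried as:
#
#     replicas = client.get_deployed_service_replica_info_list(
#         'Node01', 'myapp~app1')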
def get_deployed_service_replica_detail_info(
self, node_name, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the details of replica deployed on a Service Fabric node.
Gets the details of the replica deployed on a Service Fabric node. The
information include service kind, service name, current service
operation, current service operation start date time, partition id,
replica/instance id, reported load and other information.
:param node_name: The name of the node.
:type node_name: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param replica_id: The identifier of the replica.
:type replica_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeployedServiceReplicaDetailInfo or ClientRawResponse if
raw=true
:rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_deployed_service_replica_detail_info_by_partition_id(
self, node_name, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the details of replica deployed on a Service Fabric node.
Gets the details of the replica deployed on a Service Fabric node. The
information include service kind, service name, current service
operation, current service operation start date time, partition id,
replica/instance id, reported load and other information.
:param node_name: The name of the node.
:type node_name: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeployedServiceReplicaDetailInfo or ClientRawResponse if
raw=true
:rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def restart_replica(
self, node_name, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Restarts a service replica of a persisted service running on a node.
Restarts a service replica of a persisted service running on a node.
Warning - There are no safety checks performed when this API is used.
Incorrect use of this API can lead to availability loss for stateful
services.
:param node_name: The name of the node.
:type node_name: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param replica_id: The identifier of the replica.
:type replica_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def remove_replica(
self, node_name, partition_id, replica_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Removes a service replica running on a node.
This API simulates a Service Fabric replica failure by removing a
replica from a Service Fabric cluster. The removal closes the replica,
transitions the replica to the role None, and then removes all of the
state information of the replica from the cluster. This API tests the
replica state removal path, and simulates the report fault permanent
path through client APIs. Warning - There are no safety checks
performed when this API is used. Incorrect use of this API can lead to
data loss for stateful services. In addition, the forceRemove flag
impacts all other replicas hosted in the same process.
:param node_name: The name of the node.
:type node_name: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param replica_id: The identifier of the replica.
:type replica_id: str
:param force_remove: Remove a Service Fabric application or service
forcefully without going through the graceful shutdown sequence. This
parameter can be used to forcefully delete an application or service
for which delete is timing out due to issues in the service code that
prevents graceful close of replicas.
:type force_remove: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if force_remove is not None:
query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
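# Usage sketch (illustrative): given the warning above, force_remove should
# be reserved for replicas whose graceful close is stuck; it skips the
# shutdown sequence and affects every replica hosted in the same process:
#
#     client.remove_replica('Node01', partition_id, replica_id,
#                           force_remove=True)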
def get_deployed_service_package_info_list(
self, node_name, application_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of service packages deployed on a Service Fabric node.
Returns the information about the service packages deployed on a
Service Fabric node for the given application.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DeployedServicePackageInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_deployed_service_package_info_list_by_name(
self, node_name, application_id, service_package_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of service packages deployed on a Service Fabric node
matching exactly the specified name.
Returns the information about the service packages deployed on a
Service Fabric node for the given application. These results are of
service packages whose name matches exactly the service package name
specified as the parameter.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_package_name: The name of the service package.
:type service_package_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DeployedServicePackageInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_deployed_service_package_health(
self, node_name, application_id, service_package_name, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about health of an service package for a specific
application deployed for a Service Fabric node and application.
Gets the information about health of service package for a specific
application deployed on a Service Fabric node. Use
EventsHealthStateFilter to optionally filter for the collection of
HealthEvent objects reported on the deployed service package based on
health state.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_package_name: The name of the service package.
:type service_package_name: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of
one of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values, obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeployedServicePackageHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.DeployedServicePackageHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeployedServicePackageHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_deployed_service_package_health_using_policy(
self, node_name, application_id, service_package_name, events_health_state_filter=0, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the information about health of service package for a specific
application deployed on a Service Fabric node using the specified
policy.
Gets the information about health of an service package for a specific
application deployed on a Service Fabric node. using the specified
policy. Use EventsHealthStateFilter to optionally filter for the
collection of HealthEvent objects reported on the deployed service
package based on health state. Use ApplicationHealthPolicy to
optionally override the health policies used to evaluate the health.
This API only uses 'ConsiderWarningAsError' field of the
ApplicationHealthPolicy. The rest of the fields are ignored while
evaluating the health of the deployed service package.
.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_package_name: The name of the service package.
:type service_package_name: str
:param events_health_state_filter: Allows filtering the collection of
HealthEvent objects returned based on health state.
The possible values for this parameter include the integer value of
one of the following health states.
Only events that match the filter are returned. All events are used to
evaluate the aggregated health state.
If not specified, all entries are returned. The state values are a
flag-based enumeration, so the value could be a combination of these
values, obtained using the bitwise 'OR' operator. For example, if the
provided value is 6 then all of the events with HealthState value of
OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type events_health_state_filter: int
:param application_health_policy: Describes the health policies used
to evaluate the health of an application or one of its children.
If not present, the health evaluation uses the health policy from
application manifest or the default health policy.
:type application_health_policy:
~azure.servicefabric.models.ApplicationHealthPolicy
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeployedServicePackageHealth or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.DeployedServicePackageHealth or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if events_health_state_filter is not None:
query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if application_health_policy is not None:
body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeployedServicePackageHealth', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
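# Usage sketch for the health query above (the method and model names are
# assumed from this SDK's conventions; `client` is a configured Service
# Fabric client, and all identifiers are hypothetical):
#
#     from azure.servicefabric.models import ApplicationHealthPolicy
#     policy = ApplicationHealthPolicy(consider_warning_as_error=True)
#     health = client.get_deployed_service_package_health_using_policy(
#         node_name="_Node_0", application_id="myapp~app1",
#         service_package_name="MyServicePackage",
#         events_health_state_filter=6,  # OK (2) | Warning (4)
#         application_health_policy=policy)
#     print(health.aggregated_health_state)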
def report_deployed_service_package_health(
self, node_name, application_id, service_package_name, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Sends a health report on the Service Fabric deployed service package.
Reports health state of the service package of the application deployed
on a Service Fabric node. The report must contain the information about
the source of the health report and the property on which it is reported.
The report is sent to a Service Fabric gateway Service, which forwards
it to the health store.
The report may be accepted by the gateway, but rejected by the health
store after extra validation.
For example, the health store may reject the report because of an
invalid parameter, like a stale sequence number.
To see whether the report was applied in the health store, get deployed
service package health and check that the report appears in the
HealthEvents section.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_package_name: The name of the service package.
:type service_package_name: str
:param health_information: Describes the health information for the
health report. This information needs to be present in all of the
health reports sent to the health manager.
:type health_information:
~azure.servicefabric.models.HealthInformation
:param immediate: A flag which indicates whether the report should be
sent immediately.
A health report is sent to a Service Fabric gateway Application, which
forwards it to the health store.
If Immediate is set to true, the report is sent immediately from Http
Gateway to the health store, regardless of the fabric client settings
that the Http Gateway Application is using.
This is useful for critical reports that should be sent as soon as
possible.
Depending on timing and other conditions, sending the report may still
fail, for example if the Http Gateway is closed or the message doesn't
reach the Gateway.
If Immediate is set to false, the report is sent based on the health
client settings from the Http Gateway. Therefore, it will be batched
according to the HealthReportSendInterval configuration.
This is the recommended setting because it allows the health client to
optimize health reporting messages to health store as well as health
report processing.
By default, reports are not sent immediately.
:type immediate: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if immediate is not None:
query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(health_information, 'HealthInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
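# Usage sketch for report_deployed_service_package_health (the field names
# on HealthInformation are assumed from the docstring; values hypothetical):
#
#     from azure.servicefabric.models import HealthInformation
#     info = HealthInformation(source_id="MyWatchdog",
#                              property="Availability",
#                              health_state="Warning")
#     client.report_deployed_service_package_health(
#         node_name="_Node_0", application_id="myapp~app1",
#         service_package_name="MyServicePackage",
#         health_information=info,
#         immediate=False)  # batched per HealthReportSendInterval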
def deployed_service_package_to_node(
self, node_name, deploy_service_package_to_node_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Downloads all of the code packagesassociated with specified service
manifest on the specified node.
This API provides a way to download code packages including the
container images on a specific node outside of the normal application
deployment and upgrade path. This is useful for the large code packages
and container iamges to be present on the node before the actual
application deployment and upgrade, thus significantly reducing the
total time required for the deployment or upgrade.
:param node_name: The name of the node.
:type node_name: str
:param deploy_service_package_to_node_description: Describes
information for deploying a service package to a Service Fabric node.
:type deploy_service_package_to_node_description:
~azure.servicefabric.models.DeployServicePackageToNodeDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/DeployServicePackage'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(deploy_service_package_to_node_description, 'DeployServicePackageToNodeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_deployed_code_package_info_list(
self, node_name, application_id, service_manifest_name=None, code_package_name=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of code packages deployed on a Service Fabric node.
Gets the list of code packages deployed on a Service Fabric node for
the given application.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_manifest_name: The name of a service manifest
registered as part of an application type in a Service Fabric cluster.
:type service_manifest_name: str
:param code_package_name: The name of the code package specified in
the service manifest registered as part of an application type in a
Service Fabric cluster.
:type code_package_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.DeployedCodePackageInfo] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if service_manifest_name is not None:
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str')
if code_package_name is not None:
query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DeployedCodePackageInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
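# Usage sketch: list the code packages one application has deployed on a
# node (attribute names on DeployedCodePackageInfo are assumed; identifiers
# are hypothetical):
#
#     packages = client.get_deployed_code_package_info_list(
#         node_name="_Node_0", application_id="myapp~app1",
#         service_manifest_name="MyServiceManifest")  # optional filter
#     for pkg in packages:
#         print(pkg.name, pkg.status)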
def restart_deployed_code_package(
self, node_name, application_id, restart_deployed_code_package_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Restarts a code package deployed on a Service Fabric node in a cluster.
Restarts a code package deployed on a Service Fabric node in a cluster.
This aborts the code package process, which will restart all the user
service replicas hosted in that process.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param restart_deployed_code_package_description: Describes the
deployed code package on Service Fabric node to restart.
:type restart_deployed_code_package_description:
~azure.servicefabric.models.RestartDeployedCodePackageDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(restart_deployed_code_package_description, 'RestartDeployedCodePackageDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
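# Usage sketch for restart_deployed_code_package (field names assumed from
# this SDK's model conventions; a code_package_instance_id of "0" is
# commonly used to match any running instance):
#
#     from azure.servicefabric.models import RestartDeployedCodePackageDescription
#     desc = RestartDeployedCodePackageDescription(
#         service_manifest_name="MyServiceManifest",
#         code_package_name="Code",
#         code_package_instance_id="0")
#     client.restart_deployed_code_package(
#         node_name="_Node_0", application_id="myapp~app1",
#         restart_deployed_code_package_description=desc)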
def get_container_logs_deployed_on_node(
self, node_name, application_id, service_manifest_name, code_package_name, tail=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the container logs for container deployed on a Service Fabric
node.
Gets the container logs for container deployed on a Service Fabric node
for the given code package.
:param node_name: The name of the node.
:type node_name: str
:param application_id: The identity of the application. This is
typically the full name of the application without the 'fabric:' URI
scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the application name is "fabric:/myapp/app1", the
application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
previous versions.
:type application_id: str
:param service_manifest_name: The name of a service manifest
registered as part of an application type in a Service Fabric cluster.
:type service_manifest_name: str
:param code_package_name: The name of the code package specified in
the service manifest registered as part of an application type in a
Service Fabric cluster.
:type code_package_name: str
:param tail: Number of lines to fetch from the tail end of the logs.
:type tail: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ContainerLogs or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ContainerLogs or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.1"
# Construct URL
url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str'),
'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str')
query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str')
if tail is not None:
query_parameters['Tail'] = self._serialize.query("tail", tail, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ContainerLogs', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
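# Usage sketch: fetch the last 100 log lines of a deployed container (note
# that `tail` is passed as a string; the `content` attribute on
# ContainerLogs is assumed):
#
#     logs = client.get_container_logs_deployed_on_node(
#         node_name="_Node_0", application_id="myapp~app1",
#         service_manifest_name="MyServiceManifest",
#         code_package_name="Code", tail="100")
#     print(logs.content)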
def create_compose_deployment(
self, create_compose_deployment_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Creates a Service Fabric compose deployment.
Compose is a file format that describes multi-container applications.
This API allows deploying container-based applications defined in
compose format in a Service Fabric cluster. Once the deployment is
created, its status can be tracked via the `GetComposeDeploymentStatus`
API.
:param create_compose_deployment_description: Describes the compose
deployment that needs to be created.
:type create_compose_deployment_description:
~azure.servicefabric.models.CreateComposeDeploymentDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0-preview"
# Construct URL
url = '/ComposeDeployments/$/Create'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(create_compose_deployment_description, 'CreateComposeDeploymentDescription')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
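# Usage sketch for create_compose_deployment (field names on
# CreateComposeDeploymentDescription are assumed; the file path is
# hypothetical):
#
#     from azure.servicefabric.models import CreateComposeDeploymentDescription
#     with open("docker-compose.yml") as f:
#         desc = CreateComposeDeploymentDescription(
#             deployment_name="mydeployment",
#             compose_file_content=f.read())
#     client.create_compose_deployment(desc)
#     status = client.get_compose_deployment_status("mydeployment")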
def get_compose_deployment_status(
self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets information about a Service Fabric compose deployment.
Returns the status of the compose deployment that was created or is in
the process of being created in the Service Fabric cluster and whose
name matches the one specified as the parameter. The response includes
the name, status, and other details about the deployment.
:param deployment_name: The identity of the deployment.
:type deployment_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ComposeDeploymentStatusInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ComposeDeploymentStatusInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0-preview"
# Construct URL
url = '/ComposeDeployments/{deploymentName}'
path_format_arguments = {
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ComposeDeploymentStatusInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_compose_deployment_status_list(
self, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the list of compose deployments created in the Service Fabric
cluster.
Gets the status of the compose deployments that were created or are in
the process of being created in the Service Fabric cluster. The
response includes the name, status, and other details about the compose
deployments. If the list of deployments does not fit in a page, one
page of results is returned as well as a continuation token, which can
be used to get the next page.
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there
are no further results, then the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param max_results: The maximum number of results to be returned as
part of the paged queries. This parameter defines the upper bound on
the number of results returned. The results returned can be less than
the specified maximum results if they do not fit in the message as per
the max message size restrictions defined in the configuration. If
this parameter is zero or not specified, the paged query includes as
many results as possible that fit in the return message.
:type max_results: long
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedComposeDeploymentStatusInfoList or ClientRawResponse if
raw=true
:rtype:
~azure.servicefabric.models.PagedComposeDeploymentStatusInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0-preview"
# Construct URL
url = '/ComposeDeployments'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if max_results is not None:
query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedComposeDeploymentStatusInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
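# Paging sketch: walk every compose deployment page by page using the
# continuation token (attribute names on the paged list model are assumed):
#
#     token = None
#     while True:
#         page = client.get_compose_deployment_status_list(
#             continuation_token=token, max_results=50)
#         for info in page.items:
#             print(info.name, info.status)
#         token = page.continuation_token
#         if not token:
#             break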
def get_compose_deployment_upgrade_progress(
self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets details for the latest upgrade performed on this Service Fabric
compose deployment.
Returns the information about the state of the compose deployment
upgrade along with details to aid debugging application health issues.
:param deployment_name: The identity of the deployment.
:type deployment_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ComposeDeploymentUpgradeProgressInfo or ClientRawResponse if
raw=true
:rtype:
~azure.servicefabric.models.ComposeDeploymentUpgradeProgressInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0-preview"
# Construct URL
url = '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress'
path_format_arguments = {
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ComposeDeploymentUpgradeProgressInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def remove_compose_deployment(
self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Deletes an existing Service Fabric compose deployment from cluster.
Deletes an existing Service Fabric compose deployment.
:param deployment_name: The identity of the deployment.
:type deployment_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0-preview"
# Construct URL
url = '/ComposeDeployments/{deploymentName}/$/Delete'
path_format_arguments = {
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def start_compose_deployment_upgrade(
self, deployment_name, compose_deployment_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Starts upgrading a compose deployment in the Service Fabric cluster.
Validates the supplied upgrade parameters and starts upgrading the
deployment if the parameters are valid.
:param deployment_name: The identity of the deployment.
:type deployment_name: str
:param compose_deployment_upgrade_description: Parameters for
upgrading compose deployment.
:type compose_deployment_upgrade_description:
~azure.servicefabric.models.ComposeDeploymentUpgradeDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0-preview"
# Construct URL
url = '/ComposeDeployments/{deploymentName}/$/Upgrade'
path_format_arguments = {
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(compose_deployment_upgrade_description, 'ComposeDeploymentUpgradeDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def start_chaos(
self, chaos_parameters, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Starts Chaos in the cluster.
If Chaos is not already running in the cluster, it starts Chaos with
the passed-in Chaos parameters.
If Chaos is already running when this call is made, the call fails with
the error code FABRIC_E_CHAOS_ALREADY_RUNNING.
Please refer to the article [Induce controlled Chaos in Service Fabric
clusters](https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-controlled-chaos)
for more details.
:param chaos_parameters: Describes all the parameters to configure a
Chaos run.
:type chaos_parameters: ~azure.servicefabric.models.ChaosParameters
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Tools/Chaos/$/Start'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(chaos_parameters, 'ChaosParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
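# Usage sketch for start_chaos (ChaosParameters field names are assumed;
# note that time_to_run_in_seconds is expressed as a string in this API):
#
#     from azure.servicefabric.models import ChaosParameters
#     params = ChaosParameters(time_to_run_in_seconds="3600",
#                              max_concurrent_faults=3)
#     client.start_chaos(params)
#     # ... later, stop scheduling further faults:
#     client.stop_chaos()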
def stop_chaos(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Stops Chaos in the cluster if it is already running, otherwise it does
nothing.
Stops Chaos from scheduling further faults; the in-flight faults
are not affected.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Tools/Chaos/$/Stop'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_chaos_report(
self, continuation_token=None, start_time_utc=None, end_time_utc=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the next segment of the Chaos report based on the passed-in
continuation token or the passed-in time-range.
You can either specify the ContinuationToken to get the next segment of
the Chaos report or you can specify the time-range
through StartTimeUtc and EndTimeUtc, but you cannot specify both the
ContinuationToken and the time-range in the same call.
When there are more than 100 Chaos events, the Chaos report is returned
in segments where a segment contains no more than 100 Chaos events.
:param continuation_token: The continuation token parameter is used to
obtain the next set of results. A continuation token with a non-empty
value is included in the response of the API when the results from the
system do not fit in a single response. When this value is passed to
the next API call, the API returns the next set of results. If there
are no further results, then the continuation token does not contain a
value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param start_time_utc: The Windows file time representing the start
time of the time range for which a Chaos report is to be generated.
Please consult [DateTime.ToFileTimeUtc
Method](https://msdn.microsoft.com/en-us/library/system.datetime.tofiletimeutc(v=vs.110).aspx)
for details.
:type start_time_utc: str
:param end_time_utc: The Windows file time representing the end time
of the time range for which a Chaos report is to be generated. Please
consult [DateTime.ToFileTimeUtc
Method](https://msdn.microsoft.com/en-us/library/system.datetime.tofiletimeutc(v=vs.110).aspx)
for details.
:type end_time_utc: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ChaosReport or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ChaosReport or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Tools/Chaos/$/Report'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if start_time_utc is not None:
query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
if end_time_utc is not None:
query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ChaosReport', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
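# Sketch: request the Chaos report for the past hour. StartTimeUtc and
# EndTimeUtc are Windows file times (100-nanosecond ticks since
# 1601-01-01 UTC), so a small conversion helper is used here (the helper
# is illustrative, not part of the SDK):
#
#     import datetime
#     def to_filetime(dt):
#         epoch = datetime.datetime(1601, 1, 1)
#         return str(int((dt - epoch).total_seconds() * 10**7))
#     now = datetime.datetime.utcnow()
#     report = client.get_chaos_report(
#         start_time_utc=to_filetime(now - datetime.timedelta(hours=1)),
#         end_time_utc=to_filetime(now))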
def upload_file(
self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Uploads contents of the file to the image store.
Uploads contents of the file to the image store. Use this API if the
file is small enough to upload again if the connection fails. The
file's data needs to be added to the request body. The contents will be
uploaded to the specified path. The image store service uses a mark
file to indicate the availability of the folder. The mark file is an
empty file named "_.dir". The mark file is generated by the image store
service when all files in a folder are uploaded. When using the
File-by-File approach to upload an application package in REST, the
image store service
isn't aware of the file hierarchy of the application package; you need
to create a mark file per folder and upload it last, to let the image
store service know that the folder is complete.
:param content_path: Relative path to file or folder in the image
store from its root.
:type content_path: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/{contentPath}'
path_format_arguments = {
'contentPath': self._serialize.url("content_path", content_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
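# Caveat: as generated, upload_file accepts no file-content argument and
# the PUT above is sent without a body, even though the docstring says the
# file's data belongs in the request body; attaching the actual bytes has
# to happen outside this method. Intended call shape (path hypothetical):
#
#     client.upload_file(content_path="MyAppPackage/ApplicationManifest.xml")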
def get_image_store_content(
self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the image store content information.
Returns the information about the image store content at the specified
contentPath relative to the root of the image store.
:param content_path: Relative path to file or folder in the image
store from its root.
:type content_path: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImageStoreContent or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ImageStoreContent or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/{contentPath}'
path_format_arguments = {
'contentPath': self._serialize.url("content_path", content_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImageStoreContent', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_image_store_content(
self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Deletes existing image store content.
Deletes existing image store content found within the given image
store relative path. This can be used to delete uploaded application
packages once they are provisioned.
:param content_path: Relative path to file or folder in the image
store from its root.
:type content_path: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/{contentPath}'
path_format_arguments = {
'contentPath': self._serialize.url("content_path", content_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_image_store_root_content(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the content information at the root of the image store.
Returns the information about the image store content at the root of
the image store.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImageStoreContent or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ImageStoreContent or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImageStoreContent', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def copy_image_store_content(
self, image_store_copy_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Copies image store content internally.
Copies the image store content from the source image store relative
path to the destination image store relative path.
:param image_store_copy_description: Describes the copy description
for the image store.
:type image_store_copy_description:
~azure.servicefabric.models.ImageStoreCopyDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/$/Copy'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_store_copy_description, 'ImageStoreCopyDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete_image_store_upload_session(
self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Cancels an image store upload session.
The DELETE request will cause the existing upload session to expire and
remove any previously uploaded file chunks.
:param session_id: A GUID generated by the user for a file upload. It
identifies an image store upload session which keeps track of all
file chunks until it is committed.
:type session_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/$/DeleteUploadSession'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def commit_image_store_upload_session(
self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Commit an image store upload session.
When all file chunks have been uploaded, the upload session needs to be
committed explicitly to complete the upload. Image store preserves the
upload session until the expiration time, which is 30 minutes after the
last chunk received.
        :param session_id: A GUID generated by the user for a file upload.
It identifies an image store upload session which keeps track of all
file chunks until it is committed.
:type session_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/$/CommitUploadSession'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_image_store_upload_session_by_id(
self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Get the image store upload session by ID.
        Gets the image store upload session identified by the given ID. The
        user can query the upload session at any time during the upload.
        :param session_id: A GUID generated by the user for a file upload.
It identifies an image store upload session which keeps track of all
file chunks until it is committed.
:type session_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: UploadSession or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.UploadSession or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
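
        Example: a minimal sketch that inspects an in-progress upload session.
        It assumes an authenticated client named ``client``, a previously
        generated ``session_id``, and that ``UploadSession`` exposes an
        ``upload_sessions`` list (an assumption based on the model name)::

            session = client.get_image_store_upload_session_by_id(session_id)
            for info in session.upload_sessions:
                print(info.store_relative_path, info.file_size)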
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/$/GetUploadSession'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('UploadSession', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_image_store_upload_session_by_path(
self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Get the image store upload session by relative path.
        Gets the image store upload session associated with the given image
        store relative path. The user can query the upload session at any time
        during the upload.
:param content_path: Relative path to file or folder in the image
store from its root.
:type content_path: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: UploadSession or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.UploadSession or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/{contentPath}/$/GetUploadSession'
path_format_arguments = {
'contentPath': self._serialize.url("content_path", content_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('UploadSession', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def upload_file_chunk(
self, content_path, session_id, content_range, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Uploads a file chunk to the image store relative path.
        Uploads a file chunk to the image store with the specified upload
        session ID and image store relative path. This API allows the user to
        resume the file upload operation; the user doesn't have to restart the
        file upload from scratch whenever there is a network interruption. Use
        this option if the file size is large.
        To perform a resumable file upload, the user needs to break the file
        into multiple chunks and upload these chunks to the image store
        one-by-one. Chunks don't have to be uploaded in order. If the file
        represented by the image store relative path already exists, it will
        be overwritten when the upload session commits.
:param content_path: Relative path to file or folder in the image
store from its root.
:type content_path: str
        :param session_id: A GUID generated by the user for a file upload.
It identifies an image store upload session which keeps track of all
file chunks until it is committed.
:type session_id: str
        :param content_range: When uploading file chunks to the image store,
        the Content-Range header field needs to be configured and sent with
        the request. The format should look like "bytes
        {First-Byte-Position}-{Last-Byte-Position}/{File-Length}". For
        example, Content-Range:bytes 300-5000/20000 indicates that the user is
        sending bytes 300 through 5,000 and the total file length is 20,000
        bytes.
:type content_range: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
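
        Example: a minimal sketch of the resumable upload workflow (generate a
        session GUID, announce each chunk with a Content-Range header, then
        commit). It assumes an authenticated client named ``client``; the file
        name, relative path, and chunk size are placeholders, and reading and
        transmitting the chunk payload itself is outside this sketch::

            import os
            import uuid

            session_id = str(uuid.uuid4())
            path = 'MyApp/pkg.zip'              # image store relative path
            size = os.path.getsize('pkg.zip')   # local file length in bytes
            chunk = 1024 * 1024                 # 1 MB chunks
            for start in range(0, size, chunk):
                end = min(start + chunk, size) - 1
                content_range = 'bytes {0}-{1}/{2}'.format(start, end, size)
                client.upload_file_chunk(path, session_id, content_range)
            client.commit_image_store_upload_session(session_id)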
"""
api_version = "6.0"
# Construct URL
url = '/ImageStore/{contentPath}/$/UploadChunk'
path_format_arguments = {
'contentPath': self._serialize.url("content_path", content_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['Content-Range'] = self._serialize.header("content_range", content_range, 'str')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def invoke_infrastructure_command(
self, command, service_id=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Invokes an administrative command on the given Infrastructure Service
instance.
        For clusters that have one or more instances of the Infrastructure
        Service configured, this API provides a way to send
        infrastructure-specific commands to a particular instance of the
        Infrastructure Service.
        Available commands and their corresponding response formats vary
        depending upon the infrastructure on which the cluster is running.
This API supports the Service Fabric platform; it is not meant to be
used directly from your code.
:param command: The text of the command to be invoked. The content of
the command is infrastructure-specific.
:type command: str
:param service_id: The identity of the infrastructure service. This is
the full name of the infrastructure service without the 'fabric:' URI
        scheme. This parameter is required only for clusters that have more
        than one instance of the infrastructure service running.
:type service_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
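
        Example: a minimal sketch, assuming an authenticated client named
        ``client`` running against a cluster with a single Infrastructure
        Service instance; the command text is infrastructure-specific and
        shown here only as a placeholder::

            result = client.invoke_infrastructure_command('GetJobs')
            print(result)  # raw, infrastructure-specific response text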
"""
api_version = "6.0"
# Construct URL
url = '/$/InvokeInfrastructureCommand'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['Command'] = self._serialize.query("command", command, 'str')
if service_id is not None:
query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def invoke_infrastructure_query(
self, command, service_id=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Invokes a read-only query on the given infrastructure service instance.
        For clusters that have one or more instances of the Infrastructure
        Service configured, this API provides a way to send
        infrastructure-specific queries to a particular instance of the
        Infrastructure Service.
        Available commands and their corresponding response formats vary
        depending upon the infrastructure on which the cluster is running.
This API supports the Service Fabric platform; it is not meant to be
used directly from your code.
:param command: The text of the command to be invoked. The content of
the command is infrastructure-specific.
:type command: str
:param service_id: The identity of the infrastructure service. This is
the full name of the infrastructure service without the 'fabric:' URI
        scheme. This parameter is required only for clusters that have more
        than one instance of the infrastructure service running.
:type service_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/InvokeInfrastructureQuery'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['Command'] = self._serialize.query("command", command, 'str')
if service_id is not None:
query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def start_data_loss(
self, service_id, partition_id, operation_id, data_loss_mode, timeout=60, custom_headers=None, raw=False, **operation_config):
"""This API will induce data loss for the specified partition. It will
trigger a call to the OnDataLossAsync API of the partition.
This API will induce data loss for the specified partition. It will
trigger a call to the OnDataLoss API of the partition.
        Actual data loss will depend on the specified DataLossMode:
        PartialDataLoss - Only a quorum of replicas are removed and OnDataLoss
        is triggered for the partition, but actual data loss depends on the
        presence of in-flight replication.
        FullDataLoss - All replicas are removed, hence all data is lost and
        OnDataLoss is triggered.
This API should only be called with a stateful service as the target.
Calling this API with a system service as the target is not advised.
Note: Once this API has been called, it cannot be reversed. Calling
CancelOperation will only stop execution and clean up internal system
state.
It will not restore data if the command has progressed far enough to
cause data loss.
Call the GetDataLossProgress API with the same OperationId to return
information on the operation started with this API.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
:param data_loss_mode: This enum is passed to the StartDataLoss API to
indicate what type of data loss to induce. Possible values include:
'Invalid', 'PartialDataLoss', 'FullDataLoss'
:type data_loss_mode: str or
~azure.servicefabric.models.DataLossModeRequiredQueryParam
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
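
        Example: a minimal sketch that induces partial data loss and then
        polls the progress. It assumes an authenticated client named
        ``client``; the service and partition identifiers are placeholders,
        and reading ``state`` off the progress object is an assumption about
        the ``PartitionDataLossProgress`` model::

            import uuid

            op_id = str(uuid.uuid4())
            svc, part = 'myapp~app1~svc1', '<partition-guid>'
            client.start_data_loss(svc, part, op_id, 'PartialDataLoss')
            progress = client.get_data_loss_progress(svc, part, op_id)
            print(progress.state)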
"""
api_version = "6.0"
# Construct URL
url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
query_parameters['DataLossMode'] = self._serialize.query("data_loss_mode", data_loss_mode, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_data_loss_progress(
self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the progress of a partition data loss operation started using the
StartDataLoss API.
Gets the progress of a data loss operation started with StartDataLoss,
using the OperationId.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PartitionDataLossProgress or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PartitionDataLossProgress or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PartitionDataLossProgress', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def start_quorum_loss(
self, service_id, partition_id, operation_id, quorum_loss_mode, quorum_loss_duration, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Induces quorum loss for a given stateful service partition.
Induces quorum loss for a given stateful service partition. This API
is useful for a temporary quorum loss situation on your service.
Call the GetQuorumLossProgress API with the same OperationId to return
information on the operation started with this API.
This can only be called on stateful persisted (HasPersistedState==true)
services. Do not use this API on stateless services or stateful
in-memory only services.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
:param quorum_loss_mode: This enum is passed to the StartQuorumLoss
API to indicate what type of quorum loss to induce. Possible values
include: 'Invalid', 'QuorumReplicas', 'AllReplicas'
:type quorum_loss_mode: str or
~azure.servicefabric.models.QuorumLossModeRequiredQueryParam
:param quorum_loss_duration: The amount of time for which the
partition will be kept in quorum loss. This must be specified in
seconds.
:type quorum_loss_duration: int
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
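
        Example: a minimal sketch, assuming an authenticated client named
        ``client`` and placeholder service/partition identifiers; the target
        must be a stateful persisted service::

            import uuid

            op_id = str(uuid.uuid4())
            client.start_quorum_loss(
                'myapp~app1~svc1', '<partition-guid>', op_id,
                'QuorumReplicas', 30)  # keep the partition in quorum loss 30s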
"""
api_version = "6.0"
# Construct URL
url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
query_parameters['QuorumLossMode'] = self._serialize.query("quorum_loss_mode", quorum_loss_mode, 'str')
query_parameters['QuorumLossDuration'] = self._serialize.query("quorum_loss_duration", quorum_loss_duration, 'int')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_quorum_loss_progress(
self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the progress of a quorum loss operation on a partition started
using the StartQuorumLoss API.
Gets the progress of a quorum loss operation started with
StartQuorumLoss, using the provided OperationId.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PartitionQuorumLossProgress or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PartitionQuorumLossProgress or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PartitionQuorumLossProgress', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def start_partition_restart(
self, service_id, partition_id, operation_id, restart_partition_mode, timeout=60, custom_headers=None, raw=False, **operation_config):
"""This API will restart some or all replicas or instances of the
specified partition.
This API is useful for testing failover.
If used to target a stateless service partition, RestartPartitionMode
must be AllReplicasOrInstances.
Call the GetPartitionRestartProgress API using the same OperationId to
get the progress.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
        :param restart_partition_mode: Describes which partitions to restart.
Possible values include: 'Invalid', 'AllReplicasOrInstances',
'OnlyActiveSecondaries'
:type restart_partition_mode: str or
~azure.servicefabric.models.RestartPartitionModeRequiredQueryParam
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
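
        Example: a minimal sketch, assuming an authenticated client named
        ``client`` and placeholder service/partition identifiers::

            import uuid

            op_id = str(uuid.uuid4())
            client.start_partition_restart(
                'myapp~app1~svc1', '<partition-guid>', op_id,
                'AllReplicasOrInstances')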
"""
api_version = "6.0"
# Construct URL
url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
query_parameters['RestartPartitionMode'] = self._serialize.query("restart_partition_mode", restart_partition_mode, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_partition_restart_progress(
self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the progress of a PartitionRestart operation started using
StartPartitionRestart.
Gets the progress of a PartitionRestart started with
StartPartitionRestart using the provided OperationId.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param partition_id: The identity of the partition.
:type partition_id: str
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PartitionRestartProgress or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PartitionRestartProgress or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PartitionRestartProgress', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def start_node_transition(
self, node_name, operation_id, node_transition_type, node_instance_id, stop_duration_in_seconds, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Starts or stops a cluster node.
Starts or stops a cluster node. A cluster node is a process, not the
OS instance itself. To start a node, pass in "Start" for the
NodeTransitionType parameter.
To stop a node, pass in "Stop" for the NodeTransitionType parameter.
        This API starts the operation; when the API returns, the node may not
        have finished transitioning yet.
Call GetNodeTransitionProgress with the same OperationId to get the
progress of the operation.
:param node_name: The name of the node.
:type node_name: str
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
:param node_transition_type: Indicates the type of transition to
perform. NodeTransitionType.Start will start a stopped node.
NodeTransitionType.Stop will stop a node that is up. Possible values
include: 'Invalid', 'Start', 'Stop'
:type node_transition_type: str or
~azure.servicefabric.models.NodeTransitionTypeRequiredQueryParam
:param node_instance_id: The node instance ID of the target node.
This can be determined through GetNodeInfo API.
:type node_instance_id: str
:param stop_duration_in_seconds: The duration, in seconds, to keep the
node stopped. The minimum value is 600, the maximum is 14400. After
this time expires, the node will automatically come back up.
:type stop_duration_in_seconds: int
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
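
        Example: a minimal sketch that stops a node and polls the transition.
        It assumes an authenticated client named ``client``; the node name and
        instance ID are placeholders, and reading ``state`` off the progress
        object is an assumption about the ``NodeTransitionProgress`` model::

            import uuid

            op_id = str(uuid.uuid4())
            client.start_node_transition(
                '_Node_0', op_id, 'Stop', '<node-instance-id>', 600)
            progress = client.get_node_transition_progress('_Node_0', op_id)
            print(progress.state)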
"""
api_version = "6.0"
# Construct URL
url = '/Faults/Nodes/{nodeName}/$/StartTransition/'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
query_parameters['NodeTransitionType'] = self._serialize.query("node_transition_type", node_transition_type, 'str')
query_parameters['NodeInstanceId'] = self._serialize.query("node_instance_id", node_instance_id, 'str')
query_parameters['StopDurationInSeconds'] = self._serialize.query("stop_duration_in_seconds", stop_duration_in_seconds, 'int', minimum=0)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_node_transition_progress(
self, node_name, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the progress of an operation started using StartNodeTransition.
Gets the progress of an operation started with StartNodeTransition
using the provided OperationId.
:param node_name: The name of the node.
:type node_name: str
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NodeTransitionProgress or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.NodeTransitionProgress or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Faults/Nodes/{nodeName}/$/GetTransitionProgress'
path_format_arguments = {
'nodeName': self._serialize.url("node_name", node_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NodeTransitionProgress', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_fault_operation_list(
self, type_filter=65535, state_filter=65535, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets a list of user-induced fault operations filtered by provided
input.
        Gets a list of user-induced fault operations filtered by the provided
        input.
:param type_filter: Used to filter on OperationType for user-induced
operations.
65535 - select all
1 - select PartitionDataLoss.
2 - select PartitionQuorumLoss.
4 - select PartitionRestart.
8 - select NodeTransition.
:type type_filter: int
        :param state_filter: Used to filter on OperationState values for
        user-induced operations.
65535 - select All
1 - select Running
2 - select RollingBack
8 - select Completed
16 - select Faulted
32 - select Cancelled
64 - select ForceCancelled
:type state_filter: int
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.servicefabric.models.OperationStatus] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
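
        Example: a minimal sketch that combines the bit flags with bitwise OR
        to select PartitionDataLoss (1) and NodeTransition (8) operations in
        the Running (1) or Completed (8) states. It assumes an authenticated
        client named ``client``, and the ``operation_id``/``state`` attributes
        are assumptions about the ``OperationStatus`` model::

            ops = client.get_fault_operation_list(
                type_filter=1 | 8, state_filter=1 | 8)
            for op in ops:
                print(op.operation_id, op.state)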
"""
api_version = "6.0"
# Construct URL
url = '/Faults/'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['TypeFilter'] = self._serialize.query("type_filter", type_filter, 'int')
query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[OperationStatus]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def cancel_operation(
self, operation_id, force=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Cancels a user-induced fault operation.
The following is a list of APIs that start fault operations that may be
        cancelled using CancelOperation:
- StartDataLoss
- StartQuorumLoss
- StartPartitionRestart
- StartNodeTransition
If force is false, then the specified user-induced operation will be
gracefully stopped and cleaned up. If force is true, the command will
be aborted, and some internal state
may be left behind. Specifying force as true should be used with care.
Calling this API with force set to true is not allowed until this API
has already
been called on the same test command with force set to false first, or
unless the test command already has an OperationState of
OperationState.RollingBack.
        Clarification: OperationState.RollingBack means that the system will
        be (or already is) cleaning up internal system state caused by
        executing the command.
It will not restore data if the
test command was to cause data loss. For example, if you call
StartDataLoss then call this API, the system will only clean up
internal state from running the command.
It will not restore the target partition's data, if the command
progressed far enough to cause data loss.
Important note: if this API is invoked with force==true, internal
state may be left behind.
:param operation_id: A GUID that identifies a call of this API. This
is passed into the corresponding GetProgress API
:type operation_id: str
        :param force: Indicates whether to gracefully roll back and clean up
internal system state modified by executing the user-induced
operation.
:type force: bool
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
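
        Example: a minimal sketch of the required two-step cancellation,
        assuming an authenticated client named ``client`` and an ``op_id``
        from a previously started fault operation::

            client.cancel_operation(op_id)              # graceful stop first
            client.cancel_operation(op_id, force=True)  # then force, if needed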
"""
api_version = "6.0"
# Construct URL
url = '/Faults/$/Cancel'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str')
query_parameters['Force'] = self._serialize.query("force", force, 'bool')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def create_name(
self, name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Creates a Service Fabric name.
Creates the specified Service Fabric name.
:param name: The Service Fabric name, including the 'fabric:' URI
scheme.
:type name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
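
        Example: a minimal sketch, assuming an authenticated client named
        ``client`` and a placeholder name::

            client.create_name('fabric:/myapp/config')
            # get_name_exists_info takes the name without the 'fabric:' scheme
            # and raises FabricErrorException if the name does not exist.
            client.get_name_exists_info('myapp/config')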
"""
name_description = models.NameDescription(name=name)
api_version = "6.0"
# Construct URL
url = '/Names/$/Create'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(name_description, 'NameDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [201]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_name_exists_info(
self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Returns whether the Service Fabric name exists.
Returns whether the specified Service Fabric name exists.
:param name_id: The Service Fabric name, without the 'fabric:' URI
scheme.
:type name_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Names/{nameId}'
path_format_arguments = {
'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete_name(
self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Deletes a Service Fabric name.
Deletes the specified Service Fabric name. A name must be created
before it can be deleted. Deleting a name with child properties will
fail.
:param name_id: The Service Fabric name, without the 'fabric:' URI
scheme.
:type name_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Names/{nameId}'
path_format_arguments = {
'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_sub_name_info_list(
self, name_id, recursive=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Enumerates all the Service Fabric names under a given name.
Enumerates all the Service Fabric names under a given name. If the
subnames do not fit in a page, one page of results is returned as well
as a continuation token which can be used to get the next page.
Querying a name that doesn't exist will fail.
:param name_id: The Service Fabric name, without the 'fabric:' URI
scheme.
:type name_id: str
:param recursive: Allows specifying that the search performed should
be recursive.
:type recursive: bool
        :param continuation_token: The continuation token parameter is used to
         obtain the next set of results. A continuation token with a non-empty
         value is included in the response of the API when the results from
         the system do not fit in a single response. When this value is passed
         to the next API call, the API returns the next set of results. If
         there are no further results, the continuation token does not contain
         a value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedSubNameInfoList or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PagedSubNameInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
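        Example: an illustrative paging loop over sub-names; ``client`` and
        the attribute names on the returned ``PagedSubNameInfoList`` are
        assumptions, not confirmed by this file::
            token = None
            while True:
                page = client.get_sub_name_info_list(
                    'samples', recursive=True, continuation_token=token)
                for sub_name in page.sub_names:
                    print(sub_name)
                token = page.continuation_token
                if not token:
                    break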
"""
api_version = "6.0"
# Construct URL
url = '/Names/{nameId}/$/GetSubNames'
path_format_arguments = {
'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if recursive is not None:
query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedSubNameInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_property_info_list(
self, name_id, include_values=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets information on all Service Fabric properties under a given name.
        A Service Fabric name can have one or more named properties that store
        custom information. This operation gets the information about these
        properties in a paged list. The information includes the name, value,
        and metadata of each property.
:param name_id: The Service Fabric name, without the 'fabric:' URI
scheme.
:type name_id: str
:param include_values: Allows specifying whether to include the values
of the properties returned. True if values should be returned with the
metadata; False to return only property metadata.
:type include_values: bool
        :param continuation_token: The continuation token parameter is used to
         obtain the next set of results. A continuation token with a non-empty
         value is included in the response of the API when the results from
         the system do not fit in a single response. When this value is passed
         to the next API call, the API returns the next set of results. If
         there are no further results, the continuation token does not contain
         a value. The value of this parameter should not be URL encoded.
:type continuation_token: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PagedPropertyInfoList or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PagedPropertyInfoList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Names/{nameId}/$/GetProperties'
path_format_arguments = {
'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if include_values is not None:
query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool')
if continuation_token is not None:
query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PagedPropertyInfoList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_property(
self, name_id, property_description, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a Service Fabric property.
Creates or updates the specified Service Fabric property under a given
name.
:param name_id: The Service Fabric name, without the 'fabric:' URI
scheme.
:type name_id: str
:param property_description: Describes the Service Fabric property to
be created.
:type property_description:
~azure.servicefabric.models.PropertyDescription
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
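        Example: an illustrative call; ``client`` and the
        ``StringPropertyValue`` model name are assumptions drawn from this
        package's ``models`` module, not confirmed by this file::
            desc = models.PropertyDescription(
                property_name='TraceLevel',
                value=models.StringPropertyValue(data='Verbose'))
            client.put_property('samples/apps', desc)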
"""
api_version = "6.0"
# Construct URL
url = '/Names/{nameId}/$/GetProperty'
path_format_arguments = {
'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(property_description, 'PropertyDescription')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_property_info(
self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the specified Service Fabric property.
Gets the specified Service Fabric property under a given name. This
will always return both value and metadata.
:param name_id: The Service Fabric name, without the 'fabric:' URI
scheme.
:type name_id: str
:param property_name: Specifies the name of the property to get.
:type property_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PropertyInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PropertyInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
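        Example: an illustrative call, including the ``raw`` form; ``client``
        and the ``response`` attribute of ``ClientRawResponse`` are assumed::
            info = client.get_property_info('samples/apps', 'TraceLevel')
            raw = client.get_property_info(
                'samples/apps', 'TraceLevel', raw=True)
            status = raw.response.status_code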
"""
api_version = "6.0"
# Construct URL
url = '/Names/{nameId}/$/GetProperty'
path_format_arguments = {
'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PropertyInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_property(
self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified Service Fabric property.
Deletes the specified Service Fabric property under a given name. A
property must be created before it can be deleted.
:param name_id: The Service Fabric name, without the 'fabric:' URI
scheme.
:type name_id: str
        :param property_name: Specifies the name of the property to delete.
:type property_name: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Names/{nameId}/$/GetProperty'
path_format_arguments = {
'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def submit_property_batch(
self, name_id, timeout=60, operations=None, custom_headers=None, raw=False, **operation_config):
"""Submits a property batch.
Submits a batch of property operations. Either all or none of the
operations will be committed.
:param name_id: The Service Fabric name, without the 'fabric:' URI
scheme.
:type name_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param operations: A list of the property batch operations to be
executed.
:type operations:
list[~azure.servicefabric.models.PropertyBatchOperation]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PropertyBatchInfo or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.PropertyBatchInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
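        Example: an illustrative all-or-nothing batch; ``client`` and the
        operation model names are assumptions drawn from this package's
        ``models`` module, not confirmed by this file::
            ops = [
                models.CheckExistsPropertyBatchOperation(
                    property_name='TraceLevel', exists=True),
                models.PutPropertyBatchOperation(
                    property_name='TraceLevel',
                    value=models.StringPropertyValue(data='Verbose')),
            ]
            result = client.submit_property_batch(
                'samples/apps', operations=ops)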
"""
property_batch_description_list = models.PropertyBatchDescriptionList(operations=operations)
api_version = "6.0"
# Construct URL
url = '/Names/{nameId}/$/GetProperties/$/SubmitBatch'
path_format_arguments = {
'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(property_batch_description_list, 'PropertyBatchDescriptionList')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 409]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SuccessfulPropertyBatchInfo', response)
if response.status_code == 409:
deserialized = self._deserialize('FailedPropertyBatchInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
|
lmazuel/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/service_fabric_client_ap_is.py
|
Python
|
mit
| 582,969
| 0.001683
|
#!/usr/bin/env python
import pygame
class PLUGIN_test_test2:
def __init__(self, screensurf, keylist, vartree):
self.screensurf=screensurf
self.keylist=keylist
        #best practice is to init keyid variables in __init__, and default them to "0" (the null keyid)
self.keyid="0"
def fork(self, tagobj):
return
#core object. should either return None, or pygame Rect.
#if Rect is returned, the system will attempt to parse the standard
#"act" component, and associated related attributes...
#you may also want to use the provided click events in place of the standard act component.
#if you want hoverkey to be active, you MUST return a Rect!
#onkey/offkey masking is honored by the system regardless.
def core(self, tagobj):
if tagobj.tag=="test2":
self.xpos=int(tagobj.attrib.get("x"))
self.ypos=int(tagobj.attrib.get("y"))
#note: these core object tests are in blue
self.testrect=pygame.Rect(self.xpos, self.ypos, 60, 20)
pygame.draw.rect(self.screensurf, (0, 127, 255), self.testrect)
return self.testrect
#called every loop.
def pump(self):
return
#called on pygame mousebuttondown events
def click(self, event):
return
#called on pygame mousebuttonup events
def clickup(self, event):
return
#called upon page load
def pageclear(self):
return
#pause & resume can be useful for various things. such as properly extending timers. for that, its reccomended using the calculated seconds.
def pause(self, time):
print("plugin test2.dzup.py receved pause call.")
print(time)
#seconds referrs to a calculated seconds paused as a float.
def resume(self, seconds):
print("plugin test2.dzup.py receved resume call.")
print(seconds)
def keyup(self, event):
print("plugin test2.dzup.py receved KEYUP")
def keydown(self, event):
print("plugin test2.dzup.py receved KEYDOWN")
plugname="test plugin2"
plugclass=PLUGIN_test_test2
plugpath=None
|
ThomasTheSpaceFox/Desutezeoid
|
plugins/test2.dzup.py
|
Python
|
gpl-3.0
| 1,889
| 0.044997
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from app import create_app, celery
app = create_app()
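# The seemingly unused `celery` import is what a worker process binds to; a
# worker would typically be started with something like (assumed invocation):
#   celery -A celery_run.celery worker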
|
taogeT/flask-celery
|
example/celery_run.py
|
Python
|
bsd-2-clause
| 101
| 0
|
import collections
# Count duplicate lines in depth_28.txt and write the distinct lines to
# depth_29.txt, most frequent first.
with open('depth_28.txt') as infile:
    counts = collections.Counter(l.strip() for l in infile)
with open("depth_29.txt", "w") as g:
    for line, count in counts.most_common():
        g.write(str(line))
        #g.write(str(count))
        g.write("\n")
|
join2saurav/Lexical-syntax-semantic-analysis-of-Hindi-text-
|
test10.py
|
Python
|
apache-2.0
| 256
| 0.019531
|
from distutils.core import setup
setup(
name='dkcoverage',
version='0.0.0',
packages=[''],
url='https://github.com/thebjorn/dkcoverage',
license='GPL v2',
author='bjorn',
author_email='bp@datakortet.no',
description='Run tests and compute coverage.'
)
|
thebjorn/dkcoverage
|
setup.py
|
Python
|
gpl-2.0
| 285
| 0
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_shear_mite_broodling.iff"
result.attribute_template_id = 9
result.stfName("monster_name","shear_mite")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_shear_mite_broodling.py
|
Python
|
mit
| 444
| 0.047297
|
#!/usr/bin/env python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys, os, re
import scsiutil, util
import xml.dom.minidom
import xs_errors, time
import glob
DEVPATH='/dev/disk/by-id'
DMDEVPATH='/dev/mapper'
SYSFS_PATH1='/sys/class/scsi_host'
SYSFS_PATH2='/sys/class/scsi_disk'
SYSFS_PATH3='/sys/class/fc_transport'
DRIVER_BLACKLIST = ['^(s|p|)ata_.*', '^ahci$', '^pdc_adma$', '^iscsi_tcp$']
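# e.g. local SATA/PATA and software-iSCSI drivers such as 'ata_piix',
# 'sata_nv', 'ahci' or 'iscsi_tcp' match the patterns above and are
# excluded from the HBA scan.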
INVALID_DEVICE_NAME = ''
def getManufacturer(s):
(rc,stdout,stderr) = util.doexec(['/sbin/modinfo', '-d', s])
if stdout:
return stdout.strip()
else:
return "Unknown"
def update_devs_dict(devs, dev, entry):
if dev != INVALID_DEVICE_NAME:
devs[dev] = entry
def adapters(filterstr="any"):
dict = {}
devs = {}
adt = {}
for a in os.listdir(SYSFS_PATH1):
proc = match_hbadevs(a, filterstr)
if not proc:
continue
adt[a] = proc
id = a.replace("host","")
scsiutil.rescan([id])
emulex = False
paths = []
if proc == "lpfc":
emulex = True
paths.append(SYSFS_PATH3)
else:
for p in [os.path.join(SYSFS_PATH1,a,"device","session*"),os.path.join(SYSFS_PATH1,a,"device"),\
os.path.join(SYSFS_PATH2,"%s:*"%id)]:
paths += glob.glob(p)
if not len(paths):
continue
for path in paths:
for i in filter(match_targets,os.listdir(path)):
tgt = i.replace('target','')
if emulex:
sysfs = os.path.join(SYSFS_PATH3,i,"device")
else:
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
if emulex:
dir = os.path.join(sysfs,lun)
else:
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
# for new qlogic sysfs layout (rport under device, then target)
for i in filter(match_rport,os.listdir(path)):
newpath = os.path.join(path, i)
for j in filter(match_targets,os.listdir(newpath)):
tgt = j.replace('target','')
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
# for new mptsas sysfs entries, check for phy* node
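            # e.g. a (hypothetical) entry 'phy-2:7' yields target '2' and
            # lun id '7', giving the HBTL prefix '2:0:0:7' matched below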
for i in filter(match_phy,os.listdir(path)):
(target,lunid) = i.replace('phy-','').split(':')
tgt = "%s:0:0:%s" % (target,lunid)
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
if path.startswith(SYSFS_PATH2):
os.path.join(path,"device","block:*")
dev = _extract_dev_name(os.path.join(path, 'device'))
if devs.has_key(dev):
continue
hbtl = os.path.basename(path)
(h,b,t,l) = hbtl.split(':')
entry = {'procname':proc, 'host':id, 'target':l}
update_devs_dict(devs, dev, entry)
dict['devs'] = devs
dict['adt'] = adt
return dict
def _get_driver_name(scsihost):
driver_name = 'Unknown'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, 'fnic_state')):
driver_name = 'fnic'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, 'lpfc_fcp_class')):
driver_name = 'lpfc'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, '84xx_fw_version')):
driver_name = 'qla2xxx'
if 'Unknown' == driver_name:
namepath = os.path.join(SYSFS_PATH1, scsihost, 'driver_name')
if not os.path.exists(namepath):
namepath = os.path.join(SYSFS_PATH1, scsihost, 'proc_name')
if os.path.exists(namepath):
try:
f = open(namepath, 'r')
line = f.readline()[:-1]
f.close()
                if line not in ['<NULL>', '(NULL)', '']:
driver_name = line
except IOError:
pass
if 'Unknown' == driver_name:
ueventpath = os.path.join(SYSFS_PATH1, scsihost, 'uevent')
if os.path.exists(ueventpath):
try:
f = open(ueventpath, 'r')
for line in f:
if line.startswith('PHYSDEVDRIVER='):
driver_name = line.replace('PHYSDEVDRIVER=','').strip()
f.close()
except IOError:
pass
return driver_name
def _parseHostId(str):
id = str.split()
val = "%s:%s:%s" % (id[1],id[3],id[5])
return val.replace(',','')
def _genMPPHBA(id):
devs = scsiutil.cacheSCSIidentifiers()
mppdict = {}
for dev in devs:
item = devs[dev]
if item[1] == id:
arr = scsiutil._genArrayIdentifier(dev)
if not len(arr):
continue
try:
cmd = ['/usr/sbin/mppUtil', '-a']
for line in util.doexec(cmd)[1].split('\n'):
if line.find(arr) != -1:
rec = line.split()[0]
cmd2 = ['/usr/sbin/mppUtil', '-g',rec]
li = []
for newline in util.doexec(cmd2)[1].split('\n'):
if newline.find('hostId') != -1:
li.append(_parseHostId(newline))
mppdict[dev.split('/')[-1]] = li
except:
continue
return mppdict
def match_hbadevs(s, filterstr):
driver_name = _get_driver_name(s)
if match_host(s) and not match_blacklist(driver_name) \
and ( filterstr == "any" or match_filterstr(filterstr, driver_name) ):
return driver_name
else:
return ""
def match_blacklist(driver_name):
return re.search("(" + ")|(".join(DRIVER_BLACKLIST) + ")", driver_name)
def match_filterstr(filterstr, driver_name):
return re.search("^%s" % filterstr, driver_name)
def match_host(s):
return re.search("^host[0-9]", s)
def match_rport(s):
regex = re.compile("^rport-*")
return regex.search(s, 0)
def match_targets(s):
regex = re.compile("^target[0-9]")
return regex.search(s, 0)
def match_phy(s):
regex = re.compile("^phy-*")
return regex.search(s, 0)
def match_LUNs(s, prefix):
regex = re.compile("^%s" % prefix)
return regex.search(s, 0)
def match_dev(s):
regex = re.compile("^block:")
return regex.search(s, 0)
def _extract_dev_name(device_dir):
"""Returns the name of the block device from sysfs e.g. 'sda'"""
kernel_version = os.uname()[2]
if kernel_version.startswith('2.6'):
# sub-directory of form block:sdx/
dev = filter(match_dev, os.listdir(device_dir))[0]
# remove 'block:' from entry and return
return dev.lstrip('block:')
elif kernel_version.startswith('3.'):
# directory for device name lives inside block directory e.g. block/sdx
return _get_block_device_name_with_kernel_3x(device_dir)
else:
msg = 'Kernel version detected: %s' % kernel_version
raise xs_errors.XenError('UnsupportedKernel', msg)
def _get_block_device_name_with_kernel_3x(device_dir):
devs = glob.glob(os.path.join(device_dir, 'block/*'))
if len(devs):
# prune path to extract the device name
return os.path.basename(devs[0])
else:
return INVALID_DEVICE_NAME
def _extract_dev(device_dir, procname, host, target):
"""Returns device name and creates dictionary entry for it"""
dev = _extract_dev_name(device_dir)
entry = {}
entry['procname'] = procname
entry['host'] = host
entry['target'] = target
return (dev, entry)
def _add_host_parameters_to_adapter(dom, adapter, host_class, host_id,
parameters):
"""Adds additional information about the adapter to the the adapter node"""
host_path = os.path.join('/sys/class/', host_class, 'host%s' % (host_id))
if os.path.exists(host_path):
host_entry = dom.createElement(host_class)
adapter.appendChild(host_entry)
for parameter in parameters:
try:
filehandle = open(os.path.join(host_path, parameter))
parameter_value = filehandle.read(512).strip()
filehandle.close()
if parameter_value:
entry = dom.createElement(parameter)
host_entry.appendChild(entry)
text_node = dom.createTextNode(parameter_value)
entry.appendChild(text_node)
except IOError:
pass
def scan(srobj):
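    # Builds and returns a pretty-printed XML document of the approximate form:
    #   <Devlist>
    #     <BlockDevice><path>...</path><SCSIid>...</SCSIid>...</BlockDevice>
    #     <Adapter><host>...</host><name>...</name>...</Adapter>
    #   </Devlist>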
systemrootID = util.getrootdevID()
hbadict = srobj.hbadict
hbas = srobj.hbas
dom = xml.dom.minidom.Document()
e = dom.createElement("Devlist")
dom.appendChild(e)
if not os.path.exists(DEVPATH):
return dom.toprettyxml()
devs = srobj.devs
vdis = {}
for key in hbadict:
hba = hbadict[key]
path = os.path.join("/dev",key)
realpath = path
obj = srobj.vdi("")
try:
obj._query(realpath, devs[realpath][4])
except:
continue
# Test for root dev or existing PBD
if len(obj.SCSIid) and len(systemrootID) and util.match_scsiID(obj.SCSIid, systemrootID):
util.SMlog("Ignoring root device %s" % realpath)
continue
elif util.test_SCSIid(srobj.session, None, obj.SCSIid):
util.SMlog("SCSIid in use, ignoring (%s)" % obj.SCSIid)
continue
elif not devs.has_key(realpath):
continue
ids = devs[realpath]
obj.adapter = ids[1]
obj.channel = ids[2]
obj.id = ids[3]
obj.lun = ids[4]
obj.hba = hba['procname']
obj.numpaths = 1
if vdis.has_key(obj.SCSIid):
vdis[obj.SCSIid].numpaths += 1
vdis[obj.SCSIid].path += " [%s]" % key
elif obj.hba == 'mpp':
mppdict = _genMPPHBA(obj.adapter)
if mppdict.has_key(key):
item = mppdict[key]
adapters = ''
for i in item:
if len(adapters):
adapters += ', '
obj.numpaths += 1
adapters += i
if len(adapters):
obj.mpp = adapters
vdis[obj.SCSIid] = obj
else:
vdis[obj.SCSIid] = obj
for key in vdis:
obj = vdis[key]
d = dom.createElement("BlockDevice")
e.appendChild(d)
for attr in ['path','numpaths','SCSIid','vendor','serial','size','adapter','channel','id','lun','hba','mpp']:
try:
aval = getattr(obj, attr)
except AttributeError:
if attr in ['mpp']:
continue
raise xs_errors.XenError('InvalidArg', \
opterr='Missing required field [%s]' % attr)
entry = dom.createElement(attr)
d.appendChild(entry)
textnode = dom.createTextNode(str(aval))
entry.appendChild(textnode)
for key in hbas.iterkeys():
a = dom.createElement("Adapter")
e.appendChild(a)
entry = dom.createElement('host')
a.appendChild(entry)
textnode = dom.createTextNode(key)
entry.appendChild(textnode)
entry = dom.createElement('name')
a.appendChild(entry)
textnode = dom.createTextNode(hbas[key])
entry.appendChild(textnode)
entry = dom.createElement('manufacturer')
a.appendChild(entry)
textnode = dom.createTextNode(getManufacturer(hbas[key]))
entry.appendChild(textnode)
id = key.replace("host","")
entry = dom.createElement('id')
a.appendChild(entry)
textnode = dom.createTextNode(id)
entry.appendChild(textnode)
_add_host_parameters_to_adapter(dom, a, 'fc_host', id,
['node_name', 'port_name',
'port_state', 'speed',
'supported_speeds'])
_add_host_parameters_to_adapter(dom, a, 'iscsi_host', id,
['hwaddress', 'initiatorname',
'ipaddress', 'port_speed',
'port_state'])
return dom.toprettyxml()
def check_iscsi(adapter):
ret = False
str = "host%s" % adapter
try:
filename = os.path.join('/sys/class/scsi_host',str,'proc_name')
f = open(filename, 'r')
if f.readline().find("iscsi_tcp") != -1:
ret = True
except:
pass
return ret
def match_nonpartitions(s):
    regex = re.compile("-part[0-9]")
    if not regex.search(s, 0):
        return True
    return False
|
robertbreker/sm
|
drivers/devscan.py
|
Python
|
lgpl-2.1
| 14,406
| 0.00833
|
# test rasl inner loop on simulated data
#
# pylint:disable=import-error
from __future__ import division, print_function
import numpy as np
from rasl.inner import inner_ialm
from rasl import (warp_image_gradient, EuclideanTransform,
SimilarityTransform, AffineTransform, ProjectiveTransform)
def setup_function(_):
np.random.seed(0)
np.set_printoptions(threshold=np.inf,
formatter={'float_kind':lambda x: "%.3f" % x})
def gauss_image(h=60, v=60):
"""a gaussian image as described in RASL and RPCA papers"""
return np.random.normal(0, 1.0, (h, v))
def image_noise(likeimg, p=0.1):
"""sparse noise as described in RASL and RPCA papers"""
sgn = np.random.choice((-1.0, 1.0), size=likeimg.shape)
return sgn * np.random.binomial(1, p, size=likeimg.shape)
def inner_aligned(Ttype, inset=10):
"""don't mess (much) with a stack of aligned images"""
N = 40
image0 = gauss_image()
insetT = Ttype().inset(image0.shape, inset)
Image = [image0 for _ in range(N)]
TI, J = zip(*[warp_image_gradient(insetT, image, normalize=True)
for image in Image])
_, _, dParamv = inner_ialm(TI, J, tol=1e-4)
# for this test, verify that all images have same dParamv
# (inner insists on stepping dParamv a small amount when all images
# are aligned, so image comparisons are no good)
assert np.allclose(dParamv, dParamv[0], atol=1e-3)
def test_inner_aligned_similarity():
inner_aligned(SimilarityTransform)
def test_inner_aligned_euclidean():
inner_aligned(EuclideanTransform)
def test_inner_aligned_affine():
inner_aligned(AffineTransform)
def test_inner_aligned_projective():
inner_aligned(ProjectiveTransform)
def inner_jittered(T, inset=10, rtol=1e-3, atol=0):
"""move a stack of jittered noisy images in the direction of aligned"""
image0 = gauss_image()
Image = [image0 + image_noise(image0, p=.05) for _ in T]
T = [tform.inset(image0.shape, inset) for tform in T]
TImage, J = zip(*[warp_image_gradient(tform, image, normalize=True)
for tform, image in zip(T, Image)])
_, _, dParamv = inner_ialm(TImage, J, tol=1e-4)
# does dParamv move towards alignment? check if stdev of
# parameters decreased.
before = np.array([t.paramv for t in T])
beforeStd = np.std(before, 0)
after = np.array([t.paramv + dparamv
for t, dparamv in zip(T, dParamv)])
afterStd = np.std(after, 0)
assert np.all(np.logical_or(afterStd < beforeStd,
np.isclose(after, before, rtol=rtol, atol=atol)))
def test_inner_jittered_euclidean():
N = 40
dtheta, dx, dy= .05, 1, 1
Jitters = [[(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dy]
for _ in range(N)]
inner_jittered([EuclideanTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_similarity():
N = 40
ds, dtheta, dx, dy= .05, .05, 1, 1
Jitters = [[(np.random.random() * 2 - 1) * ds + 1,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dy]
for _ in range(N)]
inner_jittered([SimilarityTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_affine():
N = 40
ds, dtheta, dx = .05, .05, 1
Jitters = [[(np.random.random() * 2 - 1) * ds + 1.0,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * ds + 1.0,
(np.random.random() * 2 - 1) * dx]
for _ in range(N)]
inner_jittered([AffineTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_projective():
# projective is a pain to test this way. the two projective
# parameters are badly conditioned and change too much in a single
# step. for now, set tolerance to disregard a wobbly step in the
    # final two parameters, while ensuring the others converge.
N = 40
ds, dtheta, dx, dh = .05, .05, 1, 0.0005
Jitters = [[(np.random.random() * 2 - 1) * ds + 1,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * ds + 1,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dh,
(np.random.random() * 2 - 1) * dh]
for _ in range(N)]
inner_jittered([ProjectiveTransform(paramv=jitter) for jitter in Jitters],
atol=.001)
|
welch/rasl
|
tests/inner_test.py
|
Python
|
mit
| 4,843
| 0.00351
|
# -*- coding: utf-8 -*-
"""
Package of failing integer functions.
"""
from metaopt.objective.integer.failing.f import f as f
from metaopt.objective.integer.failing.g import f as g
FUNCTIONS_FAILING = [f, g]
|
cigroup-ol/metaopt
|
metaopt/objective/integer/failing/__init__.py
|
Python
|
bsd-3-clause
| 209
| 0
|
# Copyright 2015 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestFloatingip(helpers.TestCase):
"""Checks that the user is able to allocate/release floatingip."""
def test_floatingip(self):
floatingip_page = \
self.home_pg.go_to_compute_accessandsecurity_floatingipspage()
floating_ip = floatingip_page.allocate_floatingip()
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(floatingip_page.is_floatingip_present(floating_ip))
floatingip_page.release_floatingip(floating_ip)
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(floatingip_page.is_floatingip_present(floating_ip))
class TestFloatingipAssociateDisassociate(helpers.TestCase):
"""Checks that the user is able to Associate/Disassociate floatingip."""
def test_floatingip_associate_disassociate(self):
instance_name = helpers.gen_random_resource_name('instance',
timestamp=False)
instances_page = self.home_pg.go_to_compute_instancespage()
instances_page.create_instance(instance_name)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
instances_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(instances_page.is_instance_active(instance_name))
instance_ipv4 = instances_page.get_fixed_ipv4(instance_name)
instance_info = "{} {}".format(instance_name, instance_ipv4)
floatingip_page = \
self.home_pg.go_to_compute_accessandsecurity_floatingipspage()
floating_ip = floatingip_page.allocate_floatingip()
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(floatingip_page.is_floatingip_present(floating_ip))
self.assertEqual('-', floatingip_page.get_fixed_ip(floating_ip))
floatingip_page.associate_floatingip(floating_ip, instance_name,
instance_ipv4)
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertEqual(instance_info,
floatingip_page.get_fixed_ip(floating_ip))
floatingip_page.disassociate_floatingip(floating_ip)
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertEqual('-', floatingip_page.get_fixed_ip(floating_ip))
floatingip_page.release_floatingip(floating_ip)
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(floatingip_page.is_floatingip_present(floating_ip))
instances_page = self.home_pg.go_to_compute_instancespage()
instances_page.delete_instance(instance_name)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
instances_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(instances_page.is_instance_deleted(instance_name))
|
coreycb/horizon
|
openstack_dashboard/test/integration_tests/tests/test_floatingips.py
|
Python
|
apache-2.0
| 4,543
| 0
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.contrib.auth import logout as django_logout
from restclients_core.exceptions import DataFailureException
from myuw.dao import is_action_disabled
from myuw.dao.affiliation import get_all_affiliations
from myuw.dao.emaillink import get_service_url_for_address
from myuw.dao.exceptions import (
EmailServiceUrlException, BlockedNetidErr)
from myuw.dao.gws import in_myuw_test_access_group
from myuw.dao.quicklinks import get_quicklink_data
from myuw.dao.card_display_dates import get_card_visibilty_date_values
from myuw.dao.messages import get_current_messages
from myuw.dao.term import add_term_data_to_context
from myuw.dao.user import get_updated_user, not_existing_user
from myuw.dao.user_pref import get_migration_preference
from myuw.dao.uwnetid import get_email_forwarding_for_current_user
from myuw.logger.timer import Timer
from myuw.logger.logresp import (
log_invalid_netid_response, log_page_view, log_exception)
from myuw.logger.session_log import (
log_session, is_native, log_session_end)
from myuw.util.settings import (
get_google_search_key, get_google_analytics_key, get_django_debug,
get_logout_url, no_access_check)
from myuw.views import prefetch_resources, get_enabled_features
from myuw.views.error import (
unknown_uwnetid, no_access, blocked_uwnetid, pws_error_404)
from django.contrib.auth.decorators import login_required
logger = logging.getLogger(__name__)
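# Typical (assumed) usage from a view function:
#   return page(request, 'home.html', context={'extra': 'data'},
#               add_quicklink_context=True)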
def page(request,
template,
context=None,
prefetch=True,
add_quicklink_context=False):
if context is None:
context = {}
timer = Timer()
try:
user = get_updated_user(request)
except DataFailureException as ex:
log_exception(logger, "PWS error", traceback)
if ex.status == 404:
if not_existing_user(request):
return unknown_uwnetid()
return pws_error_404()
return render(request, '500.html', status=500)
try:
if not can_access_myuw(request):
return no_access()
except DataFailureException:
log_exception(logger, "GWS error", traceback)
return render(request, '500.html', status=500)
netid = user.uwnetid
context["user"] = {
"netid": netid,
"isHybrid": is_native(request),
}
if prefetch:
# Some pages need to prefetch before this point
failure = try_prefetch(request, template, context)
if failure:
return failure
try:
affiliations = get_all_affiliations(request)
except BlockedNetidErr:
django_logout(request)
return blocked_uwnetid()
except DataFailureException as err:
log_exception(logger, err, traceback)
return render(request, '500.html', status=500)
user_pref = get_migration_preference(request)
log_session(request)
context["user"]["session_key"] = request.session.session_key
context["home_url"] = "/"
context["err"] = None
context["user"]["affiliations"] = affiliations
banner_messages = []
for message in get_current_messages(request):
banner_messages.append(message.message_body)
context["banner_messages"] = banner_messages
context["display_onboard_message"] = user_pref.display_onboard_message
context["display_pop_up"] = user_pref.display_pop_up
context["disable_actions"] = is_action_disabled()
_add_email_forwarding(request, context)
try:
context["card_display_dates"] = get_card_visibilty_date_values(request)
add_term_data_to_context(request, context)
except DataFailureException:
log_exception(logger, "SWS term data error", traceback)
context['enabled_features'] = get_enabled_features()
context['google_search_key'] = get_google_search_key()
context['google_analytics_key'] = get_google_analytics_key()
context['google_tracking_enabled'] = not get_django_debug()
if add_quicklink_context:
_add_quicklink_context(request, context)
log_page_view(timer, request, template)
return render(request, template, context)
def try_prefetch(request, template, context):
try:
prefetch_resources(request,
prefetch_migration_preference=True,
prefetch_enrollment=True,
prefetch_group=True,
prefetch_instructor=True,
prefetch_sws_person=True)
except DataFailureException:
log_exception(logger, "prefetch error", traceback)
context["webservice_outage"] = True
return render(request, template, context)
return
@login_required
def logout(request):
log_session_end(request)
django_logout(request) # clear the session data
if is_native(request):
return HttpResponse()
# Redirects to authN service logout page
return HttpResponseRedirect(get_logout_url())
def _add_quicklink_context(request, context):
link_data = get_quicklink_data(request)
for key in link_data:
context[key] = link_data[key]
def can_access_myuw(request):
return (no_access_check() or in_myuw_test_access_group(request))
def _add_email_forwarding(request, context):
my_uwemail_forwarding = get_email_forwarding_for_current_user(request)
c_user = context["user"]
if my_uwemail_forwarding and my_uwemail_forwarding.is_active():
try:
c_user['email_forward_url'] = get_service_url_for_address(
my_uwemail_forwarding.fwd)
return
except EmailServiceUrlException:
logger.error('No email url for {}'.format(
my_uwemail_forwarding.fwd))
return # MUWM-4700
c_user['email_forward_url'] = None
c_user['email_error'] = True
|
uw-it-aca/myuw
|
myuw/views/page.py
|
Python
|
apache-2.0
| 6,010
| 0
|
import logging
from stubo.ext.xmlutils import XPathValue
from stubo.ext.xmlexit import XMLManglerExit
log = logging.getLogger(__name__)
elements = dict(year=XPathValue('//dispatchTime/dateTime/year'),
month=XPathValue('//dispatchTime/dateTime/month'),
day=XPathValue('//dispatchTime/dateTime/day'),
hour=XPathValue('//dispatchTime/dateTime/hour'),
minutes=XPathValue('//dispatchTime/dateTime/minutes'),
seconds=XPathValue('//dispatchTime/dateTime/seconds'))
attrs = dict(y=XPathValue('//dispatchTime/date/@year'),
m=XPathValue('//dispatchTime/date/@month'),
d=XPathValue('//dispatchTime/date/@day'))
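# Illustrative (assumed) request fragment targeted by the XPaths above:
#   <dispatchTime>
#     <date year="2015" month="1" day="2"/>
#     <dateTime><year>2015</year><month>1</month><day>2</day></dateTime>
#   </dispatchTime>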
ignore = XMLManglerExit(elements=elements, attrs=attrs)
def exits(request, context):
return ignore.get_exit(request, context)
|
rusenask/stubo-app
|
stubo/static/cmds/tests/ext/auto_mangle/skip_xml/ignore.py
|
Python
|
gpl-3.0
| 867
| 0.00692
|
from django.conf import settings
from django.db import migrations, models
import mapentity.models
import django.contrib.gis.db.models.fields
import django.db.models.deletion
import geotrek.common.mixins
import geotrek.authent.models
class Migration(migrations.Migration):
dependencies = [
('authent', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comfort',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('comfort', models.CharField(max_length=50, verbose_name='Comfort', db_column='confort')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['comfort'],
'db_table': 'l_b_confort',
'verbose_name': 'Comfort',
'verbose_name_plural': 'Comforts',
},
),
migrations.CreateModel(
name='Network',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('network', models.CharField(max_length=50, verbose_name='Network', db_column='reseau')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['network'],
'db_table': 'l_b_reseau',
'verbose_name': 'Network',
'verbose_name_plural': 'Networks',
},
),
migrations.CreateModel(
name='Path',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_insert', models.DateTimeField(auto_now_add=True, verbose_name='Insertion date', db_column='date_insert')),
('date_update', models.DateTimeField(auto_now=True, verbose_name='Update date', db_column='date_update')),
('geom_3d', django.contrib.gis.db.models.fields.GeometryField(dim=3, default=None, editable=False, srid=settings.SRID, null=True, spatial_index=False)),
('length', models.FloatField(db_column='longueur', default=0.0, editable=False, blank=True, null=True, verbose_name='3D Length')),
('ascent', models.IntegerField(db_column='denivelee_positive', default=0, editable=False, blank=True, null=True, verbose_name='Ascent')),
('descent', models.IntegerField(db_column='denivelee_negative', default=0, editable=False, blank=True, null=True, verbose_name='Descent')),
('min_elevation', models.IntegerField(db_column='altitude_minimum', default=0, editable=False, blank=True, null=True, verbose_name='Minimum elevation')),
('max_elevation', models.IntegerField(db_column='altitude_maximum', default=0, editable=False, blank=True, null=True, verbose_name='Maximum elevation')),
('slope', models.FloatField(db_column='pente', default=0.0, editable=False, blank=True, null=True, verbose_name='Slope')),
('geom', django.contrib.gis.db.models.fields.LineStringField(srid=settings.SRID, spatial_index=False)),
('geom_cadastre', django.contrib.gis.db.models.fields.LineStringField(srid=settings.SRID, spatial_index=False, null=True, editable=False)),
('valid', models.BooleanField(default=True, help_text='Approved by manager', verbose_name='Validity', db_column='valide')),
('visible', models.BooleanField(default=True, help_text='Shown in lists and maps', verbose_name='Visible', db_column='visible')),
('name', models.CharField(db_column='nom', max_length=20, blank=True, help_text='Official name', null=True, verbose_name='Name')),
('comments', models.TextField(help_text='Remarks', null=True, verbose_name='Comments', db_column='remarques', blank=True)),
('departure', models.CharField(db_column='depart', default='', max_length=250, blank=True, help_text='Departure place', null=True, verbose_name='Departure')),
('arrival', models.CharField(db_column='arrivee', default='', max_length=250, blank=True, help_text='Arrival place', null=True, verbose_name='Arrival')),
('eid', models.CharField(max_length=128, null=True, verbose_name='External id', db_column='id_externe', blank=True)),
('comfort', models.ForeignKey(related_name='paths', on_delete=django.db.models.deletion.CASCADE, db_column='confort', blank=True, to='core.Comfort', null=True, verbose_name='Comfort')),
('networks', models.ManyToManyField(related_name='paths', db_table='l_r_troncon_reseau', verbose_name='Networks', to='core.Network', blank=True)),
],
options={
'db_table': 'l_t_troncon',
'verbose_name': 'Path',
'verbose_name_plural': 'Paths',
},
bases=(geotrek.common.mixins.AddPropertyMixin, mapentity.models.MapEntityMixin, models.Model),
),
migrations.CreateModel(
name='PathAggregation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_position', models.FloatField(verbose_name='Start position', db_column='pk_debut', db_index=True)),
('end_position', models.FloatField(verbose_name='End position', db_column='pk_fin', db_index=True)),
('order', models.IntegerField(default=0, null=True, verbose_name='Order', db_column='ordre', blank=True)),
('path', models.ForeignKey(related_name='aggregations', on_delete=django.db.models.deletion.DO_NOTHING, db_column='troncon', verbose_name='Path', to='core.Path')),
],
options={
'ordering': ['order'],
'db_table': 'e_r_evenement_troncon',
'verbose_name': 'Path aggregation',
'verbose_name_plural': 'Path aggregations',
},
),
migrations.CreateModel(
name='PathSource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('source', models.CharField(max_length=50, verbose_name='Source')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['source'],
'db_table': 'l_b_source_troncon',
'verbose_name': 'Path source',
'verbose_name_plural': 'Path sources',
},
),
migrations.CreateModel(
name='Stake',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('stake', models.CharField(max_length=50, verbose_name='Stake', db_column='enjeu')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['id'],
'db_table': 'l_b_enjeu',
'verbose_name': 'Maintenance stake',
'verbose_name_plural': 'Maintenance stakes',
},
),
migrations.CreateModel(
name='Topology',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_insert', models.DateTimeField(auto_now_add=True, verbose_name='Insertion date', db_column='date_insert')),
('date_update', models.DateTimeField(auto_now=True, verbose_name='Update date', db_column='date_update')),
('deleted', models.BooleanField(default=False, verbose_name='Deleted', editable=False, db_column='supprime')),
('geom_3d', django.contrib.gis.db.models.fields.GeometryField(dim=3, default=None, editable=False, srid=settings.SRID, null=True, spatial_index=False)),
('length', models.FloatField(db_column='longueur', default=0.0, editable=False, blank=True, null=True, verbose_name='3D Length')),
('ascent', models.IntegerField(db_column='denivelee_positive', default=0, editable=False, blank=True, null=True, verbose_name='Ascent')),
('descent', models.IntegerField(db_column='denivelee_negative', default=0, editable=False, blank=True, null=True, verbose_name='Descent')),
('min_elevation', models.IntegerField(db_column='altitude_minimum', default=0, editable=False, blank=True, null=True, verbose_name='Minimum elevation')),
('max_elevation', models.IntegerField(db_column='altitude_maximum', default=0, editable=False, blank=True, null=True, verbose_name='Maximum elevation')),
('slope', models.FloatField(db_column='pente', default=0.0, editable=False, blank=True, null=True, verbose_name='Slope')),
('offset', models.FloatField(default=0.0, verbose_name='Offset', db_column='decallage')),
('kind', models.CharField(verbose_name='Kind', max_length=32, editable=False)),
('geom', django.contrib.gis.db.models.fields.GeometryField(default=None, srid=settings.SRID, spatial_index=False, null=True, editable=False)),
],
options={
'db_table': 'e_t_evenement',
'verbose_name': 'Topology',
'verbose_name_plural': 'Topologies',
},
bases=(geotrek.common.mixins.AddPropertyMixin, models.Model),
),
migrations.CreateModel(
name='Usage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('usage', models.CharField(max_length=50, verbose_name='Usage', db_column='usage')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['usage'],
'db_table': 'l_b_usage',
'verbose_name': 'Usage',
'verbose_name_plural': 'Usages',
},
),
migrations.CreateModel(
name='Trail',
fields=[
('topo_object', models.OneToOneField(parent_link=True, on_delete=django.db.models.deletion.CASCADE, primary_key=True, db_column='evenement', serialize=False, to='core.Topology')),
('name', models.CharField(max_length=64, verbose_name='Name', db_column='nom')),
('departure', models.CharField(max_length=64, verbose_name='Departure', db_column='depart')),
('arrival', models.CharField(max_length=64, verbose_name='Arrival', db_column='arrivee')),
('comments', models.TextField(default='', verbose_name='Comments', db_column='commentaire', blank=True)),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['name'],
'db_table': 'l_t_sentier',
'verbose_name': 'Trail',
'verbose_name_plural': 'Trails',
},
bases=(mapentity.models.MapEntityMixin, 'core.topology', models.Model),
),
migrations.AddField(
model_name='topology',
name='paths',
field=models.ManyToManyField(to='core.Path', through='core.PathAggregation', verbose_name='Path', db_column='troncons'),
),
migrations.AddField(
model_name='pathaggregation',
name='topo_object',
field=models.ForeignKey(related_name='aggregations', on_delete=django.db.models.deletion.CASCADE, db_column='evenement', verbose_name='Topology', to='core.Topology'),
),
migrations.AddField(
model_name='path',
name='source',
field=models.ForeignKey(related_name='paths', on_delete=django.db.models.deletion.CASCADE, db_column='source', blank=True, to='core.PathSource', null=True, verbose_name='Source'),
),
migrations.AddField(
model_name='path',
name='stake',
field=models.ForeignKey(related_name='paths', on_delete=django.db.models.deletion.CASCADE, db_column='enjeu', blank=True, to='core.Stake', null=True, verbose_name='Maintenance stake'),
),
migrations.AddField(
model_name='path',
name='structure',
field=models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure'),
),
migrations.AddField(
model_name='path',
name='usages',
field=models.ManyToManyField(related_name='paths', db_table='l_r_troncon_usage', verbose_name='Usages', to='core.Usage', blank=True),
),
]
|
GeotrekCE/Geotrek-admin
|
geotrek/core/migrations/0001_initial.py
|
Python
|
bsd-2-clause
| 14,022
| 0.004921
|
a = 'sdlbapm'
b = 'alam'
for d in a:
print d + b
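# The loop above prints one word per line (illustrative):
# salam, dalam, lalam, balam, aalam, palam, malam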
|
motealle/python
|
01.py
|
Python
|
gpl-2.0
| 53
| 0
|
import unittest
from datetime import datetime
import tempfile
import os
from due.agent import Agent
from due.episode import Episode
from due.event import Event
from due.persistence import serialize, deserialize
from due.models.tfidf import TfIdfAgent
from due.models.dummy import DummyAgent
class TestTfIdfAgent(unittest.TestCase):
def test_save_load(self):
agent = TfIdfAgent()
agent.learn_episodes(_get_train_episodes())
saved_agent = agent.save()
with tempfile.TemporaryDirectory() as temp_dir:
path = os.path.join(temp_dir, 'serialized_tfidf_agent.due')
serialize(saved_agent, path)
loaded_agent = Agent.load(deserialize(path))
assert agent.parameters == loaded_agent.parameters
assert agent._normalized_past_utterances == loaded_agent._normalized_past_utterances
assert [e.save() for e in loaded_agent._past_episodes] == [e.save() for e in agent._past_episodes]
expected_utterance = agent._process_utterance('aaa bbb ccc mario')
loaded_utterance = loaded_agent._process_utterance('aaa bbb ccc mario')
assert (agent._vectorizer.transform([expected_utterance]) != loaded_agent._vectorizer.transform([loaded_utterance])).nnz == 0
assert (agent._vectorized_past_utterances != loaded_agent._vectorized_past_utterances).nnz == 0
		assert agent.utterance_callback(_get_test_episode())[0].payload == loaded_agent.utterance_callback(_get_test_episode())[0].payload
def test_utterance_callback(self):
agent = TfIdfAgent()
agent.learn_episodes(_get_train_episodes())
result = agent.utterance_callback(_get_test_episode())
self.assertEqual(result[0].payload, 'bbb')
def test_tfidf_agent(self):
cb = TfIdfAgent()
# Learn sample episode
sample_episode, alice, bob = _sample_episode()
cb.learn_episodes([sample_episode])
# Predict answer
e2 = alice.start_episode(bob)
alice.say("Hi!", e2)
answer_events = cb.utterance_callback(e2)
self.assertEqual(len(answer_events), 1)
self.assertEqual(answer_events[0].payload, 'Hello')
def test_agent_load(self):
sample_episode, alice, bob = _sample_episode()
cb = TfIdfAgent()
cb.learn_episodes([sample_episode])
test_dir = tempfile.mkdtemp()
test_path = os.path.join(test_dir, 'test_agent_load.pkl')
serialize(cb.save(), test_path)
loaded_cb = Agent.load(deserialize(test_path))
self.assertIsInstance(loaded_cb, TfIdfAgent)
e2 = alice.start_episode(bob)
alice.say("Hi!", e2)
answer_events = loaded_cb.utterance_callback(e2)
self.assertEqual(len(answer_events), 1)
self.assertEqual(answer_events[0].payload, 'Hello')
def _get_train_episodes():
result = []
e = Episode('a', 'b')
e.events = [
Event(Event.Type.Utterance, datetime.now(), 'a', 'aaa'),
Event(Event.Type.Utterance, datetime.now(), 'b', 'bbb'),
Event(Event.Type.Utterance, datetime.now(), 'a', 'ccc'),
Event(Event.Type.Utterance, datetime.now(), 'b', 'ddd')
]
result.append(e)
e = Episode('1', '2')
e.events = [
Event(Event.Type.Utterance, datetime.now(), '1', '111'),
Event(Event.Type.Utterance, datetime.now(), '2', '222'),
Event(Event.Type.Utterance, datetime.now(), '1', '333'),
Event(Event.Type.Utterance, datetime.now(), '2', '444')
]
result.append(e)
return result
def _get_test_episode():
e = Episode('a', 'b')
e.events = [
Event(Event.Type.Utterance, datetime.now(), 'a', 'aaa'),
]
return e
def _sample_episode():
alice = DummyAgent('alice')
bob = DummyAgent('bob')
result = alice.start_episode(bob)
alice.say("Hi!", result)
bob.say("Hello", result)
alice.say("How are you?", result)
bob.say("Good thanks, and you?", result)
alice.say("All good", result)
return result, alice, bob
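# Minimal sketch (assumption): standard unittest entry point, in case this
# module is run directly rather than through an external test runner.
if __name__ == '__main__':
	unittest.main()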
|
dario-chiappetta/Due
|
due/models/test_tfidf.py
|
Python
|
gpl-3.0
| 3,631
| 0.024787
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unicodedata
from urlparse import urlparse
from threading import Thread
import httplib, sys
from Queue import Queue
import itertools
import codecs
import csv
import ssl
import re
if len(sys.argv) < 3:
print "Usage: %s <csv database> <out csv>" % (sys.argv[0])
exit()
# Unicode CSV reader
# http://stackoverflow.com/a/6187936
class UnicodeCsvReader(object):
def __init__(self, f, encoding="utf-8", **kwargs):
self.csv_reader = csv.reader(f, **kwargs)
self.encoding = encoding
def __iter__(self):
return self
def next(self):
# read and split the csv row into fields
row = self.csv_reader.next()
# now decode
return [unicode(cell, self.encoding) for cell in row]
@property
def line_num(self):
return self.csv_reader.line_num
class UnicodeDictReader(csv.DictReader):
def __init__(self, f, encoding="utf-8", fieldnames=None, **kwds):
csv.DictReader.__init__(self, f, fieldnames=fieldnames, **kwds)
self.reader = UnicodeCsvReader(f, encoding=encoding, **kwds)
# Remove particles (words starting with a lowercase letter) from names
def cleanNames(names):
filtered_names = []
for word in names:
if len(word) and word[0].lower() != word[0]:
filtered_names.append(word)
return filtered_names
# Strips accents from a unicode string
def stripAccents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
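# For example (illustrative): stripAccents(u'José Ángel') returns u'Jose Angel'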
# Generates all combinations of 2 or more elements of the given array
def allCombinations(tab):
out = []
for n in range(2, len(tab) + 1):
for comb in itertools.combinations(tab, n):
out.append(" ".join(comb))
return out
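# For example (illustrative): allCombinations(['a', 'b', 'c'])
# returns ['a b', 'a c', 'b c', 'a b c']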
# Cycles through available urls and returns the next one in the list
def getNextBaseURL():
out = getNextBaseURL.urllist[getNextBaseURL.counter % len(getNextBaseURL.urllist)]
getNextBaseURL.counter += 1
return out
getNextBaseURL.counter = 0
getNextBaseURL.urllist = [l.strip() for l in open("urls.txt", "r")]
def fetchHandles(ourl, handles):
try:
url = urlparse(ourl)
conn = httplib.HTTPSConnection(url.netloc, context=ssl._create_unverified_context())
conn.request("GET", ourl)
res = conn.getresponse()
if res.status != 200:
print res.reason, ourl
return
for line in csv.reader((l for l in res.read().split("\n")[1:])):
if len(line) < 2:
continue
            match = re.match(r'https?://twitter\.com/(\w+)[^/]*$', line[1])
if match:
handle = match.group(1)
if handle not in handles:
handles.append(handle)
except Exception, e:
print "Error(%s): %s" % (ourl, e)
exit()
return
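# Illustrative example of the regex above: a result line linking to
# 'https://twitter.com/jack?lang=en' yields the handle 'jack'.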
def doQueries():
base = getNextBaseURL()
while True:
names, region, party = q.get()
clean_names = cleanNames(stripAccents(names).split(" "))
handles = []
for comb in allCombinations(clean_names):
query = comb.replace(" ", "+") + "+" + region + "+" + party + "+site:twitter.com"
url = base + "/?format=csv&q=" + query
fetchHandles(url, handles)
with codecs.open(sys.argv[2], "a", "utf-8") as out:
out.write("%s, %s\n" % (names, handles))
print "%s, %s" % (names, handles)
q.task_done()
concurrent = 50
q = Queue(concurrent * 2)
for i in range(concurrent):
t = Thread(target=doQueries)
t.daemon = True
t.start()
try:
with open(sys.argv[1], 'rb') as csvfile:
first = True
for line in UnicodeCsvReader(csvfile):
if first:
first = False
continue
names = line[0]
region = stripAccents(line[3]).replace(" ", "+")
party = stripAccents(line[5]).replace(" ", "+")
if party == "C's" or party == u"C´s":
party = "Ciudadanos"
q.put((names, region, party))
q.join()
except KeyboardInterrupt:
sys.exit(1)
|
florence-nocca/spanish-elections
|
retrieve-accounts/searx.py
|
Python
|
mit
| 4,109
| 0.004138
|
#!/usr/local/sci/bin/python
# PYTHON2.7
# import TestLeap
# TestVal = TestLeap.TestLeap(year)
import numpy as np
def TestLeap(year):
    '''Test whether a year is a leap year.
    Returns 0.0 if it is a leap year and a non-zero number if it is not.
    ONLY WORKS WITH SCALARS!!!'''
# first test - is it divisible by 4?
leapoo = (year/4.) - np.round(year/4.)
# second test - if it is divisible by 100. then is it also divisible by 400?
if (((year/100.) - np.round(year/100.)) == 0.):
leapoo = leapoo + ((year/400.) - np.round(year/400.))
return leapoo
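# Quick sanity check (illustrative, assuming the module is run directly):
if __name__ == "__main__":
    print TestLeap(2000)  # 0.0   -> leap year (divisible by 400)
    print TestLeap(1900)  # -0.25 -> not a leap year (divisible by 100 but not 400)
    print TestLeap(2012)  # 0.0   -> leap year (divisible by 4)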
|
Kate-Willett/HadISDH_Build
|
TestLeap.py
|
Python
|
cc0-1.0
| 632
| 0.006329
|
# coding=utf-8
import time
import json
import boto3
from botocore.errorfactory import ClientError
def lambda_handler(event, context):
instance_id = event.get('instance_id')
region_id = event.get('region_id', 'us-east-2')
image_name = 'beam-automation-'+time.strftime("%Y-%m-%d-%H%M%S", time.gmtime())
image_ids = {}
image_ids['us-east-2'] = create_ami(image_name, instance_id)
image_ids['us-east-1'] = copy_ami(image_name, image_ids['us-east-2'], 'us-east-1')
image_ids['us-west-2'] = copy_ami(image_name, image_ids['us-east-2'], 'us-west-2')
update_lambda(image_ids)
return json.dumps(image_ids)
def create_ami(image_name, instance_id):
ec2 = boto3.client('ec2',region_name='us-east-2')
res = ec2.create_image(InstanceId=instance_id,
Name=image_name)
wait4image(ec2, res['ImageId'])
ec2.terminate_instances(InstanceIds=[instance_id])
return res['ImageId']
def copy_ami(image_name, image_id, region):
ec2 = boto3.client('ec2',region_name=region)
res = ec2.copy_image(Name=image_name,
SourceImageId=image_id,
SourceRegion='us-east-2')
# wait4image(ec2, res['ImageId'])
return res['ImageId']
def wait4image(ec2, image_id):
waiter = ec2.get_waiter('image_available')
waiter.wait(Filters=[{'Name': 'state', 'Values': ['available']}],
ImageIds=[image_id])
def update_lambda(image_ids):
lm = boto3.client('lambda')
en_var = lm.get_function_configuration(FunctionName='simulateBeam')['Environment']['Variables']
en_var.update({
'us_east_2_IMAGE_ID': image_ids['us-east-2'],
'us_east_1_IMAGE_ID': image_ids['us-east-1'],
'us_west_2_IMAGE_ID': image_ids['us-west-2'],
})
lm.update_function_configuration(
FunctionName='simulateBeam',
Environment={
'Variables': en_var
}
)
def check_instance_id(instance_ids):
    # EC2 client for these ad-hoc helpers (same default region as the rest of this module)
    ec2 = boto3.client('ec2', region_name='us-east-2')
    for reservation in ec2.describe_instances()['Reservations']:
        for instance in reservation['Instances']:
            if instance['InstanceId'] in instance_ids:
                instance_ids.remove(instance['InstanceId'])
    return instance_ids
def stop_instance(instance_ids):
    ec2 = boto3.client('ec2', region_name='us-east-2')
    return ec2.stop_instances(InstanceIds=instance_ids)
def terminate_instance(instance_ids):
    ec2 = boto3.client('ec2', region_name='us-east-2')
    return ec2.terminate_instances(InstanceIds=instance_ids)
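# Local invocation sketch (hypothetical instance id; requires AWS credentials,
# left commented out because it would create and copy real AMIs):
# if __name__ == '__main__':
#     print(lambda_handler({'instance_id': 'i-0123456789abcdef0', 'region_id': 'us-east-2'}, None))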
|
colinsheppard/beam
|
aws/src/main/python/updateBeamAMI/lambda_function.py
|
Python
|
gpl-3.0
| 2,524
| 0.009113
|
# -*- coding: utf-8 -*-
#
# pynag - Python Nagios plug-in and configuration environment
# Copyright (C) 2010 Drew Stinnet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""This module contains low-level Parsers for nagios configuration and status objects.
Hint: If you are looking to parse some nagios configuration data, you probably
want pynag.Model module instead.
The highlights of this module are:
class Config: For Parsing nagios local nagios configuration files
class Livestatus: To connect to MK-Livestatus
class StatusDat: To read info from status.dat (not used a lot, migrate to mk-livestatus)
class LogFiles: To read nagios log-files
class MultiSite: To talk with multiple Livestatus instances
"""
import os
import re
import time
import sys
import socket # for mk_livestatus
import stat
import pynag.Plugins
import pynag.Utils
import StringIO
import tarfile
_sentinel = object()
class Config(object):
""" Parse and write nagios config files """
# Regex for beginning of object definition
# We want everything that matches:
# define <object_type> {
    __beginning_of_object = re.compile(r"^\s*define\s+(\w+)\s*\{?(.*)$")
def __init__(self, cfg_file=None, strict=False):
""" Constructor for :py:class:`pynag.Parsers.config` class
Args:
cfg_file (str): Full path to nagios.cfg. If None, try to
auto-discover location
strict (bool): if True, use stricter parsing which is more prone to
raising exceptions
"""
self.cfg_file = cfg_file # Main configuration file
self.strict = strict # Use strict parsing or not
# If nagios.cfg is not set, lets do some minor autodiscover.
if self.cfg_file is None:
self.cfg_file = self.guess_cfg_file()
self.data = {}
self.maincfg_values = []
self._is_dirty = False
        self.reset()  # Initialize misc member variables
def guess_nagios_directory(self):
""" Returns a path to the nagios configuration directory on your system
Use this function for determining the nagios config directory in your
code
Returns:
str. directory containing the nagios.cfg file
Raises:
:py:class:`pynag.Parsers.ConfigFileNotFound` if cannot guess config
file location.
"""
cfg_file = self.guess_cfg_file()
if not cfg_file:
raise ConfigFileNotFound("Could not find nagios.cfg")
return os.path.dirname(cfg_file)
def guess_nagios_binary(self):
""" Returns a path to any nagios binary found on your system
Use this function if you don't want specify path to the nagios binary
in your code and you are confident that it is located in a common
location
Checked locations are as follows:
* /usr/bin/nagios
* /usr/sbin/nagios
* /usr/local/nagios/bin/nagios
* /nagios/bin/nagios
* /usr/bin/icinga
* /usr/sbin/icinga
* /usr/bin/naemon
* /usr/sbin/naemon
* /usr/local/naemon/bin/naemon.cfg
* /usr/bin/shinken
* /usr/sbin/shinken
Returns:
str. Path to the nagios binary
None if could not find a binary in any of those locations
"""
possible_files = ('/usr/bin/nagios',
'/usr/sbin/nagios',
'/usr/local/nagios/bin/nagios',
'/nagios/bin/nagios',
'/usr/bin/icinga',
'/usr/sbin/icinga',
'/usr/bin/naemon',
'/usr/sbin/naemon',
'/usr/local/naemon/bin/naemon.cfg',
'/usr/bin/shinken',
'/usr/sbin/shinken')
possible_binaries = ('nagios', 'nagios3', 'naemon', 'icinga', 'shinken')
for i in possible_binaries:
command = ['which', i]
code, stdout, stderr = pynag.Utils.runCommand(command=command, shell=False)
if code == 0:
return stdout.splitlines()[0].strip()
return None
def guess_cfg_file(self):
""" Returns a path to any nagios.cfg found on your system
Use this function if you don't want specify path to nagios.cfg in your
code and you are confident that it is located in a common location
Checked locations are as follows:
* /etc/nagios/nagios.cfg
* /etc/nagios3/nagios.cfg
* /usr/local/nagios/etc/nagios.cfg
* /nagios/etc/nagios/nagios.cfg
* ./nagios.cfg
* ./nagios/nagios.cfg
* /etc/icinga/icinga.cfg
* /usr/local/icinga/etc/icinga.cfg
* ./icinga.cfg
* ./icinga/icinga.cfg
* /etc/naemon/naemon.cfg
* /usr/local/naemon/etc/naemon.cfg
* ./naemon.cfg
* ./naemon/naemon.cfg
* /etc/shinken/shinken.cfg
Returns:
str. Path to the nagios.cfg or equivalent file
None if couldn't find a file in any of these locations.
"""
possible_files = ('/etc/nagios/nagios.cfg',
'/etc/nagios3/nagios.cfg',
'/usr/local/nagios/etc/nagios.cfg',
'/nagios/etc/nagios/nagios.cfg',
'./nagios.cfg',
'./nagios/nagios.cfg',
'/etc/icinga/icinga.cfg',
'/usr/local/icinga/etc/icinga.cfg',
'./icinga.cfg',
'./icinga/icinga.cfg',
'/etc/naemon/naemon.cfg',
'/usr/local/naemon/etc/naemon.cfg',
'./naemon.cfg',
'./naemon/naemon.cfg',
'/etc/shinken/shinken.cfg',
)
for file_path in possible_files:
if self.isfile(file_path):
return file_path
return None
def reset(self):
""" Reinitializes the data of a parser instance to its default values.
"""
self.cfg_files = [] # List of other configuration files
self.data = {} # dict of every known object definition
self.errors = [] # List of ParserErrors
self.item_list = None
self.item_cache = None
self.maincfg_values = [] # The contents of main nagios.cfg
self._resource_values = [] # The contents of any resource_files
self.item_apply_cache = {} # This is performance tweak used by _apply_template
        # This is a raw list of all the key/value pairs in the config files. It
        # isn't useful until the items in it are resolved through the proper
        # 'use' relationships
self.pre_object_list = []
self.post_object_list = []
self.object_type_keys = {
'hostgroup': 'hostgroup_name',
'hostextinfo': 'host_name',
'host': 'host_name',
'service': 'name',
'servicegroup': 'servicegroup_name',
'contact': 'contact_name',
'contactgroup': 'contactgroup_name',
'timeperiod': 'timeperiod_name',
'command': 'command_name',
#'service':['host_name','description'],
}
def _has_template(self, target):
""" Determine if an item has a template associated with it
Args:
target (dict): Parsed item as parsed by :py:class:`pynag.Parsers.config`
"""
return 'use' in target
def _get_pid(self):
""" Checks the lock_file var in nagios.cfg and returns the pid from the file
If the pid file does not exist, returns None.
"""
try:
return self.open(self.get_cfg_value('lock_file'), "r").readline().strip()
except Exception:
return None
def _get_hostgroup(self, hostgroup_name):
""" Returns the hostgroup that matches the queried name.
Args:
hostgroup_name: Name of the hostgroup to be returned (string)
Returns:
Hostgroup item with hostgroup_name that matches the queried name.
"""
return self.data['all_hostgroup'].get(hostgroup_name, None)
def _get_key(self, object_type, user_key=None):
""" Return the correct 'key' for an item.
This is mainly a helper method for other methods in this class. It is
used to shorten code repetition.
Args:
object_type: Object type from which to obtain the 'key' (string)
user_key: User defined key. Default None. (string)
Returns:
Correct 'key' for the object type. (string)
"""
if not user_key and not object_type in self.object_type_keys:
raise ParserError("Unknown key for object type: %s\n" % object_type)
# Use a default key
if not user_key:
user_key = self.object_type_keys[object_type]
return user_key
def _get_item(self, item_name, item_type):
""" Return an item from a list
Creates a cache of items in self.pre_object_list and returns an element
from this cache. Looks for an item with corresponding name and type.
Args:
item_name: Name of the item to be returned (string)
item_type: Type of the item to be returned (string)
Returns:
Item with matching name and type from
:py:attr:`pynag.Parsers.config.item_cache`
"""
# create local cache for performance optimizations. TODO: Rewrite functions that call this function
if not self.item_list:
self.item_list = self.pre_object_list
self.item_cache = {}
for item in self.item_list:
if not "name" in item:
continue
name = item['name']
tmp_item_type = (item['meta']['object_type'])
if not tmp_item_type in self.item_cache:
self.item_cache[tmp_item_type] = {}
self.item_cache[tmp_item_type][name] = item
my_cache = self.item_cache.get(item_type, None)
if not my_cache:
return None
return my_cache.get(item_name, None)
def _apply_template(self, original_item):
""" Apply all attributes of item named parent_name to "original_item".
Applies all of the attributes of parents (from the 'use' field) to item.
Args:
original_item: Item 'use'-ing a parent item. The parent's attributes
will be concretely added to this item.
Returns:
original_item to which have been added all the attributes defined
in parent items.
"""
# TODO: There is space for more performance tweaks here
# If item does not inherit from anyone else, lets just return item as is.
if 'use' not in original_item:
return original_item
object_type = original_item['meta']['object_type']
raw_definition = original_item['meta']['raw_definition']
my_cache = self.item_apply_cache.get(object_type, {})
# Performance tweak, if item has been parsed. Lets not do it again
if raw_definition in my_cache:
return my_cache[raw_definition]
parent_names = original_item['use'].split(',')
parent_items = []
for parent_name in parent_names:
parent_item = self._get_item(parent_name, object_type)
if parent_item is None:
error_string = "Can not find any %s named %s\n" % (object_type, parent_name)
self.errors.append(ParserError(error_string, item=original_item))
continue
try:
# Parent item probably has use flags on its own. So lets apply to parent first
parent_item = self._apply_template(parent_item)
except RuntimeError:
t, e = sys.exc_info()[:2]
self.errors.append(ParserError("Error while parsing item: %s (it might have circular use=)" % str(e),
item=original_item))
parent_items.append(parent_item)
inherited_attributes = original_item['meta']['inherited_attributes']
template_fields = original_item['meta']['template_fields']
for parent_item in parent_items:
for k, v in parent_item.iteritems():
if k in ('use', 'register', 'meta', 'name'):
continue
if k not in inherited_attributes:
inherited_attributes[k] = v
if k not in original_item:
original_item[k] = v
template_fields.append(k)
if 'name' in original_item:
my_cache[raw_definition] = original_item
return original_item
def _get_items_in_file(self, filename):
""" Return all items in the given file
        Iterates through all elements in self.data and gathers all the items
defined in the queried filename.
Args:
filename: file from which are defined the items that will be
returned.
Returns:
A list containing all the items in self.data that were defined in
filename
"""
return_list = []
for k in self.data.keys():
for item in self[k]:
if item['meta']['filename'] == filename:
return_list.append(item)
return return_list
def get_new_item(self, object_type, filename):
""" Returns an empty item with all necessary metadata
Creates a new item dict and fills it with usual metadata:
* object_type : object_type (arg)
* filename : filename (arg)
* template_fields = []
* needs_commit = None
* delete_me = None
* defined_attributes = {}
* inherited_attributes = {}
        * raw_definition = "define %s {\\n\\n}" % object_type
Args:
object_type: type of the object to be created (string)
filename: Path to which the item will be saved (string)
Returns:
A new item with default metadata
"""
meta = {
'object_type': object_type,
'filename': filename,
'template_fields': [],
'needs_commit': None,
'delete_me': None,
'defined_attributes': {},
'inherited_attributes': {},
'raw_definition': "define %s {\n\n}" % object_type,
}
return {'meta': meta}
def _load_file(self, filename):
""" Parses filename with self.parse_filename and append results in self._pre_object_list
This function is mostly here for backwards compatibility
Args:
filename: the file to be parsed. This is supposed to a nagios object definition file
"""
for i in self.parse_file(filename):
self.pre_object_list.append(i)
def parse_file(self, filename):
""" Parses a nagios object configuration file and returns lists of dictionaries.
This is more or less a wrapper around :py:meth:`config.parse_string`,
so reading documentation there is useful.
Args:
filename: Path to the file to parse (string)
Returns:
A list containing elements parsed by :py:meth:`parse_string`
"""
try:
raw_string = self.open(filename, 'rb').read()
return self.parse_string(raw_string, filename=filename)
except IOError:
t, e = sys.exc_info()[:2]
parser_error = ParserError(e.strerror)
parser_error.filename = e.filename
self.errors.append(parser_error)
return []
def parse_string(self, string, filename='None'):
""" Parses a string, and returns all object definitions in that string
Args:
string: A string containing one or more object definitions
filename (optional): If filename is provided, it will be referenced
when raising exceptions
Examples:
>>> test_string = "define host {\\nhost_name examplehost\\n}\\n"
>>> test_string += "define service {\\nhost_name examplehost\\nservice_description example service\\n}\\n"
            >>> c = Config()
>>> result = c.parse_string(test_string)
>>> for i in result: print i.get('host_name'), i.get('service_description', None)
examplehost None
examplehost example service
Returns:
A list of dictionaries, that look like self.data
Raises:
:py:class:`ParserError`
"""
append = ""
current = None
in_definition = {}
tmp_buffer = []
result = []
for sequence_no, line in enumerate(string.splitlines(False)):
line_num = sequence_no + 1
# If previous line ended with backslash, treat this line as a
# continuation of previous line
if append:
line = append + line
append = None
# Cleanup and line skips
line = line.strip()
if line == "":
continue
if line[0] == "#" or line[0] == ';':
continue
# If this line ends with a backslash, continue directly to next line
if line.endswith('\\'):
append = line.strip('\\')
continue
if line.startswith('}'): # end of object definition
if not in_definition:
p = ParserError("Unexpected '}' found outside object definition in line %s" % line_num)
p.filename = filename
p.line_start = line_num
raise p
in_definition = None
current['meta']['line_end'] = line_num
# Looks to me like nagios ignores everything after the } so why shouldn't we ?
rest = line.split("}", 1)[1]
tmp_buffer.append(line)
try:
current['meta']['raw_definition'] = '\n'.join(tmp_buffer)
except Exception:
raise ParserError("Encountered Unexpected end of object definition in file '%s'." % filename)
result.append(current)
# Destroy the Nagios Object
current = None
continue
elif line.startswith('define'): # beginning of object definition
if in_definition:
msg = "Unexpected 'define' in {filename} on line {line_num}. was expecting '}}'."
msg = msg.format(**locals())
self.errors.append(ParserError(msg, item=current))
m = self.__beginning_of_object.search(line)
tmp_buffer = [line]
object_type = m.groups()[0]
if self.strict and object_type not in self.object_type_keys.keys():
raise ParserError(
"Don't know any object definition of type '%s'. it is not in a list of known object definitions." % object_type)
current = self.get_new_item(object_type, filename)
current['meta']['line_start'] = line_num
# Start off an object
in_definition = True
# Looks to me like nagios ignores everything after the {, so why shouldn't we ?
rest = m.groups()[1]
continue
else: # In the middle of an object definition
tmp_buffer.append(' ' + line)
# save whatever's left in the buffer for the next iteration
if not in_definition:
append = line
continue
# this is an attribute inside an object definition
if in_definition:
#(key, value) = line.split(None, 1)
tmp = line.split(None, 1)
if len(tmp) > 1:
(key, value) = tmp
else:
key = tmp[0]
value = ""
# Strip out in-line comments
if value.find(";") != -1:
value = value.split(";", 1)[0]
# Clean info
key = key.strip()
value = value.strip()
# Rename some old values that may be in the configuration
# This can probably be removed in the future to increase performance
if (current['meta']['object_type'] == 'service') and key == 'description':
key = 'service_description'
# Special hack for timeperiods as they are not consistent with other objects
# We will treat whole line as a key with an empty value
if (current['meta']['object_type'] == 'timeperiod') and key not in ('timeperiod_name', 'alias'):
key = line
value = ''
current[key] = value
current['meta']['defined_attributes'][key] = value
# Something is wrong in the config
else:
raise ParserError("Error: Unexpected token in file '%s'" % filename)
# Something is wrong in the config
if in_definition:
raise ParserError("Error: Unexpected EOF in file '%s'" % filename)
return result
def _locate_item(self, item):
""" This is a helper function for anyone who wishes to modify objects.
It takes "item", locates the file which is configured in, and locates
exactly the lines which contain that definition.
Returns: (tuple)
(everything_before, object_definition, everything_after, filename):
            * everything_before (list of lines): Every line in filename before object was defined
            * object_definition (list of lines): Every line used to define our item in "filename"
            * everything_after (list of lines): Every line in "filename" after object was defined
* filename (string): file in which the object was written to
Raises:
:py:class:`ValueError` if object was not found in "filename"
"""
if "filename" in item['meta']:
filename = item['meta']['filename']
else:
raise ValueError("item does not have a filename")
# Look for our item, store it as my_item
for i in self.parse_file(filename):
if self.compareObjects(item, i):
my_item = i
break
else:
raise ValueError("We could not find object in %s\n%s" % (filename, item))
# Caller of this method expects to be returned
# several lists that describe the lines in our file.
# The splitting logic starts here.
my_file = self.open(filename)
all_lines = my_file.readlines()
my_file.close()
start = my_item['meta']['line_start'] - 1
end = my_item['meta']['line_end']
everything_before = all_lines[:start]
object_definition = all_lines[start:end]
everything_after = all_lines[end:]
# If there happen to be line continuations in the object we will edit
# We will remove them from object_definition
object_definition = self._clean_backslashes(object_definition)
return everything_before, object_definition, everything_after, filename
def _clean_backslashes(self, list_of_strings):
""" Returns list_of_strings with all all strings joined that ended with backslashes
Args:
list_of_strings: List of strings to join
Returns:
            Another list of strings, with lines ending in a backslash joined together.
"""
tmp_buffer = ''
result = []
for i in list_of_strings:
if i.endswith('\\\n'):
tmp_buffer += i.strip('\\\n')
else:
result.append(tmp_buffer + i)
tmp_buffer = ''
return result
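    # For example (illustrative): ['host_name foo \\\n', 'bar\n'] is joined into
    # ['host_name foo bar\n'], because the first line ends with a backslash.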
def _modify_object(self, item, field_name=None, new_value=None, new_field_name=None, new_item=None,
make_comments=False):
""" Locates "item" and changes the line which contains field_name.
Helper function for object_* functions. Locates "item" and changes the
line which contains field_name. If new_value and new_field_name are both
None, the attribute is removed.
Args:
item(dict): The item to be modified
field_name(str): The field_name to modify (if any)
new_field_name(str): If set, field_name will be renamed
new_value(str): If set the value of field_name will be changed
new_item(str): If set, whole object will be replaced with this
string
make_comments: If set, put pynag-branded comments where changes
have been made
Returns:
True on success
Raises:
:py:class:`ValueError` if object or field_name is not found
:py:class:`IOError` is save is unsuccessful.
"""
if item is None:
return
if field_name is None and new_item is None:
raise ValueError("either field_name or new_item must be set")
if '\n' in str(new_value):
raise ValueError("Invalid character \\n used as an attribute value.")
everything_before, object_definition, everything_after, filename = self._locate_item(item)
if new_item is not None:
            # We have instructions on how to write the new object, so we don't need to parse it
object_definition = [new_item]
else:
change = None
value = None
i = 0
for i in range(len(object_definition)):
tmp = object_definition[i].split(None, 1)
if len(tmp) == 0:
continue
                # Hack for timeperiods, they don't work like other objects
elif item['meta']['object_type'] == 'timeperiod' and field_name not in ('alias', 'timeperiod_name'):
tmp = [object_definition[i]]
# we can't change timeperiod, so we fake a field rename
if new_value is not None:
new_field_name = new_value
new_value = None
value = ''
elif len(tmp) == 1:
value = ''
else:
value = tmp[1]
k = tmp[0].strip()
if k == field_name:
# Attribute was found, lets change this line
if new_field_name is None and new_value is None:
# We take it that we are supposed to remove this attribute
change = object_definition.pop(i)
break
elif new_field_name:
# Field name has changed
k = new_field_name
if new_value is not None:
# value has changed
value = new_value
# Here we do the actual change
change = "\t%-30s%s\n" % (k, value)
if item['meta']['object_type'] == 'timeperiod' and field_name not in ('alias', 'timeperiod_name'):
change = "\t%s\n" % new_field_name
object_definition[i] = change
break
if not change and new_value is not None:
# Attribute was not found. Lets add it
change = "\t%-30s%s\n" % (field_name, new_value)
object_definition.insert(i, change)
# Lets put a banner in front of our item
if make_comments:
comment = '# Edited by PyNag on %s\n' % time.ctime()
if len(everything_before) > 0:
last_line_before = everything_before[-1]
if last_line_before.startswith('# Edited by PyNag on'):
everything_before.pop() # remove this line
object_definition.insert(0, comment)
# Here we overwrite the config-file, hoping not to ruin anything
str_buffer = "%s%s%s" % (''.join(everything_before), ''.join(object_definition), ''.join(everything_after))
self.write(filename, str_buffer)
return True
def open(self, filename, *args, **kwargs):
""" Wrapper around global open()
Simply calls global open(filename, *args, **kwargs) and passes all arguments
as they are received. See global open() function for more details.
"""
return open(filename, *args, **kwargs)
@pynag.Utils.synchronized(pynag.Utils.rlock)
def write(self, filename, string):
""" Wrapper around open(filename).write()
Writes string to filename and closes the file handler. File handler is
        opened in `'w'` mode.
Args:
filename: File where *string* will be written. This is the path to
the file. (string)
string: String to be written to file. (string)
Returns:
Return code as returned by :py:meth:`os.write`
"""
fh = self.open(filename, 'w')
return_code = fh.write(string)
fh.flush()
# os.fsync(fh)
fh.close()
self._is_dirty = True
return return_code
def item_rewrite(self, item, str_new_item):
""" Completely rewrites item with string provided.
Args:
item: Item that is to be rewritten
str_new_item: str representation of the new item
..
In the following line, every "\\n" is actually a simple line break
This is only a little patch for the generated documentation.
Examples::
item_rewrite( item, "define service {\\n name example-service \\n register 0 \\n }\\n" )
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, new_item=str_new_item)
def item_remove(self, item):
""" Delete one specific item from its configuration files
Args:
            item: Item that is to be removed
        Examples::
            item_remove(item)
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, new_item="")
def item_edit_field(self, item, field_name, new_value):
""" Modifies one field of a (currently existing) object.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to be modified. Its field `field_name` will be set to
`new_value`.
field_name: Name of the field that will be modified. (str)
new_value: Value to which will be set the field `field_name`. (str)
Example usage::
            item_edit_field( item, field_name="host_name", new_value="examplehost.example.com") # doctest: +SKIP
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item, field_name=field_name, new_value=new_value)
def item_remove_field(self, item, field_name):
""" Removes one field of a (currently existing) object.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to remove field from.
field_name: Field to remove. (string)
Example usage::
item_remove_field( item, field_name="contactgroups" )
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, field_name=field_name, new_value=None, new_field_name=None)
def item_rename_field(self, item, old_field_name, new_field_name):
""" Renames a field of a (currently existing) item.
Changes are immediate (i.e. there is no commit).
Args:
item: Item to modify.
old_field_name: Name of the field that will have its name changed. (string)
new_field_name: New name given to `old_field_name` (string)
Example usage::
item_rename_field(item, old_field_name="normal_check_interval", new_field_name="check_interval")
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, field_name=old_field_name, new_field_name=new_field_name)
def item_add(self, item, filename):
""" Adds a new object to a specified config file.
Args:
item: Item to be created
filename: Filename that we are supposed to write the new item to.
This is the path to the file. (string)
Returns:
True on success
Raises:
:py:class:`IOError` on failed save
"""
if not 'meta' in item:
item['meta'] = {}
item['meta']['filename'] = filename
# Create directory if it does not already exist
dirname = os.path.dirname(filename)
if not self.isdir(dirname):
os.makedirs(dirname)
str_buffer = self.print_conf(item)
fh = self.open(filename, 'a')
fh.write(str_buffer)
fh.close()
return True
def edit_object(self, item, field_name, new_value):
""" Modifies a (currently existing) item.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to modify.
field_name: Field that will be updated.
new_value: Updated value of field `field_name`
Example Usage:
edit_object( item, field_name="host_name", new_value="examplehost.example.com")
Returns:
True on success
.. WARNING::
THIS FUNCTION IS DEPRECATED. USE item_edit_field() instead
"""
return self.item_edit_field(item=item, field_name=field_name, new_value=new_value)
def compareObjects(self, item1, item2):
""" Compares two items. Returns true if they are equal
Compares every key: value pair for both items. If anything is different,
the items will not be considered equal.
Args:
item1, item2: Items to be compared.
Returns:
True -- Items are equal
False -- Items are not equal
"""
keys1 = item1['meta']['defined_attributes'].keys()
keys2 = item2['meta']['defined_attributes'].keys()
keys1.sort()
keys2.sort()
result = True
if keys1 != keys2:
return False
for key in keys1:
if key == 'meta':
continue
key1 = item1[key]
key2 = item2[key]
# For our purpose, 30 is equal to 30.000
if key == 'check_interval':
key1 = int(float(key1))
key2 = int(float(key2))
if str(key1) != str(key2):
result = False
if result is False:
return False
return True
def edit_service(self, target_host, service_description, field_name, new_value):
""" Edit a service's attributes
Takes a host, service_description pair to identify the service to modify
and sets its field `field_name` to `new_value`.
Args:
target_host: name of the host to which the service is attached to. (string)
service_description: Service description of the service to modify. (string)
field_name: Field to modify. (string)
new_value: Value to which the `field_name` field will be updated (string)
Returns:
True on success
Raises:
:py:class:`ParserError` if the service is not found
"""
original_object = self.get_service(target_host, service_description)
if original_object is None:
raise ParserError("Service not found")
return self.edit_object(original_object, field_name, new_value)
def _get_list(self, item, key):
""" Return a comma list from an item
Args:
item: Item from which to select value. (string)
key: Field name of the value to select and return as a list. (string)
Example::
_get_list(Foo_object, host_name)
define service {
service_description Foo
host_name larry,curly,moe
}
returns
['larry','curly','moe']
Returns:
A list of the item's values of `key`
Raises:
:py:class:`ParserError` if item is not a dict
"""
if not isinstance(item, dict):
raise ParserError("%s is not a dictionary\n" % item)
# return []
if not key in item:
return []
return_list = []
if item[key].find(",") != -1:
for name in item[key].split(","):
return_list.append(name)
else:
return_list.append(item[key])
# Alphabetize
return_list.sort()
return return_list
def delete_object(self, object_type, object_name, user_key=None):
""" Delete object from configuration files
Args:
object_type: Type of the object to delete from configuration files.
object_name: Name of the object to delete from configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
True on success.
"""
item = self.get_object(object_type=object_type, object_name=object_name, user_key=user_key)
return self.item_remove(item)
def delete_service(self, service_description, host_name):
""" Delete service from configuration files
Args:
service_description: service_description field value of the object
to delete from configuration files.
host_name: host_name field value of the object to delete from
configuration files.
Returns:
True on success.
"""
item = self.get_service(host_name, service_description)
return self.item_remove(item)
def delete_host(self, object_name, user_key=None):
""" Delete a host from its configuration files
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
True on success.
"""
return self.delete_object('host', object_name, user_key=user_key)
def delete_hostgroup(self, object_name, user_key=None):
""" Delete a hostgroup from its configuration files
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
True on success.
"""
return self.delete_object('hostgroup', object_name, user_key=user_key)
def get_object(self, object_type, object_name, user_key=None):
""" Return a complete object dictionary
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: User defined key. Default None. (string)
        Returns:
            The item found to match all the criteria.
None if object is not found
"""
object_key = self._get_key(object_type, user_key)
for item in self.data['all_%s' % object_type]:
if item.get(object_key, None) == object_name:
return item
return None
def get_host(self, object_name, user_key=None):
""" Return a host object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('host', object_name, user_key=user_key)
def get_servicegroup(self, object_name, user_key=None):
""" Return a Servicegroup object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('servicegroup', object_name, user_key=user_key)
def get_contact(self, object_name, user_key=None):
""" Return a Contact object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('contact', object_name, user_key=user_key)
def get_contactgroup(self, object_name, user_key=None):
""" Return a Contactgroup object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('contactgroup', object_name, user_key=user_key)
def get_timeperiod(self, object_name, user_key=None):
""" Return a Timeperiod object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('timeperiod', object_name, user_key=user_key)
def get_command(self, object_name, user_key=None):
""" Return a Command object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('command', object_name, user_key=user_key)
def get_hostgroup(self, object_name, user_key=None):
""" Return a hostgroup object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('hostgroup', object_name, user_key=user_key)
def get_servicedependency(self, object_name, user_key=None):
""" Return a servicedependency object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('servicedependency', object_name, user_key=user_key)
def get_hostdependency(self, object_name, user_key=None):
""" Return a hostdependency object
Args:
            object_name: object_name field value of the object to look up in the
                configuration files.
            user_key: user_key to pass to :py:meth:`get_object`
        Returns:
            The item found to match all the criteria.
"""
return self.get_object('hostdependency', object_name, user_key=user_key)
def get_service(self, target_host, service_description):
""" Return a service object
Args:
target_host: host_name field of the service to be returned. This is
the host to which is attached the service.
service_description: service_description field of the service to be
returned.
Returns:
            The item found to match all the criteria.
"""
for item in self.data['all_service']:
if item.get('service_description') == service_description and item.get('host_name') == target_host:
return item
return None
def _append_use(self, source_item, name):
""" Append attributes to source_item that are inherited via 'use' attribute'
Args:
source_item: item (dict) to apply the inheritance upon
name: obsolete (discovered automatically via source_item['use'].
Here for compatibility.
Returns:
Source Item with appended attributes.
Raises:
:py:class:`ParserError` on recursion errors
"""
# Remove the 'use' key
if "use" in source_item:
del source_item['use']
for possible_item in self.pre_object_list:
if "name" in possible_item:
# Start appending to the item
for k, v in possible_item.iteritems():
try:
if k == 'use':
source_item = self._append_use(source_item, v)
except Exception:
raise ParserError("Recursion error on %s %s" % (source_item, v))
# Only add the item if it doesn't already exist
if not k in source_item:
source_item[k] = v
return source_item
def _post_parse(self):
""" Creates a few optimization tweaks and easy access lists in self.data
Creates :py:attr:`config.item_apply_cache` and fills the all_object
item lists in self.data.
"""
self.item_list = None
self.item_apply_cache = {} # This is performance tweak used by _apply_template
for raw_item in self.pre_object_list:
# Performance tweak, make sure hashmap exists for this object_type
object_type = raw_item['meta']['object_type']
if not object_type in self.item_apply_cache:
self.item_apply_cache[object_type] = {}
# Tweak ends
if "use" in raw_item:
raw_item = self._apply_template(raw_item)
self.post_object_list.append(raw_item)
# Add the items to the class lists.
for list_item in self.post_object_list:
type_list_name = "all_%s" % list_item['meta']['object_type']
if not type_list_name in self.data:
self.data[type_list_name] = []
self.data[type_list_name].append(list_item)
def commit(self):
""" Write any changes that have been made to it's appropriate file """
# Loops through ALL items
for k in self.data.keys():
for item in self[k]:
# If the object needs committing, commit it!
if item['meta']['needs_commit']:
# Create file contents as an empty string
file_contents = ""
# find any other items that may share this config file
extra_items = self._get_items_in_file(item['meta']['filename'])
if len(extra_items) > 0:
for commit_item in extra_items:
                            # Ignore files that are already set to be deleted
if commit_item['meta']['delete_me']:
continue
# Make sure we aren't adding this thing twice
if item != commit_item:
file_contents += self.print_conf(commit_item)
                    # This is the actual item that needs committing
if not item['meta']['delete_me']:
file_contents += self.print_conf(item)
# Write the file
filename = item['meta']['filename']
self.write(filename, file_contents)
# Recreate the item entry without the commit flag
self.data[k].remove(item)
item['meta']['needs_commit'] = None
self.data[k].append(item)
def flag_all_commit(self):
""" Flag every item in the configuration to be committed
This should probably only be used for debugging purposes
"""
for object_type in self.data.keys():
for item in self.data[object_type]:
item['meta']['needs_commit'] = True
def print_conf(self, item):
""" Return a string that can be used in a configuration file
Args:
item: Item to be dumped as a string.
Returns:
String representation of item.
"""
output = ""
# Header, to go on all files
output += "# Configuration file %s\n" % item['meta']['filename']
output += "# Edited by PyNag on %s\n" % time.ctime()
# Some hostgroup information
if "hostgroup_list" in item['meta']:
output += "# Hostgroups: %s\n" % ",".join(item['meta']['hostgroup_list'])
# Some hostgroup information
if "service_list" in item['meta']:
output += "# Services: %s\n" % ",".join(item['meta']['service_list'])
# Some hostgroup information
if "service_members" in item['meta']:
output += "# Service Members: %s\n" % ",".join(item['meta']['service_members'])
if len(item['meta']['template_fields']) != 0:
output += "# Values from templates:\n"
for k in item['meta']['template_fields']:
output += "#\t %-30s %-30s\n" % (k, item[k])
output += "\n"
output += "define %s {\n" % item['meta']['object_type']
for k, v in item.iteritems():
if v is None:
# Skip entries with No value
continue
if k != 'meta':
if k not in item['meta']['template_fields']:
output += "\t %-30s %-30s\n" % (k, v)
output += "}\n\n"
return output
def _load_static_file(self, filename=None):
""" Load a general config file (like nagios.cfg) that has key=value config file format. Ignore comments
Arguments:
filename: name of file to parse, if none nagios.cfg will be used
Returns:
a [ (key,value), (key,value) ] list
"""
result = []
if not filename:
filename = self.cfg_file
for line in self.open(filename).readlines():
# Strip out new line characters
line = line.strip()
# Skip blank lines
if line == "":
continue
# Skip comments
if line[0] == "#" or line[0] == ';':
continue
tmp = line.split("=", 1)
if len(tmp) < 2:
continue
key, value = tmp
key = key.strip()
value = value.strip()
result.append((key, value))
return result
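    # For example (illustrative), a nagios.cfg line "log_file=/var/nagios.log"
    # is returned as the tuple ('log_file', '/var/nagios.log').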
def _edit_static_file(self, attribute, new_value, filename=None, old_value=None, append=False):
""" Modify a general config file (like nagios.cfg) that has a key=value config file format.
Arguments:
filename: Name of config file that will be edited (i.e. nagios.cfg)
attribute: name of attribute to edit (i.e. check_external_commands)
new_value: new value for the said attribute (i.e. "1"). None deletes
the line.
old_value: Useful if multiple attributes exist (i.e. cfg_dir) and
you want to replace a specific one.
append: If true, do not overwrite current setting. Instead append
this at the end. Use this with settings that are repeated like
cfg_file.
Examples::
_edit_static_file(filename='/etc/nagios/nagios.cfg', attribute='check_external_commands', new_value='1')
_edit_static_file(filename='/etc/nagios/nagios.cfg', attribute='cfg_dir', new_value='/etc/nagios/okconfig', append=True)
"""
if filename is None:
filename = self.cfg_file
# For some specific attributes, append should be implied
if attribute in ('cfg_file', 'cfg_dir', 'broker_module'):
append = True
# If/when we make a change, new_line is what will be written
new_line = '%s=%s\n' % (attribute, new_value)
# new_value=None means line should be removed
if new_value is None:
new_line = ''
write_buffer = self.open(filename).readlines()
is_dirty = False # dirty if we make any changes
for i, line in enumerate(write_buffer):
# Strip out new line characters
line = line.strip()
# Skip blank lines
if line == "":
continue
# Skip comments
if line[0] == "#" or line[0] == ';':
continue
key, value = line.split("=", 1)
key = key.strip()
value = value.strip()
# If key does not match, we are not interested in this line
if key != attribute:
continue
            # If old_value was specified, and it matches, we don't have to look any further
elif value == old_value:
write_buffer[i] = new_line
is_dirty = True
break
# if current value is the same as new_value, no need to make changes
elif value == new_value:
return False
# Special so cfg_dir matches despite double-slashes, etc
elif attribute == 'cfg_dir' and new_value and os.path.normpath(value) == os.path.normpath(new_value):
return False
# We are not appending, and no old value was specified:
elif append is False and not old_value:
write_buffer[i] = new_line
is_dirty = True
break
if is_dirty is False and new_value is not None:
# If we get here, it means we read the whole file,
# and we have not yet made any changes, So we assume
# We should append to the file
write_buffer.append(new_line)
is_dirty = True
# When we get down here, it is time to write changes to file
if is_dirty is True:
str_buffer = ''.join(write_buffer)
self.write(filename, str_buffer)
return True
else:
return False
def needs_reload(self):
""" Checks if the Nagios service needs a reload.
Returns:
True if Nagios service needs reload of cfg files
False if reload not needed or Nagios is not running
"""
if not self.maincfg_values:
self.reset()
self.parse_maincfg()
new_timestamps = self.get_timestamps()
object_cache_file = self.get_cfg_value('object_cache_file')
if self._get_pid() is None:
return False
if not object_cache_file:
return True
if not self.isfile(object_cache_file):
return True
object_cache_timestamp = new_timestamps.get(object_cache_file, 0)
# Reload not needed if no object_cache file
if object_cache_file is None:
return False
for k, v in new_timestamps.items():
if not v or int(v) > object_cache_timestamp:
return True
return False
def needs_reparse(self):
""" Checks if the Nagios configuration needs to be reparsed.
Returns:
True if any Nagios configuration file has changed since last parse()
"""
# If Parse has never been run:
if self.data == {}:
return True
# If previous save operation has forced a reparse
if self._is_dirty is True:
return True
# If we get here, we check the timestamps of the configs
new_timestamps = self.get_timestamps()
if len(new_timestamps) != len(self.timestamps):
return True
for k, v in new_timestamps.items():
if self.timestamps.get(k, None) != v:
return True
return False
@pynag.Utils.synchronized(pynag.Utils.rlock)
def parse_maincfg(self):
""" Parses your main configuration (nagios.cfg) and stores it as key/value pairs in self.maincfg_values
This function is mainly used by config.parse() which also parses your
whole configuration set.
Raises:
py:class:`ConfigFileNotFound`
"""
# If nagios.cfg is not set, lets do some minor autodiscover.
if self.cfg_file is None:
raise ConfigFileNotFound('Could not find nagios.cfg')
self.maincfg_values = self._load_static_file(self.cfg_file)
@pynag.Utils.synchronized(pynag.Utils.rlock)
def parse(self):
""" Parse all objects in your nagios configuration
This functions starts by loading up your nagios.cfg ( parse_maincfg() )
then moving on to your object configuration files (as defined via
        cfg_file and cfg_dir) and your resource_file as well.
Returns:
None
Raises:
:py:class:`IOError` if unable to read any file due to permission
problems
"""
# reset
self.reset()
self.parse_maincfg()
self.cfg_files = self.get_cfg_files()
# When parsing config, we will softly fail if permission denied
# comes on resource files. If later someone tries to get them via
# get_resource, we will fail hard
try:
self._resource_values = self.get_resources()
except IOError:
t, e = sys.exc_info()[:2]
self.errors.append(str(e))
self.timestamps = self.get_timestamps()
        # This loads everything into self.pre_object_list
for cfg_file in self.cfg_files:
self._load_file(cfg_file)
self._post_parse()
self._is_dirty = False
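    # Typical usage sketch (assumes a readable nagios.cfg at the given path):
    #     c = Config(cfg_file='/etc/nagios/nagios.cfg')
    #     c.parse()
    #     for host in c.data.get('all_host', []):
    #         print host.get('host_name')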
def get_resource(self, resource_name):
""" Get a single resource value which can be located in any resource.cfg file
Arguments:
resource_name: Name as it appears in resource file (i.e. $USER1$)
Returns:
String value of the resource value.
            None if the resource is not found.
        Raises:
:py:class:`ParserError` if resource is not found and you do not have
permissions
"""
resources = self.get_resources()
for k, v in resources:
if k == resource_name:
return v
def get_timestamps(self):
""" Returns hash map of all nagios related files and their timestamps"""
files = {}
files[self.cfg_file] = None
for k, v in self.maincfg_values:
if k in ('resource_file', 'lock_file', 'object_cache_file'):
files[v] = None
for i in self.get_cfg_files():
files[i] = None
        # Now let's get the timestamp of every file
for k, v in files.items():
if not self.isfile(k):
continue
files[k] = self.stat(k).st_mtime
return files
def isfile(self, *args, **kwargs):
""" Wrapper around os.path.isfile """
return os.path.isfile(*args, **kwargs)
def isdir(self, *args, **kwargs):
""" Wrapper around os.path.isdir """
return os.path.isdir(*args, **kwargs)
def islink(self, *args, **kwargs):
""" Wrapper around os.path.islink """
return os.path.islink(*args, **kwargs)
def readlink(self, *args, **kwargs):
""" Wrapper around os.readlink """
return os.readlink(*args, **kwargs)
def stat(self, *args, **kwargs):
""" Wrapper around os.stat """
return os.stat(*args, **kwargs)
def remove(self, *args, **kwargs):
""" Wrapper around os.remove """
return os.remove(*args, **kwargs)
def access(self, *args, **kwargs):
""" Wrapper around os.access """
return os.access(*args, **kwargs)
def listdir(self, *args, **kwargs):
""" Wrapper around os.listdir """
return os.listdir(*args, **kwargs)
def exists(self, *args, **kwargs):
""" Wrapper around os.path.exists """
return os.path.exists(*args, **kwargs)
def get_resources(self):
"""Returns a list of every private resources from nagios.cfg"""
resources = []
for config_object, config_value in self.maincfg_values:
if config_object == 'resource_file' and self.isfile(config_value):
resources += self._load_static_file(config_value)
return resources
def extended_parse(self):
""" This parse is used after the initial parse() command is run.
It is only needed if you want extended meta information about hosts or other objects
"""
# Do the initial parsing
self.parse()
# First, cycle through the hosts, and append hostgroup information
index = 0
for host in self.data['all_host']:
if host.get("register", None) == "0":
continue
if not "host_name" in host:
continue
if not "hostgroup_list" in self.data['all_host'][index]['meta']:
self.data['all_host'][index]['meta']['hostgroup_list'] = []
# Append any hostgroups that are directly listed in the host definition
if "hostgroups" in host:
for hostgroup_name in self._get_list(host, 'hostgroups'):
if not "hostgroup_list" in self.data['all_host'][index]['meta']:
self.data['all_host'][index]['meta']['hostgroup_list'] = []
if hostgroup_name not in self.data['all_host'][index]['meta']['hostgroup_list']:
self.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup_name)
# Append any services which reference this host
service_list = []
for service in self.data['all_service']:
if service.get("register", None) == "0":
continue
if not "service_description" in service:
continue
if host['host_name'] in self._get_active_hosts(service):
service_list.append(service['service_description'])
self.data['all_host'][index]['meta']['service_list'] = service_list
# Increment count
index += 1
# Loop through all hostgroups, appending them to their respective hosts
for hostgroup in self.data['all_hostgroup']:
for member in self._get_list(hostgroup, 'members'):
index = 0
for host in self.data['all_host']:
if not "host_name" in host:
continue
# Skip members that do not match
if host['host_name'] == member:
# Create the meta var if it doesn't exist
if not "hostgroup_list" in self.data['all_host'][index]['meta']:
self.data['all_host'][index]['meta']['hostgroup_list'] = []
if hostgroup['hostgroup_name'] not in self.data['all_host'][index]['meta']['hostgroup_list']:
self.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup['hostgroup_name'])
# Increment count
index += 1
# Expand service membership
index = 0
for service in self.data['all_service']:
# Find a list of hosts to negate from the final list
self.data['all_service'][index]['meta']['service_members'] = self._get_active_hosts(service)
# Increment count
index += 1
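# A sketch of the metadata available after extended_parse(); the names
# below are made up:
#
#   c.extended_parse()
#   host = c.data['all_host'][0]
#   host['meta']['hostgroup_list']   # e.g. ['web-servers', 'production']
#   host['meta']['service_list']     # e.g. ['HTTP', 'Disk Usage']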
def _get_active_hosts(self, item):
""" Given an object, return a list of active hosts.
This will exclude hosts that are negated with a "!"
Args:
item: Item to obtain active hosts from.
Returns:
List of all the active hosts for `item`
"""
# First, generate the negation list
negate_hosts = []
# Hostgroups
if "hostgroup_name" in item:
for hostgroup_name in self._get_list(item, 'hostgroup_name'):
if hostgroup_name[0] == "!":
hostgroup_obj = self.get_hostgroup(hostgroup_name[1:])
negate_hosts.extend(self._get_list(hostgroup_obj, 'members'))
# Host Names
if "host_name" in item:
for host_name in self._get_list(item, 'host_name'):
if host_name[0] == "!":
negate_hosts.append(host_name[1:])
# Now get hosts that are actually listed
active_hosts = []
# Hostgroups
if "hostgroup_name" in item:
for hostgroup_name in self._get_list(item, 'hostgroup_name'):
if hostgroup_name[0] != "!":
active_hosts.extend(self._get_list(self.get_hostgroup(hostgroup_name), 'members'))
# Host Names
if "host_name" in item:
for host_name in self._get_list(item, 'host_name'):
if host_name[0] != "!":
active_hosts.append(host_name)
# Combine the lists
return_hosts = []
for active_host in active_hosts:
if active_host not in negate_hosts:
return_hosts.append(active_host)
return return_hosts
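# A sketch of the negation semantics, with a made-up service definition
# (assuming _get_list() splits comma-separated values):
#
#   item = {'host_name': 'web1,web2,!web2'}
#   self._get_active_hosts(item)   # -> ['web1']
#
# Hosts (or hostgroup members) prefixed with "!" end up in the negation
# list and are filtered out of the final result.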
def get_cfg_dirs(self):
""" Parses the main config file for configuration directories
Returns:
List of all cfg directories used in this configuration
Example::
print(get_cfg_dirs())
['/etc/nagios/hosts','/etc/nagios/objects',...]
"""
cfg_dirs = []
for config_object, config_value in self.maincfg_values:
if config_object == "cfg_dir":
cfg_dirs.append(config_value)
return cfg_dirs
def get_cfg_files(self):
""" Return a list of all cfg files used in this configuration
Filenames are normalised: if nagios.cfg specifies relative filenames,
they are converted to fully qualified filenames before being returned.
Returns:
List of all configurations files used in the configuration.
Example:
print(get_cfg_files())
['/etc/nagios/hosts/host1.cfg','/etc/nagios/hosts/host2.cfg',...]
"""
cfg_files = []
for config_object, config_value in self.maincfg_values:
# Add cfg_file objects to cfg file list
if config_object == "cfg_file":
config_value = self.abspath(config_value)
if self.isfile(config_value):
cfg_files.append(config_value)
# Parse all files in a cfg directory
if config_object == "cfg_dir":
config_value = self.abspath(config_value)
directories = []
raw_file_list = []
directories.append(config_value)
# Walk through every subdirectory and add to our list
while directories:
current_directory = directories.pop(0)
# Nagios doesn't care if cfg_dir exists or not, so neither do we
if not self.isdir(current_directory):
continue
for item in self.listdir(current_directory):
# Append full path to file
item = "%s" % (os.path.join(current_directory, item.strip()))
if self.islink(item):
item = os.readlink(item)
if self.isdir(item):
directories.append(item)
if raw_file_list.count(item) < 1:
raw_file_list.append(item)
for raw_file in raw_file_list:
if raw_file.endswith('.cfg'):
if self.exists(raw_file) and not self.isdir(raw_file):
# Nagios doesn't care if cfg_file exists or not, so we will not throw errors
cfg_files.append(raw_file)
return cfg_files
def abspath(self, path):
""" Return the absolute path of a given relative path.
The current working directory is assumed to be the dirname of nagios.cfg
Args:
path: relative path to be transformed into absolute path. (string)
Returns:
Absolute path of given relative path.
Example:
>>> c = config(cfg_file="/etc/nagios/nagios.cfg")
>>> c.abspath('nagios.cfg')
'/etc/nagios/nagios.cfg'
>>> c.abspath('/etc/nagios/nagios.cfg')
'/etc/nagios/nagios.cfg'
"""
if not isinstance(path, str):
raise ValueError("Path must be a string, got %s instead" % type(path))
if path.startswith('/'):
return path
nagiosdir = os.path.dirname(self.cfg_file)
normpath = os.path.abspath(os.path.join(nagiosdir, path))
return normpath
def get_cfg_value(self, key):
""" Returns one specific value from your nagios.cfg file,
None if value is not found.
Arguments:
key: what attribute to fetch from nagios.cfg (example: "command_file" )
Returns:
String of the first value found for the given key
Example:
>>> c = Config() # doctest: +SKIP
>>> log_file = c.get_cfg_value('log_file') # doctest: +SKIP
# Should return something like "/var/log/nagios/nagios.log"
"""
if not self.maincfg_values:
self.parse_maincfg()
for k, v in self.maincfg_values:
if k == key:
return v
return None
def get_object_types(self):
""" Returns a list of all discovered object types """
return map(lambda x: re.sub("all_", "", x), self.data.keys())
def cleanup(self):
""" Remove configuration files that have no configuration items """
for filename in self.cfg_files:
if not self.parse_file(filename): # parse_file returns empty list on empty files
self.remove(filename)
# If nagios.cfg specifies this file directly via cfg_file directive then...
for k, v in self.maincfg_values:
if k == 'cfg_file' and v == filename:
self._edit_static_file(k, old_value=v, new_value=None)
def __setitem__(self, key, item):
self.data[key] = item
def __getitem__(self, key):
return self.data[key]
class Livestatus(object):
""" Wrapper around MK-Livestatus
Example usage::
s = Livestatus()
for hostgroup in s.get_hostgroups():
print(hostgroup['name'], hostgroup['num_hosts'])
"""
def __init__(self, livestatus_socket_path=None, nagios_cfg_file=None, authuser=None):
""" Initilize a new instance of Livestatus
Args:
livestatus_socket_path: Path to livestatus socket (if none specified,
use one specified in nagios.cfg)
nagios_cfg_file: Path to your nagios.cfg. If None then try to
auto-detect
authuser: If specified, all data is fetched with the access rights
of that contact.
"""
self.nagios_cfg_file = nagios_cfg_file
self.error = None
if not livestatus_socket_path:
c = config(cfg_file=nagios_cfg_file)
c.parse_maincfg()
self.nagios_cfg_file = c.cfg_file
# Look for a broker_module line in the main config and parse its arguments
# One of the arguments is path to the file socket created
for k, v in c.maincfg_values:
if k == 'broker_module' and "livestatus.o" in v:
for arg in v.split()[1:]:
if arg.startswith('/') or '=' not in arg:
livestatus_socket_path = arg
break
else:
# If we get here, then we could not locate a broker_module argument
# that looked like a filename
msg = "No Livestatus socket defined. Make sure livestatus broker module is loaded."
raise ParserError(msg)
self.livestatus_socket_path = livestatus_socket_path
self.authuser = authuser
def test(self, raise_error=True):
""" Test if connection to livestatus socket is working
Args:
raise_error: If set to True, raise an exception if the test fails, otherwise return False
Raises:
ParserError if raise_error == True and connection fails
Returns:
True -- Connection is OK
False -- there are problems and raise_error==False
"""
try:
self.query("GET hosts")
except Exception:
t, e = sys.exc_info()[:2]
self.error = e
if raise_error:
raise ParserError("got '%s' when testing livestatus socket. error was: '%s'" % (type(e), e))
else:
return False
return True
def _get_socket(self):
""" Returns a socket.socket() instance to communicate with livestatus
Socket might be either a unix file socket or a tcp socket, depending on
the content of :py:attr:`livestatus_socket_path`
Returns:
Socket to livestatus instance (socket.socket)
Raises:
:py:class:`LivestatusNotConfiguredException` on failed connection.
:py:class:`ParserError` If could not parse configured TCP address
correctly.
"""
if not self.livestatus_socket_path:
msg = "We could not find path to MK livestatus socket file. Make sure MK livestatus is installed and configured"
raise LivestatusNotConfiguredException(msg)
try:
# If livestatus_socket_path contains a colon, we assume it is a tcp socket instead of a local file socket
if self.livestatus_socket_path.find(':') > 0:
address, tcp_port = self.livestatus_socket_path.split(':', 1)
if not tcp_port.isdigit():
msg = 'Could not parse host:port "%s". "%s" is not a valid tcp port.'
raise ParserError(msg % (self.livestatus_socket_path, tcp_port))
tcp_port = int(tcp_port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((address, tcp_port))
else:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(self.livestatus_socket_path)
return s
except IOError:
t, e = sys.exc_info()[:2]
msg = "%s while connecting to '%s'. Make sure nagios is running and mk_livestatus loaded."
raise ParserError(msg % (e, self.livestatus_socket_path))
def query(self, query, *args, **kwargs):
""" Performs LQL queries the livestatus socket
Queries are corrected and convenient default data are added to the
query before sending it to the socket.
Args:
query: Query to be passed to the livestatus socket (string)
args, kwargs: Additional parameters that will be sent to
:py:meth:`pynag.Utils.grep_to_livestatus`. The result will be
appended to the query.
Returns:
Answer from livestatus. It will be in python format unless specified
otherwise.
Raises:
:py:class:`ParserError` if problems connecting to livestatus.
"""
# columns parameter is here for backwards compatibility only
kwargs.pop('columns', None)
# We break the query up into a list of commands; before sending it to the
# socket we will write it out one line per item in the list
query = query.split('\n')
query += pynag.Utils.grep_to_livestatus(*args, **kwargs)
# If no response header was specified, we add fixed16
response_header = None
if not filter(lambda x: x.startswith('ResponseHeader:'), query):
query.append("ResponseHeader: fixed16")
response_header = "fixed16"
# If no specific outputformat is requested, we will return in python format
python_format = False
if not filter(lambda x: x.startswith('OutputFormat:'), query):
query.append("OutputFormat: python")
python_format = True
# There is a bug in livestatus where if requesting Stats, then no column headers are sent from livestatus
# In later versions, the headers are sent, but the output is corrupted.
#
# We maintain consistency by clinging on to the old bug, and if there are Stats in the output
# we will not ask for column headers
doing_stats = len(filter(lambda x: x.startswith('Stats:'), query)) > 0
if not filter(lambda x: x.startswith('Stats:'), query) and not filter(
lambda x: x.startswith('ColumnHeaders: on'), query):
query.append("ColumnHeaders: on")
# Check if we need to add authuser to the query
if not filter(lambda x: x.startswith('AuthUser:'), query) and self.authuser not in (None, ''):
query.append("AuthUser: %s" % self.authuser)
# When we reach here, we are done adding options to the query, so we convert to the string that will
# be sent to the livestatus socket
query = '\n'.join(query) + '\n'
self.last_query = query
#
# Lets create a socket and see if we can write to it
#
s = self._get_socket()
try:
s.send(query)
except IOError:
msg = "Could not write to socket '%s'. Make sure you have the right permissions"
raise ParserError(msg % self.livestatus_socket_path)
s.shutdown(socket.SHUT_WR)
tmp = s.makefile()
# Read the response header from livestatus
if response_header == "fixed16":
response_data = tmp.readline()
if len(response_data) == 0:
return []
return_code = response_data.split()[0]
if not return_code.startswith('2'):
error_message = tmp.readline().strip()
raise ParserError("Error '%s' from livestatus: %s" % (return_code, error_message))
answer = tmp.read()
# We are done with the livestatus socket, let's close it
s.close()
if answer == '':
return []
# If something other than python format was requested, we return the answer as is
if python_format is False:
return answer
# If we reach down here, it means we are supposed to parse the output before returning it
try:
answer = eval(answer)
except Exception:
raise ParserError("Error, could not parse response from livestatus.\n%s" % answer)
# Workaround for livestatus bug, where column headers are not provided even if we asked for them
if doing_stats is True and len(answer) == 1:
return answer[0]
columns = answer.pop(0)
# Lets throw everything into a hashmap before we return
result = []
for line in answer:
tmp = {}
for i, column in enumerate(line):
column_name = columns[i]
tmp[column_name] = column
result.append(tmp)
return result
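# A usage sketch; the extra arguments are plain LQL lines, while keyword
# arguments are translated into Filter: lines by
# pynag.Utils.grep_to_livestatus:
#
#   l = Livestatus()
#   l.query('GET services', 'Filter: state = 2', 'Filter: acknowledged = 0')
#
# Each returned row is a dict mapping column name to value, e.g.
# {'host_name': 'web1', 'description': 'HTTP', 'state': 2, ...}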
def get(self, table, *args, **kwargs):
""" Same as self.query('GET %s' % (table,))
Extra arguments will be appended to the query.
Args:
table: Table from which the data will be retrieved
args, kwargs: These will be appended to the end of the query to
perform additional instructions.
Example::
get('contacts', 'Columns: name alias')
Returns:
Answer from livestatus in python format.
"""
return self.query('GET %s' % (table,), *args, **kwargs)
def get_host(self, host_name):
""" Performs a GET query for a particular host
This performs::
'''GET hosts
Filter: host_name = %s''' % host_name
Args:
host_name: name of the host to obtain livestatus data from
Returns:
Answer from livestatus in python format.
"""
return self.query('GET hosts', 'Filter: host_name = %s' % host_name)[0]
def get_service(self, host_name, service_description):
""" Performs a GET query for a particular service
This performs::
'''GET services
Filter: host_name = %s
Filter: service_description = %s''' % (host_name, service_description)
Args:
host_name: name of the host the target service is attached to.
service_description: Description of the service to obtain livestatus
data from.
Returns:
Answer from livestatus in python format.
"""
return self.query('GET services', 'Filter: host_name = %s' % host_name,
'Filter: description = %s' % service_description)[0]
def get_hosts(self, *args, **kwargs):
""" Performs a GET query for all hosts
This performs::
'''GET hosts %s %s''' % (*args, **kwargs)
Args:
args, kwargs: These will be appended to the end of the query to
perform additional instructions.
Returns:
Answer from livestatus in python format.
"""
return self.query('GET hosts', *args, **kwargs)
def get_services(self, *args, **kwargs):
""" Performs a GET query for all services
This performs::
'''GET services
%s %s''' % (*args, **kwargs)
Args:
args, kwargs: These will be appended to the end of the query to
perform additional instructions.
Returns:
Answer from livestatus in python format.
"""
return self.query('GET services', *args, **kwargs)
def get_hostgroups(self, *args, **kwargs):
""" Performs a GET query for all hostgroups
This performs::
'''GET hostgroups
%s %s''' % (*args, **kwargs)
Args:
args, kwargs: These will be appended to the end of the query to
perform additional instructions.
Returns:
Answer from livestatus in python format.
"""
return self.query('GET hostgroups', *args, **kwargs)
def get_servicegroups(self, *args, **kwargs):
""" Performs a GET query for all servicegroups
This performs::
'''GET servicegroups
%s %s''' % (*args, **kwargs)
Args:
args, kwargs: These will be appended to the end of the query to
perform additional instructions.
Returns:
Answer from livestatus in python format.
"""
return self.query('GET servicegroups', *args, **kwargs)
def get_contactgroups(self, *args, **kwargs):
""" Performs a GET query for all contactgroups
This performs::
'''GET contactgroups
%s %s''' % (*args, **kwargs)
Args:
args, kwargs: These will be appended to the end of the query to
perform additional instructions.
Returns:
Answer from livestatus in python format.
"""
return self.query('GET contactgroups', *args, **kwargs)
def get_contacts(self, *args, **kwargs):
""" Performs a GET query for all contacts
This performs::
'''GET contacts
%s %s''' % (*args, **kwargs)
Args:
args, kwargs: These will be appended to the end of the query to
perform additional instructions.
Returns:
Answer from livestatus in python format.
"""
return self.query('GET contacts', *args, **kwargs)
def get_contact(self, contact_name):
""" Performs a GET query for a particular contact
This performs::
'''GET contacts
Filter: contact_name = %s''' % contact_name
Args:
contact_name: name of the contact to obtain livestatus data from
Returns:
Answer from livestatus in python format.
"""
return self.query('GET contacts', 'Filter: contact_name = %s' % contact_name)[0]
def get_servicegroup(self, name):
""" Performs a GET query for a particular servicegroup
This performs::
'''GET servicegroups
Filter: name = %s''' % name
Args:
name: name of the servicegroup to obtain livestatus data from
Returns:
Answer from livestatus in python format.
"""
return self.query('GET servicegroups', 'Filter: name = %s' % name)[0]
def get_hostgroup(self, name):
""" Performs a GET query for a particular hostgroup
This performs::
'''GET hostgroups
Filter: name = %s''' % name
Args:
name: name of the hostgroup to obtain livestatus data from
Returns:
Answer from livestatus in python format.
"""
return self.query('GET hostgroups', 'Filter: name = %s' % name)[0]
def get_contactgroup(self, name):
""" Performs a GET query for a particular contactgroup
This performs::
'''GET contactgroups
Filter: name = %s''' % name
Args:
name: name of the contactgroup to obtain livestatus data from
Returns:
Answer from livestatus in python format.
"""
return self.query('GET contactgroups', 'Filter: name = %s' % name)[0]
class RetentionDat(object):
""" Easy way to parse the content of retention.dat
After calling parse() contents of retention.dat are kept in self.data
Example Usage::
r = retention()
r.parse()
print r
print r.data['info']
"""
def __init__(self, filename=None, cfg_file=None):
""" Initilize a new instance of retention.dat
Args (you only need to provide one of these):
filename: path to your retention.dat file
cfg_file: path to your nagios.cfg file, path to retention.dat will
be looked up in this file
"""
# If filename is not provided, lets try to discover it from
# nagios.cfg
if filename is None:
c = config(cfg_file=cfg_file)
for key, value in c._load_static_file():
if key == "state_retention_file":
filename = value
self.filename = filename
self.data = None
def parse(self):
""" Parses your status.dat file and stores in a dictionary under self.data
Returns:
None
Raises:
:py:class:`ParserError`: if a problem arises while reading the file
:py:class:`ParserError`: if the file is not found
:py:class:`IOError`: if the file cannot be read
"""
self.data = {}
status = {} # Holds all attributes of a single item
key = None # if within definition, store everything before =
value = None # if within definition, store everything after =
if not self.filename:
raise ParserError("status.dat file not found")
lines = open(self.filename, 'rb').readlines()
for sequence_no, line in enumerate(lines):
line_num = sequence_no + 1
# Cleanup and line skips
line = line.strip()
if line == "":
pass
elif line[0] == "#" or line[0] == ';':
pass
elif line.find("{") != -1:
status = {}
status['meta'] = {}
status['meta']['type'] = line.split("{")[0].strip()
elif line.find("}") != -1:
# Status definition has finished, lets add it to
# self.data
if status['meta']['type'] not in self.data:
self.data[status['meta']['type']] = []
self.data[status['meta']['type']].append(status)
else:
tmp = line.split("=", 1)
if len(tmp) == 2:
(key, value) = tmp
status[key] = value
elif key == "long_plugin_output":
# special hack for long_output support. We get here if:
# * line does not contain {
# * line does not contain }
# * line does not contain =
# * last line parsed started with long_plugin_output=
status[key] += "\n" + line
else:
raise ParserError("Error on %s:%s: Could not parse line: %s" % (self.filename, line_num, line))
def __setitem__(self, key, item):
self.data[key] = item
def __getitem__(self, key):
return self.data[key]
def __str__(self):
if not self.data:
self.parse()
str_buffer = "# Generated by pynag"
for datatype, datalist in self.data.items():
for item in datalist:
str_buffer += "%s {\n" % datatype
for attr, value in item.items():
str_buffer += "%s=%s\n" % (attr, value)
str_buffer += "}\n"
return str_buffer
class StatusDat(RetentionDat):
""" Easy way to parse status.dat file from nagios
After calling parse() contents of status.dat are kept in status.data
Example usage::
>>> s = status()
>>> s.parse()
>>> keys = s.data.keys()
>>> 'info' in keys
True
>>> 'programstatus' in keys
True
>>> for service in s.data.get('servicestatus',[]):
... host_name=service.get('host_name', None)
... description=service.get('service_description',None)
"""
def __init__(self, filename=None, cfg_file=None):
""" Initilize a new instance of status
Args (you only need to provide one of these):
filename: path to your status.dat file
cfg_file: path to your nagios.cfg file, path to status.dat will be
looked up in this file
"""
# If filename is not provided, lets try to discover it from
# nagios.cfg
if filename is None:
c = config(cfg_file=cfg_file)
for key, value in c._load_static_file():
if key == "status_file":
filename = value
self.filename = filename
self.data = None
def get_contactstatus(self, contact_name):
""" Returns a dictionary derived from status.dat for one particular contact
Args:
contact_name: `contact_name` field of the contact's status.dat data
to parse and return as a dict.
Returns:
dict derived from status.dat for the contact.
Raises:
ValueError if object is not found
Example:
>>> s = status()
>>> s.get_contactstatus(contact_name='invalid_contact')
Traceback (most recent call last):
...
ValueError: invalid_contact
>>> first_contact = s.data['contactstatus'][0]['contact_name']
>>> s.get_contactstatus(first_contact)['contact_name'] == first_contact
True
"""
if self.data is None:
self.parse()
for i in self.data['contactstatus']:
if i.get('contact_name') == contact_name:
return i
raise ValueError(contact_name)
def get_hoststatus(self, host_name):
""" Returns a dictionary derived from status.dat for one particular contact
Args:
host_name: `host_name` field of the host's status.dat data
to parse and return as a dict.
Returns:
dict derived from status.dat for the host.
Raises:
ValueError if object is not found
"""
if self.data is None:
self.parse()
for i in self.data['hoststatus']:
if i.get('host_name') == host_name:
return i
raise ValueError(host_name)
def get_servicestatus(self, host_name, service_description):
""" Returns a dictionary derived from status.dat for one particular service
Args:
host_name: `host_name` field of the service's status.dat data.
service_description: `service_description` field of the service's
status.dat data to parse and return as a dict.
Returns:
dict derived from status.dat for the service.
Raises:
ValueError if object is not found
"""
if self.data is None:
self.parse()
for i in self.data['servicestatus']:
if i.get('host_name') == host_name:
if i.get('service_description') == service_description:
return i
raise ValueError(host_name, service_description)
class ObjectCache(Config):
""" Loads the configuration as it appears in objects.cache file """
def get_cfg_files(self):
for k, v in self.maincfg_values:
if k == 'object_cache_file':
return [v]
class ParserError(Exception):
""" ParserError is used for errors that the Parser has when parsing config.
Typical use case is a critical error while trying to read the configuration.
"""
filename = None
line_start = None
message = None
def __init__(self, message, item=None):
""" Creates an instance of ParserError
Args:
message: Message to be printed by the error
item: Pynag item who caused the error
"""
self.message = message
if item is None:
return
self.item = item
self.filename = item['meta']['filename']
self.line_start = item['meta'].get('line_start')
def __str__(self):
message = self.message
if self.filename and self.line_start:
message = '%s in %s, line %s' % (message, self.filename, self.line_start)
return repr(message)
class ConfigFileNotFound(ParserError):
""" This exception is thrown if we cannot locate any nagios.cfg-style config file. """
pass
class LivestatusNotConfiguredException(ParserError):
""" This exception is raised if we tried to autodiscover path to livestatus and failed """
class LogFiles(object):
""" Parses Logfiles defined in nagios.cfg and allows easy access to its content
Content is stored in python-friendly arrays of dicts. Output should be more
or less compatible with mk_livestatus log output
"""
def __init__(self, maincfg=None):
self.config = config(maincfg)
self.log_file = self.config.get_cfg_value('log_file')
self.log_archive_path = self.config.get_cfg_value('log_archive_path')
def get_log_entries(self, start_time=None, end_time=None, strict=True, search=None, **kwargs):
""" Get Parsed log entries for given timeperiod.
Args:
start_time: unix timestamp. if None, return all entries from today
end_time: If specified, only fetch log entries older than this (unix
timestamp)
strict: If True, only return entries between start_time and
end_time; if False, return all entries from the log files that
overlap with the given time range
search: If provided, only return log entries that contain this
string (case insensitive)
kwargs: All extra arguments are provided as filter on the log
entries. f.e. host_name="localhost"
Returns:
List of dicts
"""
now = time.time()
if end_time is None:
end_time = now
if start_time is None:
if 'filename' in kwargs:
start_time = 1
else:
seconds_in_a_day = 60 * 60 * 24
seconds_today = end_time % seconds_in_a_day  # seconds elapsed since midnight
start_time = end_time - seconds_today
start_time = int(start_time)
end_time = int(end_time)
logfiles = self.get_logfiles()
if 'filename' in kwargs:
logfiles = filter(lambda x: x == kwargs.get('filename'), logfiles)
# If start time was provided, skip all files that were last modified
# before start_time
if start_time:
logfiles = filter(lambda x: start_time <= os.stat(x).st_mtime, logfiles)
# Log entries are returned in ascending order, which is the opposite of
# what get_logfiles returns.
logfiles.reverse()
result = []
for log_file in logfiles:
entries = self._parse_log_file(filename=log_file)
if len(entries) == 0:
continue
first_entry = entries[0]
last_entry = entries[-1]
if first_entry['time'] > end_time:
continue
# If strict, filter entries to only include the ones in the timespan
if strict is True:
entries = [x for x in entries if x['time'] >= start_time and x['time'] <= end_time]
# If search string provided, filter the string
if search is not None:
entries = [x for x in entries if x['message'].lower().find(search.lower()) > -1]
for k, v in kwargs.items():
entries = [x for x in entries if x.get(k) == v]
result += entries
if start_time is None or int(start_time) >= int(first_entry.get('time')):
continue
# Now, logfiles should in MOST cases come sorted for us.
# However we rely on modification time of files and if it is off,
# We want to make sure log entries are coming in the correct order.
# The following sort should not impact performance in the typical use case.
result.sort(key=lambda x: x.get('time'))
return result
def get_logfiles(self):
""" Returns a list with the fullpath to every log file used by nagios.
Lists are sorted by modification times. Newest logfile is at the front
of the list so usually nagios.log comes first, followed by archivelogs
Returns:
List of strings
"""
logfiles = []
for filename in os.listdir(self.log_archive_path):
full_path = "%s/%s" % (self.log_archive_path, filename)
logfiles.append(full_path)
logfiles.append(self.log_file)
# Sort the logfiles by modification time, newest file at the front
logfiles.sort(key=lambda x: int(os.stat(x).st_mtime))
# Newest logfiles go to the front of the list
logfiles.reverse()
return logfiles
def get_flap_alerts(self, **kwargs):
""" Same as :py:meth:`get_log_entries`, except return timeperiod transitions.
Takes same parameters.
"""
return self.get_log_entries(class_name="timeperiod transition", **kwargs)
def get_notifications(self, **kwargs):
""" Same as :py:meth:`get_log_entries`, except return only notifications.
Takes same parameters.
"""
return self.get_log_entries(class_name="notification", **kwargs)
def get_state_history(self, start_time=None, end_time=None, host_name=None, strict=True, service_description=None):
""" Returns a list of dicts, with the state history of hosts and services.
Args:
start_time: unix timestamp. if None, return all entries from today
end_time: If specified, only fetch log entries older than this (unix
timestamp)
host_name: If provided, only return log entries for this host
service_description: If provided, only return log entries for this
service
Returns:
List of dicts with state history of hosts and services
"""
log_entries = self.get_log_entries(start_time=start_time, end_time=end_time, strict=strict, class_name='alerts')
result = []
last_state = {}
now = time.time()
for line in log_entries:
if 'state' not in line:
continue
line['duration'] = now - int(line.get('time'))
if host_name is not None and host_name != line.get('host_name'):
continue
if service_description is not None and service_description != line.get('service_description'):
continue
if start_time is None:
start_time = int(line.get('time'))
short_name = "%s/%s" % (line['host_name'], line['service_description'])
if short_name in last_state:
last = last_state[short_name]
last['end_time'] = line['time']
last['duration'] = last['end_time'] - last['time']
line['previous_state'] = last['state']
last_state[short_name] = line
if strict is True:
if start_time is not None and int(start_time) > int(line.get('time')):
continue
if end_time is not None and int(end_time) < int(line.get('time')):
continue
result.append(line)
return result
def _parse_log_file(self, filename=None):
""" Parses one particular nagios logfile into arrays of dicts.
Args:
filename: Log file to be parsed. If it is None, then the log_file from
nagios.cfg is used.
Returns:
A list of dicts containing all data from the log file
"""
if filename is None:
filename = self.log_file
result = []
for line in open(filename).readlines():
parsed_entry = self._parse_log_line(line)
if parsed_entry != {}:
parsed_entry['filename'] = filename
result.append(parsed_entry)
return result
def _parse_log_line(self, line):
""" Parse one particular line in nagios logfile and return a dict.
Args:
line: Line of the log file to be parsed.
Returns:
dict containing the information from the log file line.
"""
host = None
service_description = None
state = None
check_attempt = None
plugin_output = None
contact = None
m = re.search('^\[(.*?)\] (.*?): (.*)', line)
if m is None:
return {}
line = line.strip()
timestamp, logtype, options = m.groups()
result = {}
try:
timestamp = int(timestamp)
except ValueError:
timestamp = 0
result['time'] = int(timestamp)
result['type'] = logtype
result['options'] = options
result['message'] = line
result['class'] = 0 # unknown
result['class_name'] = 'unclassified'
if logtype in ('CURRENT HOST STATE', 'CURRENT SERVICE STATE', 'SERVICE ALERT', 'HOST ALERT'):
result['class'] = 1
result['class_name'] = 'alerts'
if logtype.find('HOST') > -1:
# This matches host current state:
m = re.search('(.*?);(.*?);(.*);(.*?);(.*)', options)
if m is None:
return result
host, state, hard, check_attempt, plugin_output = m.groups()
service_description = None
if logtype.find('SERVICE') > -1:
m = re.search('(.*?);(.*?);(.*?);(.*?);(.*?);(.*)', options)
if m is None:
return result
host, service_description, state, hard, check_attempt, plugin_output = m.groups()
result['host_name'] = host
result['service_description'] = service_description
result['state'] = int(pynag.Plugins.state[state])
result['check_attempt'] = check_attempt
result['plugin_output'] = plugin_output
result['text'] = plugin_output
elif "NOTIFICATION" in logtype:
result['class'] = 3
result['class_name'] = 'notification'
if logtype == 'SERVICE NOTIFICATION':
m = re.search('(.*?);(.*?);(.*?);(.*?);(.*?);(.*)', options)
if m is None:
return result
contact, host, service_description, state, command, plugin_output = m.groups()
elif logtype == 'HOST NOTIFICATION':
m = re.search('(.*?);(.*?);(.*?);(.*?);(.*)', options)
if m is None:
return result
contact, host, state, command, plugin_output = m.groups()
service_description = None
result['contact_name'] = contact
result['host_name'] = host
result['service_description'] = service_description
try:
result['state'] = int(pynag.Plugins.state[state])
except Exception:
result['state'] = -1
result['plugin_output'] = plugin_output
result['text'] = plugin_output
elif logtype == "EXTERNAL COMMAND":
result['class'] = 5
result['class_name'] = 'command'
m = re.search('(.*?);(.*)', options)
if m is None:
return result
command_name, text = m.groups()
result['command_name'] = command_name
result['text'] = text
elif logtype in ('PASSIVE SERVICE CHECK', 'PASSIVE HOST CHECK'):
result['class'] = 4
result['class_name'] = 'passive'
if logtype.find('HOST') > -1:
# This matches host current state:
m = re.search('(.*?);(.*?);(.*)', options)
if m is None:
return result
host, state, plugin_output = m.groups()
service_description = None
if logtype.find('SERVICE') > -1:
m = re.search('(.*?);(.*?);(.*?);(.*)', options)
if m is None:
return result
host, service_description, state, plugin_output = m.groups()
result['host_name'] = host
result['service_description'] = service_description
result['state'] = state
result['plugin_output'] = plugin_output
result['text'] = plugin_output
elif logtype in ('SERVICE FLAPPING ALERT', 'HOST FLAPPING ALERT'):
result['class_name'] = 'flapping'
elif logtype == 'TIMEPERIOD TRANSITION':
result['class_name'] = 'timeperiod_transition'
elif logtype == 'Warning':
result['class_name'] = 'warning'
result['state'] = "1"
result['text'] = options
if 'text' not in result:
result['text'] = result['options']
result['log_class'] = result['class'] # since class is a python keyword
return result
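# A sketch of parsing one (made-up) nagios log line:
#
#   line = '[1400000000] SERVICE ALERT: web1;HTTP;CRITICAL;HARD;3;Connection refused'
#   entry = self._parse_log_line(line)
#   entry['class_name']           # -> 'alerts'
#   entry['host_name']            # -> 'web1'
#   entry['service_description']  # -> 'HTTP'
#   entry['plugin_output']        # -> 'Connection refused'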
class ExtraOptsParser(object):
""" Get Nagios Extra-Opts from a config file as specified by http://nagiosplugins.org/extra-opts
We could ALMOST use Python's ConfigParser, but the nagios plugin team decided to
support multiple values per key, so a plain dict datatype no longer works.
It's a shame, because as a result we have to make our own "ini" parser.
Usage::
# cat /etc/nagios/plugins.ini
[main]
host_name = localhost
[other section]
host_name = example.com
# EOF
e = ExtraOptsParser(section_name='main', config_file='/etc/nagios/plugins.ini')
e.get('host_name') # returns "localhost"
e.get_values() # Returns a dict of all the extra opts
e.getlist('host_name') # returns all values of host_name (if more than one were specified) in a list
"""
standard_locations = [
"/etc/nagios/plugins.ini",
"/usr/local/nagios/etc/plugins.ini",
"/usr/local/etc/nagios/plugins.ini",
"/etc/opt/nagios/plugins.ini",
"/etc/nagios-plugins.ini",
"/usr/local/etc/nagios-plugins.ini",
"/etc/opt/nagios-plugins.ini",
]
def __init__(self, section_name=None, config_file=None):
if not section_name:
section_name = self.get_default_section_name()
if not config_file:
config_file = self.get_default_config_file()
self.section_name = section_name
self.config_file = config_file
self._all_options = self.parse_file(filename=config_file) or {}
def get_values(self):
""" Returns a dict with all extra-options with the granted section_name and config_file
Results are in the form of::
{
'key': ["possible","values"]
}
"""
return self._all_options.get(self.section_name, {})
def get_default_section_name(self):
""" According to extra-opts standard, the default should be filename of check script being run """
return os.path.basename(sys.argv[0])
def get_default_config_file(self):
""" Return path to first readable extra-opt config-file found
According to the nagiosplugins extra-opts spec the search method is as follows:
1. Search for plugins.ini or nagios-plugins.ini in the colon-separated NAGIOS_CONFIG_PATH variable
2. Search in a predefined list of files
3. Return None if no config file is found
The method works as follows:
To quote the spec on NAGIOS_CONFIG_PATH:
*"To use a custom location, set a NAGIOS_CONFIG_PATH environment
variable to the set of directories that should be checked (this is a
colon-separated list just like PATH). The first plugins.ini or
nagios-plugins.ini file found in these directories will be used."*
"""
search_path = []
nagios_config_path = os.environ.get('NAGIOS_CONFIG_PATH', '')
for path in nagios_config_path.split(':'):
search_path.append(os.path.join(path, 'plugins.ini'))
search_path.append(os.path.join(path, 'nagios-plugins.ini'))
search_path += self.standard_locations
self.search_path = search_path
for path in search_path:
if os.path.isfile(path):
return path
return None
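# A sketch of the resulting search order with a made-up environment:
#
#   NAGIOS_CONFIG_PATH=/opt/monitoring:/srv/nagios makes us probe
#   /opt/monitoring/plugins.ini, /opt/monitoring/nagios-plugins.ini,
#   /srv/nagios/plugins.ini and /srv/nagios/nagios-plugins.ini, and only
#   then fall back to the standard_locations list above.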
def get(self, option_name, default=_sentinel):
""" Return the value of one specific option
Args:
option_name: The value set to this option will be returned
Returns:
The value of `option_name`
Raises:
:py:class:`ValueError` when `option_name` cannot be found in options
"""
result = self.getlist(option_name, default)
# If option was not found, raise error
if result == _sentinel:
raise ValueError("Option named %s was not found" % (option_name))
elif result == default:
return result
elif not result:
# empty list
return result
else:
return result[0]
def getlist(self, option_name, default=_sentinel):
""" Return a list of all values for option_name
Args:
option_name: All the values set to this option will be returned
Returns:
List containing all the options set to `option_name`
Raises:
:py:class:`ValueError` when `option_name` cannot be found in options
"""
result = self.get_values().get(option_name, default)
if result == _sentinel:
raise ValueError("Option named %s was not found" % (option_name))
return result
def parse_file(self, filename):
""" Parses an ini-file and returns a dict of the ini values.
The datatype returned is a dict of sections, where each section is a
dict mapping each key to a list of its values.
Args:
filename: Full path to the ini-file to be parsed.
Example the following the file::
[main]
name = this is a name
key = value
key = value2
Would return::
{'main':
{
'name': ['this is a name'],
'key': ['value', 'value2']
}
}
"""
if filename is None:
return {}
f = open(filename)
try:
data = f.read()
return self.parse_string(data)
finally:
f.close()
def parse_string(self, string):
""" Parses a string that is supposed to be ini-style format.
See :py:meth:`parse_file` for more info
Args:
string: String to be parsed. Should be in ini-file format.
Returns:
Dictionary containing all the sections of the ini-file and their
respective data.
Raises:
:py:class:`ParserError` when line does not follow the ini format.
"""
sections = {}
# When parsing inside a section, the name of it stored here.
section_name = None
current_section = pynag.Utils.defaultdict(list)
for line_no, line in enumerate(string.splitlines()):
line = line.strip()
# skip empty lines
if not line or line[0] in ('#', ';'):
continue
# Check if this is a new section
if line.startswith('[') and line.endswith(']'):
section_name = line.strip('[').strip(']').strip()
current_section = pynag.Utils.defaultdict(list)
sections[section_name] = current_section
continue
# All entries should have key=value format
if '=' not in line:
error = "Line %s should be in the form of key=value format (got '%s' instead)" % (line_no + 1, line)
raise ParserError(error)
# If we reach here, we parse current line into key and a value section
key, value = line.split('=', 1)
key = key.strip()
value = value.strip()
sections[section_name][key].append(value)
return sections
class SshConfig(Config):
""" Parse object configuration files from remote host via ssh
Uses python-paramiko for ssh connections.
"""
def __init__(self, host, username, password=None, cfg_file=None):
""" Creates a SshConfig instance
Args:
host: Host to connect to
username: User to connect with
password: Password for `username`
cfg_file: Nagios main cfg file
"""
import paramiko
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(host, username=username, password=password)
self.ftp = self.ssh.open_sftp()
import cStringIO
c = cStringIO.StringIO()
self.tar = tarfile.open(mode='w', fileobj=c)
self.cached_stats = {}
super(SshConfig, self).__init__(cfg_file=cfg_file)
def open(self, filename, *args, **kwargs):
""" Behaves like file.open only, via ssh connection """
tarinfo = self._get_file(filename)
return self.tar.extractfile(tarinfo)
def add_to_tar(self, path):
""" Fetch `path` from the remote host and add it to the local tar cache """
print "Tarring ", path
command = "find '{path}' -type f | tar -c -T - --to-stdout --absolute-names"
command = command.format(path=path)
print command
stdin, stdout, stderr = self.ssh.exec_command(command, bufsize=50000)
tar = tarfile.open(fileobj=stdout, mode='r|')
if not self.tar:
self.tar = tar
# return
else:
for i in tar:
self.tar.addfile(i)
def is_cached(self, filename):
if not self.tar:
return False
return filename in self.tar.getnames()
def _get_file(self, filename):
""" Download filename and return the TarInfo object """
if filename not in self.tar.getnames():
self.add_to_tar(filename)
return self.tar.getmember(filename)
def get_cfg_files(self):
cfg_files = []
for config_object, config_value in self.maincfg_values:
# Add cfg_file objects to cfg file list
if config_object == "cfg_file":
config_value = self.abspath(config_value)
if self.isfile(config_value):
cfg_files.append(config_value)
elif config_object == "cfg_dir":
absolute_path = self.abspath(config_value)
command = "find '%s' -type f -iname \*cfg" % (absolute_path)
stdin, stdout, stderr = self.ssh.exec_command(command)
raw_filelist = stdout.read().splitlines()
cfg_files += raw_filelist
else:
continue
if not self.is_cached(config_value):
self.add_to_tar(config_value)
return cfg_files
def isfile(self, path):
""" Behaves like os.path.isfile only, via ssh connection """
try:
copy = self._get_file(path)
return copy.isfile()
except IOError:
return False
def isdir(self, path):
""" Behaves like os.path.isdir only, via ssh connection """
try:
file_stat = self.stat(path)
return stat.S_ISDIR(file_stat.st_mode)
except IOError:
return False
def islink(self, path):
""" Behaves like os.path.islink only, via ssh connection """
try:
file_stat = self.stat(path)
return stat.S_ISLNK(file_stat.st_mode)
except IOError:
return False
def readlink(self, path):
""" Behaves like os.readlink only, via ssh connection """
return self.ftp.readlink(path)
def stat(self, *args, **kwargs):
""" Wrapper around os.stat only, via ssh connection """
path = args[0]
if not self.is_cached(path):
self.add_to_tar(path)
if path not in self.tar.getnames():
raise IOError("No such file or directory %s" % path)
member = self.tar.getmember(path)
member.st_mode = member.mode
member.st_mtime = member.mtime
return member
def access(self, *args, **kwargs):
""" Wrapper around os.access only, via ssh connection """
return os.access(*args, **kwargs)
def exists(self, path):
""" Wrapper around os.path.exists only, via ssh connection """
try:
self.ftp.stat(path)
return True
except IOError:
return False
def listdir(self, *args, **kwargs):
""" Wrapper around os.listdir but via ssh connection """
stats = self.ftp.listdir_attr(*args, **kwargs)
for i in stats:
self.cached_stats[args[0] + "/" + i.filename] = i
files = map(lambda x: x.filename, stats)
return files
class MultiSite(Livestatus):
""" Wrapps around multiple Livesatus instances and aggregates the results
of queries.
Example:
>>> m = MultiSite()
>>> m.add_backend(path='/var/spool/nagios/livestatus.socket', name='local')
>>> m.add_backend(path='127.0.0.1:5992', name='remote')
"""
def __init__(self, *args, **kwargs):
super(MultiSite, self).__init__(*args, **kwargs)
self.backends = {}
def add_backend(self, path, name):
""" Add a new livestatus backend to this instance.
Arguments:
path (str): Path to file socket or remote address
name (str): Friendly shortname for this backend
"""
backend = Livestatus(
livestatus_socket_path=path,
nagios_cfg_file=self.nagios_cfg_file,
authuser=self.authuser
)
self.backends[name] = backend
def get_backends(self):
""" Returns a list of mk_livestatus instances
Returns:
list. List of mk_livestatus instances
"""
return self.backends
def get_backend(self, backend_name):
""" Return one specific backend that has previously been added
"""
if not backend_name:
return self.backends.values()[0]
try:
return self.backends[backend_name]
except KeyError:
raise ParserError("No backend found with name='%s'" % backend_name)
def query(self, query, *args, **kwargs):
""" Behaves like mk_livestatus.query() except results are aggregated from multiple backends
Arguments:
backend (str): If specified, fetch only data from this backend (see add_backend())
*args: Passed directly to mk_livestatus.query()
**kwargs: Passed directly to mk_livestatus.query()
"""
result = []
backend = kwargs.pop('backend', None)
# Special hack, if 'Stats' argument was provided to livestatus
# We have to maintain compatibility with old versions of livestatus
# and return single list with all results instead of a list of dicts
doing_stats = any(map(lambda x: x.startswith('Stats:'), args + (query,)))
# Iterate though all backends and run the query
# TODO: Make this multithreaded
for name, backend_instance in self.backends.items():
# Skip if a specific backend was requested and this is not it
if backend and backend != name:
continue
query_result = backend_instance.query(query, *args, **kwargs)
if doing_stats:
result = self._merge_statistics(result, query_result)
else:
for row in query_result:
row['backend'] = name
result.append(row)
return result
def _merge_statistics(self, list1, list2):
""" Merges multiple livestatus results into one result
Arguments:
list1 (list): List of integers
list2 (list): List of integers
Returns:
list. Aggregated results of list1 + list2
Example:
>>> result1 = [1,1,1,1]
>>> result2 = [2,2,2,2]
>>> MultiSite()._merge_statistics(result1, result2)
[3, 3, 3, 3]
"""
if not list1:
return list2
if not list2:
return list1
number_of_columns = len(list1)
result = [0] * number_of_columns
for row in (list1, list2):
for i, column in enumerate(row):
result[i] += column
return result
def get_host(self, host_name, backend=None):
""" Same as Livestatus.get_host() """
backend = self.get_backend(backend)
return backend.get_host(host_name)
def get_service(self, host_name, service_description, backend=None):
""" Same as Livestatus.get_service() """
backend = self.get_backend(backend)
return backend.get_service(host_name, service_description)
def get_contact(self, contact_name, backend=None):
""" Same as Livestatus.get_contact() """
backend = self.get_backend(backend)
return backend.get_contact(contact_name)
def get_contactgroup(self, contactgroup_name, backend=None):
""" Same as Livestatus.get_contact() """
backend = self.get_backend(backend)
return backend.get_contactgroup(contactgroup_name)
def get_servicegroup(self, servicegroup_name, backend=None):
""" Same as Livestatus.get_servicegroup() """
backend = self.get_backend(backend)
return backend.get_servicegroup(servicegroup_name)
def get_hostgroup(self, hostgroup_name, backend=None):
""" Same as Livestatus.get_hostgroup() """
backend = self.get_backend(backend)
return backend.get_hostgroup(hostgroup_name)
class config(Config):
""" This class is here only for backwards compatibility. Use Config instead. """
class mk_livestatus(Livestatus):
""" This class is here only for backwards compatibility. Use Livestatus instead. """
class object_cache(ObjectCache):
""" This class is here only for backwards compatibility. Use ObjectCache instead. """
class status(StatusDat):
""" This class is here only for backwards compatibility. Use StatusDat instead. """
class retention(RetentionDat):
""" This class is here only for backwards compatibility. Use RetentionDat instead. """
if __name__ == '__main__':
import time
start = time.time()
ssh = SshConfig(host='status.adagios.org', username='palli')
ssh.ssh.get_transport().window_size = 3 * 1024 * 1024
ssh.ssh.get_transport().use_compression()
# ssh.add_to_tar('/etc/nagios')
# sys.exit()
# ssh.ssh.exec_command("/bin/ls")
print "before reset"
ssh.parse()
end = time.time()
print "duration=", end - start
bland = ssh.tar.getmember('/etc/nagios/okconfig/hosts/web-servers/bland.is-http.cfg')
print bland.tobuf()
sys.exit(0)
print "ssh up"
ssh_conn = FastTransport(('status.adagios.org', 22))
ssh_conn.connect(username='palli')
ftp = paramiko.SFTPClient.from_transport(ssh_conn)
print "connected" \
""
ssh.ssh = ssh_conn
ssh.ftp = ftp
print "starting parse"
print "done parsing"
|
kaji-project/pynag
|
pynag/Parsers/__init__.py
|
Python
|
gpl-2.0
| 129,457
| 0.001808
|
from datetime import datetime, timedelta, timezone
from django.shortcuts import render
from django.core.management import call_command
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from fly_project import settings, constants
from api.models import SavingsGoal, CreditGoal, FinalGoal
def count_days_between(dt1, dt2):
"""Function will return an integer of day numbers between two dates."""
dt1 = dt1.replace(hour=0, minute=0, second=0, microsecond=0)
dt2 = dt2.replace(hour=0, minute=0, second=0, microsecond=0)
return (dt2 - dt1).days
def count_days_between_today_and(dt2):
"""Return the number of days from today (UTC) until the given date.
Used by the goal pages to detect whether a goal's unlock time has elapsed.
"""
now = datetime.now(timezone.utc)  # Standardize date to a specific time-zone
# Count how many days are left from today to the unlocked date.
return count_days_between(now,dt2)
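# A quick sketch of the day counting (dates are made up); both dates are
# truncated to midnight before subtracting:
#
#   dt1 = datetime(2015, 1, 1, 23, 59, tzinfo=timezone.utc)
#   dt2 = datetime(2015, 1, 3, 0, 1, tzinfo=timezone.utc)
#   count_days_between(dt1, dt2)   # -> 2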
@login_required(login_url='/authentication')
def mygoals_page(request):
return render(request, 'mygoals/type/view.html',{
'settings': settings,
})
@login_required(login_url='/authentication')
def savings_goals_page(request):
# Check to see if we have the latest SavingsGoal set, if not then
# create a new goal here.
savings_goal = SavingsGoal.objects.get_latest(request.user.id)
if not savings_goal:
savings_goal = SavingsGoal.objects.create(user_id=request.user.id,)
# Check to see if the current SavingsGoal has 'is_closed=True' which means
# we need to create a new savings goal.
if savings_goal.is_closed:
savings_goal = SavingsGoal.objects.create(user_id=request.user.id,)
# Check how many days are remaining from today to the unlock date.
days_remaining = 99999
if savings_goal.unlocks:
days_remaining = count_days_between_today_and(savings_goal.unlocks)
# CASE 1 OF 2:
# Load the main goal settings UI.
url = ''
if days_remaining > 0:
url = 'mygoals/savings/view.html'
# CASE 2 OF 2:
# Load the UI to handle whether the goal was set or not.
else:
url = 'mygoals/savings/complete.html'
return render(request, url,{
'settings': settings,
'constants': constants,
'savings_goal': savings_goal,
'days_remaining': days_remaining,
})
@login_required(login_url='/authentication')
def credit_goals_page(request):
# Check to see if we have the latest CreditGoal set, if not then
# create a new goal here.
credit_goal = CreditGoal.objects.get_latest(request.user.id)
if not credit_goal:
credit_goal = CreditGoal.objects.create(user_id=request.user.id,)
# Check to see if the current CreditGoal has 'is_closed=True' which means
# we need to create a new credit goal.
if credit_goal.is_closed:
credit_goal = CreditGoal.objects.create(user_id=request.user.id,)
# Check how many days are remaining from today to the unlock date.
days_remaining = 99999
if credit_goal.unlocks:
days_remaining = count_days_between_today_and(credit_goal.unlocks)
# CASE 1 OF 2:
# Load the main goal settings UI.
url = ''
if days_remaining > 0:
url = 'mygoals/credit/view.html'
# CASE 2 OF 2:
# Load the UI to handle whether the goal was set or not.
else:
url = 'mygoals/credit/complete.html'
return render(request, url,{
'settings': settings,
'constants': constants,
'credit_goal': credit_goal,
'days_remaining': days_remaining,
})
@login_required(login_url='/authentication')
def final_goal_page(request):
# Check to see if we have the latest FinalGoal set, if not then
# create a new goal here.
final_goal = FinalGoal.objects.get_latest(request.user.id)
if not final_goal:
final_goal = FinalGoal.objects.create(user_id=request.user.id,)
# Check to see if the current FinalGoal has 'is_closed=True' which means
# we need to create a new final goal.
if final_goal.is_closed:
final_goal = FinalGoal.objects.create(user_id=request.user.id,)
# Check how many days are remaining from today to the unlock date.
days_remaining = 99999
if final_goal.unlocks:
days_remaining = count_days_between_today_and(final_goal.unlocks)
# CASE 1 OF 2:
# Load the main goal settings UI.
url = ''
if days_remaining > 0:
url = 'mygoals/final/view.html'
# CASE 2 OF 2:
# Load the UI to handle whether the goal was set or not.
else:
url = 'mygoals/final/complete.html'
return render(request, url,{
'settings': settings,
'constants': constants,
'final_goal': final_goal,
'days_remaining': days_remaining,
})
@login_required(login_url='/authentication')
def goal_complete_page(request, goal_type, goal_id):
goal = None
try:
if goal_type == constants.SAVINGS_MYGOAL_TYPE:
goal = SavingsGoal.objects.get(id=goal_id)
elif goal_type == constants.CREDIT_MYGOAL_TYPE:
goal = CreditGoal.objects.get(id=goal_id)
elif goal_type == constants.GOAL_MYGOAL_TYPE:
goal = FinalGoal.objects.get(id=goal_id)
except Exception as e:
pass
return render(request, 'mygoals/complete/view.html',{
'settings': settings,
'constants': constants,
'goal_id': int(goal_id),
'goal_type': int(goal_type),
'goal': goal,
})
@login_required(login_url='/authentication')
def goal_failed_page(request, goal_type, goal_id):
goal = None
try:
if goal_type == constants.SAVINGS_MYGOAL_TYPE:
goal = SavingsGoal.objects.get(id=goal_id)
elif goal_type == constants.CREDIT_MYGOAL_TYPE:
goal = CreditGoal.objects.get(id=goal_id)
elif goal_type == constants.GOAL_MYGOAL_TYPE:
goal = FinalGoal.objects.get(id=goal_id)
    except Exception:
        # Lookup failed (unknown type or id); fall through with goal=None.
        pass
    # Evaluate the user's profile.
call_command('evaluate_me', str(request.me.id))
    return render(request, 'mygoals/failed/view.html', {
'settings': settings,
'constants': constants,
'goal_id': int(goal_id),
'goal_type': int(goal_type),
'goal': goal,
})
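# A plausible sketch of the count_days_between_today_and() helper used by the
# views above. The real helper lives elsewhere in this project, so the body
# below is an editor's assumption for illustration only and is left commented
# out:
#
#     from django.utils import timezone
#
#     def count_days_between_today_and(unlock_date):
#         # Days from now until unlock_date (negative once it has passed).
#         return (unlock_date - timezone.now()).days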
|
evan-rusin/fly-project
|
mygoals/views.py
|
Python
|
bsd-2-clause
| 6,398
| 0.00297
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Callable, Optional, Tuple, TypeVar
T = TypeVar('T')
def _accept(random_sample: float, cost_diff: float, temp: float) -> Tuple[bool, float]:
"""Calculates probability and draws if solution should be accepted.
Based on exp(-Delta*E/T) formula.
Args:
random_sample: Uniformly distributed random number in the range [0, 1).
cost_diff: Cost difference between new and previous solutions.
temp: Current temperature.
Returns:
Tuple of boolean and float, with boolean equal to True if solution is
accepted, and False otherwise. The float value is acceptance
probability.
"""
exponent = -cost_diff / temp
if exponent >= 0.0:
return True, 1.0
probability = math.exp(exponent)
return probability > random_sample, probability
def anneal_minimize(
initial: T,
cost_func: Callable[[T], float],
move_func: Callable[[T], T],
random_sample: Callable[[], float],
temp_initial: float = 1.0e-2,
temp_final: float = 1e-6,
cooling_factor: float = 0.99,
repeat: int = 100,
    trace_func: Optional[Callable[[T, float, float, float, bool], None]] = None,
) -> T:
"""Minimize solution using Simulated Annealing meta-heuristic.
Args:
initial: Initial solution of type T to the problem.
        cost_func: Callable which takes the current solution of type T,
            evaluates it and returns a float with the cost estimate. The
            better the solution is, the lower the resulting value should
            be; negative values are allowed.
        move_func: Callable which takes the current solution of type T and
            returns a new solution candidate of type T that is a random
            modification of the input solution. The input solution passed
            to this callback should not be mutated.
        random_sample: Callable which returns a uniformly sampled random
            value from the [0, 1) interval on each call.
temp_initial: Optional initial temperature for simulated annealing
optimization. Scale of this value is cost_func-dependent.
temp_final: Optional final temperature for simulated annealing
optimization, where search should be stopped. Scale of this value is
cost_func-dependent.
cooling_factor: Optional factor to be applied to the current temperature
and give the new temperature, this must be strictly greater than 0
and strictly lower than 1.
repeat: Optional number of iterations to perform at each given
temperature.
trace_func: Optional callback for tracing simulated annealing progress.
            It is called at each algorithm step with the arguments:
solution candidate (T), current temperature (float), candidate cost
(float), probability of accepting candidate (float), and acceptance
decision (boolean).
Returns:
The best solution found.
Raises:
ValueError: When supplied arguments are invalid.
"""
if not 0.0 < cooling_factor < 1.0:
raise ValueError("Cooling factor must be within (0, 1) range")
temp = temp_initial
sol = initial
sol_cost = cost_func(initial)
best = sol
best_cost = sol_cost
if trace_func:
trace_func(sol, temp, sol_cost, 1.0, True)
while temp > temp_final:
        for _ in range(repeat):
# Find a new solution candidate and evaluate its cost.
cand = move_func(sol)
cand_cost = cost_func(cand)
# Store the best solution, regardless if it is accepted or not.
if best_cost > cand_cost:
best = cand
best_cost = cand_cost
accepted, probability = _accept(random_sample(), cand_cost - sol_cost, temp)
if accepted:
sol = cand
sol_cost = cand_cost
if trace_func:
trace_func(cand, temp, cand_cost, probability, accepted)
temp *= cooling_factor
return best
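# --- Usage sketch (editor's addition, not part of the Cirq API) ---
# A minimal, self-contained example assuming a quadratic cost and Gaussian
# moves are acceptable stand-ins for a real placement problem.
if __name__ == '__main__':
    import random
    def _example_cost(x: float) -> float:
        # Quadratic bowl with its minimum at x = 3.
        return (x - 3.0) ** 2
    def _example_move(x: float) -> float:
        # Propose a small Gaussian perturbation of the current solution.
        return x + random.gauss(0.0, 0.1)
    result = anneal_minimize(
        initial=0.0,
        cost_func=_example_cost,
        move_func=_example_move,
        random_sample=random.random,
    )
    print('Annealed solution:', result)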
|
quantumlib/Cirq
|
cirq-google/cirq_google/line/placement/optimization.py
|
Python
|
apache-2.0
| 4,663
| 0.001501
|
# xVector Engine Client
# Copyright (c) 2011 James Buchwald
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Contains code for nicely reporting errors to the user.
"""
import logging
import traceback
from PyQt4 import QtGui
from xVClient import ClientGlobals
mainlog = logging.getLogger("")
# Severity constants
FatalError = 1
"""Fatal error, forces termination of application."""
NormalError = 2
"""Normal error, this has impact but does not crash the program."""
WarningError = 3
"""Warning, this does not affect function but should cause concern."""
NoticeError = 4
"""General information."""
def ShowError(message, severity=NormalError, parent=None):
"""
Displays an error message to the user and waits for a response.
"""
dlg = QtGui.QMessageBox(parent)
dlg.setText(message)
if severity == FatalError:
dlg.setIcon(QtGui.QMessageBox.Critical)
dlg.setWindowTitle("Fatal Error")
elif severity == NormalError:
dlg.setIcon(QtGui.QMessageBox.Critical)
dlg.setWindowTitle("Error")
elif severity == WarningError:
dlg.setIcon(QtGui.QMessageBox.Warning)
dlg.setWindowTitle("Warning")
elif severity == NoticeError:
dlg.setIcon(QtGui.QMessageBox.Information)
dlg.setWindowTitle("Notice")
else:
dlg.setIcon(QtGui.QMessageBox.NoIcon)
dlg.setWindowTitle("Message")
dlg.exec_()
def ShowException(severity=NormalError, start_msg='An error has occurred!', parent=None):
'''
Displays the currently-handled exception in an error box.
'''
msg = start_msg + "\n\n" + traceback.format_exc()
ShowError(msg, severity, parent)
class ErrorMessageHandler(logging.Handler):
'''
Logging handler that displays messages in Qt message boxes.
'''
def __init__(self, parent=None):
'''
Creates a new handler.
@type parent: QtGui.QWidget
@param parent: Parent widget for errors to be displayed under.
'''
super(ErrorMessageHandler,self).__init__()
self.Parent = parent
'''Parent widget for errors to be displayed under.'''
def _ShowError(self, message):
'''
Shows an error message and returns immediately.
@type message: string
@param message: Message to display.
'''
app = ClientGlobals.Application
wnd = QtGui.QMessageBox(parent=self.Parent)
wnd.setIcon(QtGui.QMessageBox.Critical)
wnd.setWindowTitle("Error")
wnd.setStandardButtons(QtGui.QMessageBox.Ok)
wnd.setText(message)
wnd.exec_()
def emit(self, record):
self._ShowError(record.getMessage())
def ConfigureLogging(parent=None):
'''
Configures the logging mechanism to report errors as dialog boxes.
@type parent: QtGui.QWidget
@param parent: Parent widget for errors to be displayed under.
'''
# Set up the error handler (output to a message box).
handler = ErrorMessageHandler(parent)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
handler.setLevel(logging.ERROR)
mainlog.addHandler(handler)
# Send lower-level messages to stderr.
lowhandler = logging.StreamHandler()
lowhandler.setFormatter(formatter)
lowhandler.setLevel(logging.DEBUG)
mainlog.addHandler(lowhandler)
# Make sure that the logger catches all levels of messages.
mainlog.setLevel(logging.DEBUG)
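# --- Usage sketch (editor's addition) ---
# A hedged example of wiring the handler into a minimal PyQt4 application.
# It assumes ClientGlobals.Application is populated elsewhere during normal
# startup; the demo works regardless, since _ShowError does not depend on it.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    ConfigureLogging()
    mainlog.error("Example error - this pops up in a dialog box.")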
|
buchwj/xvector
|
client/xVClient/ErrorReporting.py
|
Python
|
gpl-3.0
| 4,145
| 0.002413
|
from bs4 import BeautifulSoup
import xlsxwriter
workbook = xlsxwriter.Workbook("data.xlsx")
worksheet = workbook.add_worksheet()
# Read the saved HTML page; naming a parser explicitly avoids BeautifulSoup's
# "no parser was explicitly specified" warning.
with open('rough.html', "r") as f:
    data = f.read()
soup = BeautifulSoup(data, "html.parser")
div = soup.find('div', {"class": 'dataTables_scroll'})
tbody = div.find('tbody')
rows = tbody.find_all('tr')
rowno = 0
for row in rows:
    a = row.find_all('a')
    td = row.find_all('td')
    worksheet.write(rowno, 1, a[2].text)
    # Keep only the text from the 'P:' marker onwards.
    worksheet.write(rowno, 2, td[3].text[td[3].text.find('P:'):])
    worksheet.write(rowno, 3, a[3].text)
    worksheet.write(rowno, 4, a[4].text)
    worksheet.write(rowno, 5, a[3].text)
    worksheet.write(rowno, 6, td[6].text)
    rowno += 1
workbook.close()
print "Done"
|
melvin0008/pythoncodestrial
|
trybs4.py
|
Python
|
apache-2.0
| 697
| 0.030129
|
import os
import numpy
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('datasets', parent_package, top_path)
config.add_data_dir('data')
config.add_data_dir('descr')
config.add_data_dir('images')
config.add_data_dir(os.path.join('tests', 'data'))
config.add_extension('_svmlight_format',
sources=['_svmlight_format.c'],
include_dirs=[numpy.get_include()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/datasets/setup.py
|
Python
|
mit
| 658
| 0
|
import json
from traceback import format_exception
from click.testing import CliRunner
import pytest
from qypi.__main__ import qypi
def show_result(r):
if r.exception is not None:
return "".join(format_exception(*r.exc_info))
else:
return r.output
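# Pattern note (editor's addition): each test below patches qypi.api.ServerProxy
# with a mock whose XML-RPC methods return canned data, invokes the CLI through
# click.testing.CliRunner, and asserts on the JSON that qypi writes to stdout.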
def test_list(mocker):
spinstance = mocker.Mock(
**{
"list_packages.return_value": [
"foobar",
"BarFoo",
"quux",
"Gnusto-Cleesh",
"XYZZY_PLUGH",
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["list"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"foobar\n" "BarFoo\n" "quux\n" "Gnusto-Cleesh\n" "XYZZY_PLUGH\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [mocker.call.list_packages()]
def test_owner(mocker):
spinstance = mocker.Mock(
**{
"package_roles.return_value": [
["Owner", "luser"],
["Maintainer", "jsmith"],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["owner", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "foobar": [\n'
" {\n"
' "role": "Owner",\n'
' "user": "luser"\n'
" },\n"
" {\n"
' "role": "Maintainer",\n'
' "user": "jsmith"\n'
" }\n"
" ]\n"
"}\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [mocker.call.package_roles("foobar")]
def test_multiple_owner(mocker):
spinstance = mocker.Mock(
**{
"package_roles.side_effect": [
[
["Owner", "luser"],
["Maintainer", "jsmith"],
],
[
["Owner", "jsmith"],
["Owner", "froody"],
],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["owner", "foobar", "Glarch"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "foobar": [\n'
" {\n"
' "role": "Owner",\n'
' "user": "luser"\n'
" },\n"
" {\n"
' "role": "Maintainer",\n'
' "user": "jsmith"\n'
" }\n"
" ],\n"
' "Glarch": [\n'
" {\n"
' "role": "Owner",\n'
' "user": "jsmith"\n'
" },\n"
" {\n"
' "role": "Owner",\n'
' "user": "froody"\n'
" }\n"
" ]\n"
"}\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.package_roles("foobar"),
mocker.call.package_roles("Glarch"),
]
def test_owned(mocker):
spinstance = mocker.Mock(
**{
"user_packages.return_value": [
["Owner", "foobar"],
["Maintainer", "quux"],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["owned", "luser"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "luser": [\n'
" {\n"
' "package": "foobar",\n'
' "role": "Owner"\n'
" },\n"
" {\n"
' "package": "quux",\n'
' "role": "Maintainer"\n'
" }\n"
" ]\n"
"}\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [mocker.call.user_packages("luser")]
def test_multiple_owned(mocker):
spinstance = mocker.Mock(
**{
"user_packages.side_effect": [
[
["Owner", "foobar"],
["Maintainer", "quux"],
],
[
["Maintainer", "foobar"],
["Owner", "Glarch"],
],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["owned", "luser", "jsmith"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "luser": [\n'
" {\n"
' "package": "foobar",\n'
' "role": "Owner"\n'
" },\n"
" {\n"
' "package": "quux",\n'
' "role": "Maintainer"\n'
" }\n"
" ],\n"
' "jsmith": [\n'
" {\n"
' "package": "foobar",\n'
' "role": "Maintainer"\n'
" },\n"
" {\n"
' "package": "Glarch",\n'
' "role": "Owner"\n'
" }\n"
" ]\n"
"}\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.user_packages("luser"),
mocker.call.user_packages("jsmith"),
]
def test_search(mocker):
spinstance = mocker.Mock(
**{
"search.return_value": [
{
"name": "foobar",
"version": "1.2.3",
"summary": "Foo all your bars",
"_pypi_ordering": False,
},
{
"name": "quux",
"version": "0.1.0",
"summary": "Do that thing this does",
"_pypi_ordering": True,
},
{
"name": "gnusto",
"version": "0.0.0",
"summary": "",
"_pypi_ordering": False,
},
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["search", "term", "keyword:foo", "readme:bar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "name": "foobar",\n'
' "summary": "Foo all your bars",\n'
' "version": "1.2.3"\n'
" },\n"
" {\n"
' "name": "quux",\n'
' "summary": "Do that thing this does",\n'
' "version": "0.1.0"\n'
" },\n"
" {\n"
' "name": "gnusto",\n'
' "summary": null,\n'
' "version": "0.0.0"\n'
" }\n"
"]\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.search(
{"description": ["term", "bar"], "keywords": ["foo"]},
"and",
)
]
def test_browse(mocker):
spinstance = mocker.Mock(
**{
"browse.return_value": [
["foobar", "1.2.3"],
["foobar", "1.2.2"],
["foobar", "1.2.1"],
["foobar", "1.2.0"],
["quux", "0.1.0"],
["gnusto", "0.0.0"],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(
qypi,
["browse", "Typing :: Typed", "Topic :: Utilities"],
)
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.3"\n'
" },\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.2"\n'
" },\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.1"\n'
" },\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.0"\n'
" },\n"
" {\n"
' "name": "quux",\n'
' "version": "0.1.0"\n'
" },\n"
" {\n"
' "name": "gnusto",\n'
' "version": "0.0.0"\n'
" }\n"
"]\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.browse(("Typing :: Typed", "Topic :: Utilities"))
]
def test_browse_packages(mocker):
spinstance = mocker.Mock(
**{
"browse.return_value": [
["foobar", "1.2.3"],
["foobar", "1.2.2"],
["foobar", "1.2.1"],
["foobar", "1.2.0"],
["quux", "0.1.0"],
["gnusto", "0.0.0"],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(
qypi,
["browse", "--packages", "Typing :: Typed", "Topic :: Utilities"],
)
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.3"\n'
" },\n"
" {\n"
' "name": "quux",\n'
' "version": "0.1.0"\n'
" },\n"
" {\n"
' "name": "gnusto",\n'
' "version": "0.0.0"\n'
" }\n"
"]\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.browse(("Typing :: Typed", "Topic :: Utilities"))
]
@pytest.mark.usefixtures("mock_pypi_json")
def test_info():
r = CliRunner().invoke(qypi, ["info", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "name": "foobar",\n'
' "people": [\n'
" {\n"
' "email": "megan30@daniels.info",\n'
' "name": "Brandon Perkins",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "cspencer@paul-fisher.com",\n'
' "name": "Denise Adkins",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Amiga",\n'
' "project_url": "https://dummy.nil/pypi/foobar",\n'
' "release_date": "2019-02-01T09:17:59.172284Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
' "summary": "Including drive environment my it.",\n'
' "unknown_field": "passed through",\n'
' "url": "https://www.johnson.com/homepage.php",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_explicit_latest_version():
r = CliRunner().invoke(qypi, ["info", "foobar==1.0.0"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "name": "foobar",\n'
' "people": [\n'
" {\n"
' "email": "megan30@daniels.info",\n'
' "name": "Brandon Perkins",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "cspencer@paul-fisher.com",\n'
' "name": "Denise Adkins",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Amiga",\n'
' "project_url": "https://dummy.nil/pypi/foobar",\n'
' "release_date": "2019-02-01T09:17:59.172284Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
' "summary": "Including drive environment my it.",\n'
' "unknown_field": "passed through",\n'
' "url": "https://www.johnson.com/homepage.php",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_explicit_version():
r = CliRunner().invoke(qypi, ["info", "foobar==0.2.0"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "name": "foobar",\n'
' "people": [\n'
" {\n"
' "email": "danielstewart@frye.com",\n'
' "name": "Sonya Johnson",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "maynardtim@hotmail.com",\n'
' "name": "Stephen Romero",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Wood",\n'
' "project_url": "https://dummy.nil/pypi/foobar",\n'
' "release_date": "2017-02-04T12:34:05.766270Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/0.2.0",\n'
' "summary": "Water audience cut call.",\n'
' "unknown_field": "passed through",\n'
' "url": "http://www.sanchez.net/index.htm",\n'
' "version": "0.2.0"\n'
" }\n"
"]\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_description():
r = CliRunner().invoke(qypi, ["info", "--description", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "description": "foobar v1.0.0\\n\\nDream political close attorney sit cost inside. Seek hard can bad investment authority walk we. Sing range late use speech citizen.\\n\\nCan money issue claim onto really case. Fact garden along all book sister trip step.\\n\\nView table woman her production result. Fine allow prepare should traditional. Send cultural two care eye.\\n\\nGenerated with Faker",\n'
' "name": "foobar",\n'
' "people": [\n'
" {\n"
' "email": "megan30@daniels.info",\n'
' "name": "Brandon Perkins",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "cspencer@paul-fisher.com",\n'
' "name": "Denise Adkins",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Amiga",\n'
' "project_url": "https://dummy.nil/pypi/foobar",\n'
' "release_date": "2019-02-01T09:17:59.172284Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
' "summary": "Including drive environment my it.",\n'
' "unknown_field": "passed through",\n'
' "url": "https://www.johnson.com/homepage.php",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_multiple_info():
r = CliRunner().invoke(qypi, ["info", "has-prerel", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "name": "has_prerel",\n'
' "people": [\n'
" {\n"
' "email": "freed@hotmail.com",\n'
' "name": "Samantha Gilbert",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "estradakelly@hotmail.com",\n'
' "name": "Bradley Livingston",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Coleco",\n'
' "project_url": "https://dummy.nil/pypi/has_prerel",\n'
' "release_date": "1970-04-21T22:33:29.915221Z",\n'
' "release_url": "https://dummy.nil/pypi/has_prerel/1.0.0",\n'
' "summary": "Boy kid chance indeed resource explain.",\n'
' "unknown_field": "passed through",\n'
' "url": "http://www.johnson.com/author.jsp",\n'
' "version": "1.0.0"\n'
" },\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "name": "foobar",\n'
' "people": [\n'
" {\n"
' "email": "megan30@daniels.info",\n'
' "name": "Brandon Perkins",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "cspencer@paul-fisher.com",\n'
' "name": "Denise Adkins",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Amiga",\n'
' "project_url": "https://dummy.nil/pypi/foobar",\n'
' "release_date": "2019-02-01T09:17:59.172284Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
' "summary": "Including drive environment my it.",\n'
' "unknown_field": "passed through",\n'
' "url": "https://www.johnson.com/homepage.php",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_nonexistent():
r = CliRunner().invoke(qypi, ["info", "does-not-exist", "foobar"])
assert r.exit_code == 1, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "name": "foobar",\n'
' "people": [\n'
" {\n"
' "email": "megan30@daniels.info",\n'
' "name": "Brandon Perkins",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "cspencer@paul-fisher.com",\n'
' "name": "Denise Adkins",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Amiga",\n'
' "project_url": "https://dummy.nil/pypi/foobar",\n'
' "release_date": "2019-02-01T09:17:59.172284Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
' "summary": "Including drive environment my it.",\n'
' "unknown_field": "passed through",\n'
' "url": "https://www.johnson.com/homepage.php",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n"
"qypi: does-not-exist: package not found\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_nonexistent_split():
r = CliRunner(mix_stderr=False).invoke(qypi, ["info", "does-not-exist", "foobar"])
assert r.exit_code == 1, show_result(r)
assert r.stdout == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "name": "foobar",\n'
' "people": [\n'
" {\n"
' "email": "megan30@daniels.info",\n'
' "name": "Brandon Perkins",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "cspencer@paul-fisher.com",\n'
' "name": "Denise Adkins",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Amiga",\n'
' "project_url": "https://dummy.nil/pypi/foobar",\n'
' "release_date": "2019-02-01T09:17:59.172284Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
' "summary": "Including drive environment my it.",\n'
' "unknown_field": "passed through",\n'
' "url": "https://www.johnson.com/homepage.php",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n"
)
assert r.stderr == "qypi: does-not-exist: package not found\n"
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_nonexistent_version():
r = CliRunner().invoke(qypi, ["info", "foobar==2.23.42"])
assert r.exit_code == 1, show_result(r)
assert r.output == ("[]\n" "qypi: foobar: version 2.23.42 not found\n")
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_nonexistent_version_split():
r = CliRunner(mix_stderr=False).invoke(qypi, ["info", "foobar==2.23.42"])
assert r.exit_code == 1, show_result(r)
assert r.stdout == "[]\n"
assert r.stderr == "qypi: foobar: version 2.23.42 not found\n"
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_nonexistent_explicit_version():
r = CliRunner().invoke(qypi, ["info", "does-not-exist==2.23.42"])
assert r.exit_code == 1, show_result(r)
assert r.output == ("[]\n" "qypi: does-not-exist: version 2.23.42 not found\n")
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_nonexistent_explicit_version_split():
r = CliRunner(mix_stderr=False).invoke(qypi, ["info", "does-not-exist==2.23.42"])
assert r.exit_code == 1, show_result(r)
assert r.stdout == "[]\n"
assert r.stderr == "qypi: does-not-exist: version 2.23.42 not found\n"
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_latest_is_prerelease():
r = CliRunner().invoke(qypi, ["info", "has-prerel"])
assert r.exit_code == 0, show_result(r)
data = json.loads(r.output)
assert data[0]["version"] == "1.0.0"
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_latest_is_prerelease_pre():
r = CliRunner().invoke(qypi, ["info", "--pre", "has-prerel"])
assert r.exit_code == 0, show_result(r)
data = json.loads(r.output)
assert data[0]["version"] == "1.0.1a1"
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_explicit_prerelease():
r = CliRunner().invoke(qypi, ["info", "has-prerel==1.0.1a1"])
assert r.exit_code == 0, show_result(r)
data = json.loads(r.output)
assert data[0]["version"] == "1.0.1a1"
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_all_are_prerelease():
r = CliRunner().invoke(qypi, ["info", "prerelease-only"])
assert r.exit_code == 0, show_result(r)
data = json.loads(r.output)
assert data[0]["version"] == "0.2a1"
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_nullfields():
r = CliRunner().invoke(qypi, ["info", "nullfields"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "name": "nullfields",\n'
' "people": [\n'
" {\n"
' "email": "barbara10@yahoo.com",\n'
' "name": "Philip Gonzalez",\n'
' "role": "author"\n'
" }\n"
" ],\n"
' "platform": null,\n'
' "project_url": "https://dummy.nil/pypi/nullfields",\n'
' "release_date": "2007-10-08T07:21:06.191703Z",\n'
' "release_url": "https://dummy.nil/pypi/nullfields/1.0.0",\n'
' "summary": "Film station choose short.",\n'
' "unknown_field": null,\n'
' "url": "https://bryant.com/wp-content/search/author/",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_readme():
r = CliRunner().invoke(qypi, ["readme", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"foobar v1.0.0\n"
"\n"
"Dream political close attorney sit cost inside. Seek hard can bad investment authority walk we. Sing range late use speech citizen.\n"
"\n"
"Can money issue claim onto really case. Fact garden along all book sister trip step.\n"
"\n"
"View table woman her production result. Fine allow prepare should traditional. Send cultural two care eye.\n"
"\n"
"Generated with Faker\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_readme_explicit_version():
r = CliRunner().invoke(qypi, ["readme", "foobar==0.2.0"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"foobar v0.2.0\n"
"\n"
"Lead must laugh trouble expert else get million.\n"
"\n"
"Top shake walk. A cold national.\n"
"\n"
"Bring energy yourself suffer. Catch concern official relate voice base.\n"
"\n"
"Generated with Faker\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_files():
r = CliRunner().invoke(qypi, ["files", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "files": [\n'
" {\n"
' "comment_text": "",\n'
' "digests": {\n'
' "md5": "f92e8964922878760a07f783341a58ae",\n'
' "sha256": "84750bd98e3f61441e4b86ab443ebae41e65557e2b071b5a8e22a7d61a48a59d"\n'
" },\n"
' "filename": "foobar-1.0.0-py2.py3-none-any.whl",\n'
' "has_sig": true,\n'
' "md5_digest": "f92e8964922878760a07f783341a58ae",\n'
' "packagetype": "bdist_wheel",\n'
' "python_version": "py2.py3",\n'
' "size": 735,\n'
' "unknown_field": "passed through",\n'
' "upload_time": "2019-02-01T09:17:59",\n'
' "upload_time_iso_8601": "2019-02-01T09:17:59.172284Z",\n'
' "url": "https://files.dummyhosted.nil/packages/7f/97/e5ec19aed5d108c2f6c2fc6646d8247b1fadb49f0bf48e87a0fca8827696/foobar-1.0.0-py2.py3-none-any.whl"\n'
" }\n"
" ],\n"
' "name": "foobar",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_files_explicit_version():
r = CliRunner().invoke(qypi, ["files", "foobar==0.2.0"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "files": [\n'
" {\n"
' "comment_text": "",\n'
' "digests": {\n'
' "md5": "5ced02e62434eb5649276e6f12003009",\n'
' "sha256": "f0862078b4f1af49f6b8c91153e9a7df88807900f9cf1b24287a901e515c824e"\n'
" },\n"
' "filename": "foobar-0.2.0-py2.py3-none-any.whl",\n'
' "has_sig": false,\n'
' "md5_digest": "5ced02e62434eb5649276e6f12003009",\n'
' "packagetype": "bdist_wheel",\n'
' "python_version": "py2.py3",\n'
' "size": 752,\n'
' "unknown_field": "passed through",\n'
' "upload_time": "2017-02-04T12:34:05",\n'
' "upload_time_iso_8601": "2017-02-04T12:34:05.766270Z",\n'
' "url": "https://files.dummyhosted.nil/packages/54/40/36eccb727704b5dabfda040e0eb23c29dbe26cf1a78cbeb24f33deb26b22/foobar-0.2.0-py2.py3-none-any.whl"\n'
" }\n"
" ],\n"
' "name": "foobar",\n'
' "version": "0.2.0"\n'
" }\n"
"]\n"
)
@pytest.mark.usefixtures("mock_pypi_json")
def test_releases():
r = CliRunner().invoke(qypi, ["releases", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "foobar": [\n'
" {\n"
' "is_prerelease": false,\n'
' "release_date": "2013-01-18T18:53:56.265173Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/0.1.0",\n'
' "version": "0.1.0"\n'
" },\n"
" {\n"
' "is_prerelease": false,\n'
' "release_date": "2017-02-04T12:34:05.766270Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/0.2.0",\n'
' "version": "0.2.0"\n'
" },\n"
" {\n"
' "is_prerelease": false,\n'
' "release_date": "2019-02-01T09:17:59.172284Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
' "version": "1.0.0"\n'
" }\n"
" ]\n"
"}\n"
)
# TODO: add tests for `qypi --index-url`.
|
jwodder/qypi
|
test/test_main.py
|
Python
|
mit
| 30,735
| 0.000488
|
# -*- coding: utf-8 -*-
"""Doctest for method/function calls.
We're going to use these types for extra testing
>>> from UserList import UserList
>>> from UserDict import UserDict
We're defining four helper functions
>>> def e(a,b):
... print a, b
>>> def f(*a, **k):
... print a, test_support.sortdict(k)
>>> def g(x, *y, **z):
... print x, y, test_support.sortdict(z)
>>> def h(j=1, a=2, h=3):
... print j, a, h
Argument list examples
>>> f()
() {}
>>> f(1)
(1,) {}
>>> f(1, 2)
(1, 2) {}
>>> f(1, 2, 3)
(1, 2, 3) {}
>>> f(1, 2, 3, *(4, 5))
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *[4, 5])
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *UserList([4, 5]))
(1, 2, 3, 4, 5) {}
Here we add keyword arguments
>>> f(1, 2, 3, **{'a':4, 'b':5})
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *[4, 5], **{'a':6, 'b':7})
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b': 9})
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
>>> f(1, 2, 3, **UserDict(a=4, b=5))
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *(4, 5), **UserDict(a=6, b=7))
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **UserDict(a=8, b=9))
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
Examples with invalid arguments (TypeErrors). We're also testing the function
names in the exception messages.
Verify clearing of SF bug #733667
>>> e(c=4)
Traceback (most recent call last):
...
TypeError: e() got an unexpected keyword argument 'c'
>>> g()
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*())
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*(), **{})
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(1)
1 () {}
>>> g(1, 2)
1 (2,) {}
>>> g(1, 2, 3)
1 (2, 3) {}
>>> g(1, 2, 3, *(4, 5))
1 (2, 3, 4, 5) {}
>>> class Nothing: pass
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be an iterable, not instance
>>> class Nothing:
... def __len__(self): return 5
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be an iterable, not instance
>>> class Nothing():
... def __len__(self): return 5
... def __getitem__(self, i):
... if i<3: return i
... else: raise IndexError(i)
...
>>> g(*Nothing())
0 (1, 2) {}
>>> class Nothing:
... def __init__(self): self.c = 0
... def __iter__(self): return self
... def next(self):
... if self.c == 4:
... raise StopIteration
... c = self.c
... self.c += 1
... return c
...
>>> g(*Nothing())
0 (1, 2, 3) {}
Check for issue #4806: Does a TypeError in a generator get propagated with the
right error message?
>>> def broken(): raise TypeError("myerror")
...
>>> g(*(broken() for i in range(1)))
Traceback (most recent call last):
...
TypeError: myerror
Make sure that the function doesn't stomp the dictionary
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = d.copy()
>>> g(1, d=4, **d)
1 () {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> d == d2
True
What about willful misconduct?
>>> def saboteur(**kw):
... kw['x'] = 'm'
... return kw
>>> d = {}
>>> kw = saboteur(a=1, **d)
>>> d
{}
>>> g(1, 2, 3, **{'x': 4, 'y': 5})
Traceback (most recent call last):
...
TypeError: g() got multiple values for keyword argument 'x'
>>> f(**{1:2})
Traceback (most recent call last):
...
TypeError: f() keywords must be strings
>>> h(**{'e': 2})
Traceback (most recent call last):
...
TypeError: h() got an unexpected keyword argument 'e'
>>> h(*h)
Traceback (most recent call last):
...
TypeError: h() argument after * must be an iterable, not function
>>> dir(*h)
Traceback (most recent call last):
...
TypeError: dir() argument after * must be an iterable, not function
>>> None(*h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after * must be an iterable, \
not function
>>> h(**h)
Traceback (most recent call last):
...
TypeError: h() argument after ** must be a mapping, not function
>>> dir(**h)
Traceback (most recent call last):
...
TypeError: dir() argument after ** must be a mapping, not function
>>> None(**h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after ** must be a mapping, \
not function
>>> dir(b=1, **{'b': 1})
Traceback (most recent call last):
...
TypeError: dir() got multiple values for keyword argument 'b'
Another helper function
>>> def f2(*a, **b):
... return a, b
>>> d = {}
>>> for i in xrange(512):
... key = 'k%d' % i
... d[key] = i
>>> a, b = f2(1, *(2,3), **d)
>>> len(a), len(b), b == d
(3, 512, True)
>>> class Foo:
... def method(self, arg1, arg2):
... return arg1+arg2
>>> x = Foo()
>>> Foo.method(*(x, 1, 2))
3
>>> Foo.method(x, *(1, 2))
3
>>> Foo.method(*(1, 2, 3))
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
>>> Foo.method(1, *[2, 3])
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
A PyCFunction that takes only positional parameters should allow an
empty keyword dictionary to pass without a complaint, but raise a
TypeError if the dictionary is not empty
>>> try:
... silence = id(1, *{})
... True
... except:
... False
True
>>> id(1, **{'foo': 1})
Traceback (most recent call last):
...
TypeError: id() takes no keyword arguments
A corner case of keyword dictionary items being deleted during
the function call setup. See <http://bugs.python.org/issue2016>.
>>> class Name(str):
... def __eq__(self, other):
... try:
... del x[self]
... except KeyError:
... pass
... return str.__eq__(self, other)
... def __hash__(self):
... return str.__hash__(self)
>>> x = {Name("a"):1, Name("b"):2}
>>> def f(a, b):
... print a,b
>>> f(**x)
1 2
An obscure message:
>>> def f(a, b):
... pass
>>> f(b=1)
Traceback (most recent call last):
...
TypeError: f() takes exactly 2 arguments (1 given)
The number of arguments passed in includes keywords:
>>> def f(a):
... pass
>>> f(6, a=4, *(1, 2, 3))
Traceback (most recent call last):
...
TypeError: f() takes exactly 1 argument (5 given)
"""
import unittest
import sys
from test import test_support
class ExtCallTest(unittest.TestCase):
def test_unicode_keywords(self):
def f(a):
return a
self.assertEqual(f(**{u'a': 4}), 4)
self.assertRaises(TypeError, f, **{u'stören': 4})
self.assertRaises(TypeError, f, **{u'someLongString':2})
try:
f(a=4, **{u'a': 4})
except TypeError:
pass
else:
self.fail("duplicate arguments didn't raise")
def test_main():
test_support.run_doctest(sys.modules[__name__], True)
test_support.run_unittest(ExtCallTest)
if __name__ == '__main__':
test_main()
|
wang1352083/pythontool
|
python-2.7.12-lib/test/test_extcall.py
|
Python
|
mit
| 7,975
| 0.000251
|
import os
# Django settings for mysite project.
DEBUG = True
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
SITE_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'db.sqlite3'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
DATE_INPUT_FORMATS = ('%d/%m/%Y',)  # Trailing comma makes this a 1-tuple, not a bare string.
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Zurich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '05=^qgbhg3!6-dzb6#&2j^jmh-2fgc%22!z_!w*&8iy_m$2*$*'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(SITE_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages'
],
'debug': DEBUG,
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'polls'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
sebnorth/extended_user
|
mysite/settings.py
|
Python
|
bsd-3-clause
| 5,917
| 0.001183
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from django.conf import settings
from openstack_dashboard.api.rest import nova
from openstack_dashboard.test import helpers as test
class NovaRestTestCase(test.TestCase):
#
# Keypairs
#
@mock.patch.object(nova.api, 'nova')
def test_keypair_get(self, nc):
request = self.mock_rest_request()
nc.keypair_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.Keypairs().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.keypair_list.assert_called_once_with(request)
@mock.patch.object(nova.api, 'nova')
def test_keypair_create(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!"}''')
new = nc.keypair_create.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'sekrit'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "sekrit"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_create.assert_called_once_with(request, 'Ni!')
@mock.patch.object(nova.api, 'nova')
def test_keypair_import(self, nc):
request = self.mock_rest_request(body='''
{"name": "Ni!", "public_key": "hi"}
''')
new = nc.keypair_import.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'hi'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "hi"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_import.assert_called_once_with(request, 'Ni!', 'hi')
#
# Availability Zones
#
def test_availzone_get_brief(self):
self._test_availzone_get(False)
def test_availzone_get_detailed(self):
self._test_availzone_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_availzone_get(self, detail, nc):
if detail:
request = self.mock_rest_request(GET={'detailed': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.availability_zone_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.AvailabilityZones().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.availability_zone_list.assert_called_once_with(request, detail)
#
# Limits
#
def test_limits_get_not_reserved(self):
self._test_limits_get(False)
def test_limits_get_reserved(self):
self._test_limits_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_limits_get(self, reserved, nc):
if reserved:
request = self.mock_rest_request(GET={'reserved': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.tenant_absolute_limits.return_value = {'id': 'one'}
response = nova.Limits().get(request)
self.assertStatusCode(response, 200)
nc.tenant_absolute_limits.assert_called_once_with(request, reserved)
self.assertEqual(response.content, '{"id": "one"}')
#
# Servers
#
@mock.patch.object(nova.api, 'nova')
def test_server_create_missing(self, nc):
request = self.mock_rest_request(body='''{"name": "hi"}''')
response = nova.Servers().post(request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content,
'"missing required parameter \'source_id\'"')
nc.server_create.assert_not_called()
@mock.patch.object(nova.api, 'nova')
def test_server_create_basic(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!",
"source_id": "image123", "flavor_id": "flavor123",
"key_name": "sekrit", "user_data": "base64 yes",
"security_groups": [{"name": "root"}]}
''')
new = nc.server_create.return_value
new.to_dict.return_value = {'id': 'server123'}
new.id = 'server123'
response = nova.Servers().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content, '{"id": "server123"}')
self.assertEqual(response['location'], '/api/nova/servers/server123')
nc.server_create.assert_called_once_with(
request, 'Ni!', 'image123', 'flavor123', 'sekrit', 'base64 yes',
[{'name': 'root'}]
)
@mock.patch.object(nova.api, 'nova')
def test_server_get_single(self, nc):
request = self.mock_rest_request()
nc.server_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Server().get(request, "1")
self.assertStatusCode(response, 200)
nc.server_get.assert_called_once_with(request, "1")
#
# Extensions
#
@mock.patch.object(nova.api, 'nova')
@mock.patch.object(settings,
'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', ['baz'])
    def test_extension_list(self, nc):
request = self.mock_rest_request()
nc.list_extensions.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'baz'}}),
]
response = nova.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"name": "foo"}, {"name": "bar"}]}')
nc.list_extensions.assert_called_once_with(request)
#
# Flavors
#
def test_get_extras_no(self):
self._test_flavor_get_single(get_extras=False)
def test_get_extras_yes(self):
self._test_flavor_get_single(get_extras=True)
def test_get_extras_default(self):
self._test_flavor_get_single(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_get_single(self, nc, get_extras):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request()
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Flavor().get(request, "1")
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content, '{"extras": {}, "name": "1"}')
else:
self.assertEqual(response.content, '{"name": "1"}')
nc.flavor_get.assert_called_once_with(request, "1",
get_extras=get_extras)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_public(self, nc, is_public=None):
if is_public:
request = self.mock_rest_request(GET={'is_public': 'tRuE'})
elif is_public is None:
request = self.mock_rest_request(GET={})
else:
request = self.mock_rest_request(GET={'is_public': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.flavor_list.assert_called_once_with(request, is_public=is_public,
get_extras=False)
def test_flavor_list_private(self):
self._test_flavor_list_public(is_public=False)
def test_flavor_list_public(self):
self._test_flavor_list_public(is_public=True)
def test_flavor_list_public_none(self):
self._test_flavor_list_public(is_public=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_extras(self, nc, get_extras=None):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request(GET={})
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content,
'{"items": [{"extras": {}, "id": "1"}, '
'{"extras": {}, "id": "2"}]}')
else:
self.assertEqual(response.content,
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.flavor_list.assert_called_once_with(request, is_public=None,
get_extras=get_extras)
def test_flavor_list_extras_no(self):
self._test_flavor_list_extras(get_extras=False)
def test_flavor_list_extras_yes(self):
self._test_flavor_list_extras(get_extras=True)
def test_flavor_list_extras_absent(self):
self._test_flavor_list_extras(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def test_flavor_extra_specs(self, nc):
request = self.mock_rest_request()
nc.flavor_get_extras.return_value.to_dict.return_value = {'foo': '1'}
response = nova.FlavorExtraSpecs().get(request, "1")
self.assertStatusCode(response, 200)
nc.flavor_get_extras.assert_called_once_with(request, "1", raw=True)
|
wangxiangyu/horizon
|
openstack_dashboard/test/api_tests/nova_rest_tests.py
|
Python
|
apache-2.0
| 11,121
| 0
|
#!/usr/bin/python3
def sanitize(time_string):
    if '-' in time_string:
        splitter = '-'
    elif ':' in time_string:
        splitter = ':'
    else:
        return time_string
    (mins, secs) = time_string.strip().split(splitter)
    return mins + '.' + secs
def get_coach_data(filename):
    try:
        with open(filename) as fn:
            data = fn.readline()
        return data.strip().split(',')
    except IOError as ioerr:
        print('File Error: ' + str(ioerr))
        return None
sarah = get_coach_data('sarah2.txt')
if sarah is not None:
    (sarah_name, sarah_dob) = sarah.pop(0), sarah.pop(0)
    print(sarah_name + "'s fastest times are: " +
          str(sorted(set([sanitize(t) for t in sarah]))[0:3]))
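# Quick illustration (editor's addition): sanitize('2-34') and sanitize('2:34')
# both normalize to '2.34', while an already-clean '2.34' passes through as-is.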
|
clovemfeng/studydemo
|
20140617/userlist_data.py
|
Python
|
gpl-2.0
| 657
| 0.024353
|
import os
def create_peanut(peanut_name):
    peanut_dir = './peanuts/%s' % peanut_name
    if os.path.exists(peanut_dir):
        print('Peanut already exists')
        return
    os.mkdir(peanut_dir)
    os.mkdir(peanut_dir + '/templates')
    # An empty __init__.py marks the new peanut directory as a package.
    with open(peanut_dir + '/__init__.py', 'w') as f:
        f.write('')
    # main.py carries the peanut metadata and its load() entry point.
    with open(peanut_dir + '/main.py', 'w') as f:
        f.write('\n__META__ = {\n')
        f.write(" 'displayName': '%s',\n" % peanut_name)
        f.write(" 'description': 'Peanut description',\n")
        f.write(" 'version': '0.1',\n")
        f.write(" 'enabled': True,\n")
        f.write("}\n\n")
        f.write('def load(peanut):\n')
        f.write(" print('Loading peanut %s')\n" % peanut_name)
def clean():
    # Remove the temporary working directory if a previous run left one behind.
    if os.path.exists('./.__tmp__') and os.path.isdir('./.__tmp__'):
        import shutil
        shutil.rmtree('./.__tmp__')
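# --- Usage sketch (editor's addition) ---
# create_peanut() assumes a ./peanuts directory already exists, so the demo
# creates it first; the peanut name is illustrative only.
if __name__ == '__main__':
    if not os.path.exists('./peanuts'):
        os.mkdir('./peanuts')
    create_peanut('demo')  # Scaffolds ./peanuts/demo with templates/, etc.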
|
donkeysharp/elvispy
|
elvis/climanager.py
|
Python
|
mit
| 889
| 0.00225
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-03 14:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('presence', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Session',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.DateTimeField()),
('end', models.DateTimeField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='presence.Entity')),
],
),
]
|
RESTfactory/presence
|
presence/migrations/0002_session.py
|
Python
|
gpl-3.0
| 830
| 0.00241
|
# -*- coding: utf-8 -*-
"""
Name : multilayers
Author : Joan Juvert <trust.no.one.51@gmail.com>
Version : 1.0
Description : A class library to simulate light propagation in
: multilayer systems.
Copyright 2012 Joan Juvert
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
################################# ToDo ################################
#
# Some attributes that have a 'get' method could be decorated as
# properties in order to suppress the parentheses in the method call.
#
# The reflection coefficients for TE and TM waves, rte and rtm, as well
# as their ratio -rtm/rte, could be expressed in terms of ellipsometric
# angles Psi and Delta (see Handbook of ellipsometry, Tompkins)
#
#######################################################################
import bphysics as bp
import numpy as np
import scipy.interpolate as interpolation
############################ Class definitions ########################
class Medium(object):
"""
The Medium class implements an object representing an optical
medium (basically its refractive index).
It contains the minimum and maximum wavelengths for which the
refractive index is known and a couple of interpolators to calculate
the refractive index and extintion coefficient at any wavelength in
the available range.
All the attributes are private and accessed through the provided
methods.
"""
def __init__(self, filename, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None):
"""
Initialize a Medium instance.
The refractive indices that characterize the medium are read
from a text file. After loading the table of refractive indices
        an interpolator is built that makes it possible to calculate the
        refractive index at any wavelength within the available range.
Note that the table is actually read through the numpy.loadtxt
function. The loaded text file must have a column with the
wavelength values, another with the real part of the refractive
index, and another with its imaginary part. If there are other
columns in your file, or there are not in that order, the
'usecols' optional argument can be used to select which columns
to read.
Parameters
----------
filename : str
Path to the file containing the table of triplets
(wavelength, n, k) that characterize the index of refraction
of the medium.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will
convert that column to a float. E.g., if column 0 is a date
string:``converters = {0: datestr2num}``. Converters can
also be used to provide a default value for missing data
(but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``.
Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th
columns. The default, None, results in all columns being
read.
Returns
-------
out : Medium
A Medium instance.
See also
--------
numpy.loadtxt
"""
# Initialize variables
self.__maxWlength = None
self.__minWlength = None
self.__nInterpolator = None
self.__kInterpolator = None
# Load the table of refractive indices and generate the
# interpolators.
table = np.loadtxt(filename, 'float', comments, delimiter,
converters, skiprows, usecols)
wavelengths = table[:, 0]
refrIndex = table[:, 1]
extCoef = table[:, 2]
self.__maxWlength = wavelengths.max()
self.__minWlength = wavelengths.min()
self.__nInterpolator = interpolation.interp1d(
wavelengths, refrIndex, kind='cubic')
self.__kInterpolator = interpolation.interp1d(
wavelengths, extCoef, kind='cubic')
def getRefrIndex(self, wavelength):
"""
Returns the complex refractive index at the given wavelength.
Parameters
----------
wavelength : float
The wavelength at which we want to calculate the complex
refractive index. In the same units as in the file from
which the refractive indices were loaded.
Returns
-------
out : numpy.complex128
The complex refractive index.
"""
try:
return self.__nInterpolator(wavelength) + \
self.__kInterpolator(wavelength) * 1j
except ValueError:
print("Error: you are trying to work at a wavelength outside " + \
"the range where the refractive indices are known")
raise
def getMinMaxWlength(self):
"""
Returns a tuple (min, max) with the shortest and longest
wavelengths for which the refractive index is known.
Returns
-------
out : tuple
A tuple with the minimum and maximum wavelengths for which
the refractive index can be calculated. In the same units as
in the file from which the refractive indices were loaded.
"""
return (self.__minWlength, self.__maxWlength)
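# Example usage of Medium (a minimal sketch; "silicon.txt" is a hypothetical
# three-column file with wavelength, n and k):
#
#     si = Medium("silicon.txt")
#     n_complex = si.getRefrIndex(632.8)      # complex index at 632.8
#     wl_min, wl_max = si.getMinMaxWlength()  # available wavelength range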
class Multilayer(object):
"""
The Multilayer class implements a layered optical medium in a
logical way. This allows performing some complex calculations in an
understandable and flexible way.
All the attributes are private and accessed through the provided
methods. The structure is the following:
workingWavelength
minMaxWlength
polarization
charMatrixUpDown
charMatrixDownUp
coefficientsUpDown --> {'r', 't', 'R', 'T'}
coefficientsDownUp --> {'r', 't', 'R', 'T'}
stack --> [
top medium,
layer 1
.
.
.
layer N,
bottom medium ----> {
] 'medium', ------> Medium instance
'position',
'thickness',
'angle',
'matrix'
'refindex'
}
There are properties that are common to the whole system:
- Wavelength of the light.
- Minimum and maximum wavelengths at which the refractive
indices can be calculated in all the layers.
- Polarization of the light.
- The characteristic matrix in the up-down direction of
propagation.
- The characteristic matrix in the down-up direction of
propagation.
- The optical coefficients (reflection coefficient, refraction
coefficient, reflectance and transmittance).
The stack is implemented as a list and contains parameters that
change in each layer. Each layer is a dictionary with the following
data:
- The medium (determines the refractive index). This is a
reference to a Medium instance.
- The position (z coordinate) of the layer.
- The thickness of the layer.
- The propagation angle of the light
- The characteristic matrix of the layer.
- The complex refractive index of the layer at the current
wavelength.
"""
def __init__(self, mediums):
"""
Generates a multilayer structure.
Note that a system with a layer of zero thickness is physically
the same as the system without that layer, and the results of
the simulations will be the same. However, bear in mind that you
cannot "grow" a nonexistent layer but you can "grow" an existing
zero thickness layer (i.e, change its thickness).
Initializing the multilayer does not involve providing any
information regarding the properties of the light propagating
across it. Before any calculation can be made you must enter the
wavelength, polarization and propagation angle of the light
using the appropriate methods.
Parameters
----------
mediums : list
A list containing the Medium instances that form the
multilayer system. The first element of the list corresponds
to the upper medium and the last one corresponds to the
bottom medium. At least two mediums must be given.
Each element of the list (except the first and the last) is
another list with two elements: the first is a reference to
a Medium instance, and the second is a scalar representing
the thickness of that layer. If only a Medium instance is
given instead of a list with two elements, then the
thickness will be considered zero. If the thickness is an
'int' it will be promoted to 'float'. The thickness must be
in the same units as the wavelength.
The first and last elements of "mediums" are just a
reference to the corresponding Medium instances. The
thickness is not necessary because they represent the top
and bottom mediums and the thickness will be considered
infinite.
Returns
-------
out : Multilayer
A multilayer instance.
Example
-------
If topmedium, layer1, layer2 and bottommedium are Medium
instances, then the following statement builds a system with
topmedium and bottommedium as top and bottom mediums
respectively, and two layers, one of layer1 10 units thick and
another of layer2 15 units thick. The thickness in the same
units as the wavelengths.
system = Multilayer([
topmedium,
[layer1, 10],
[layer2, 15],
bottommedium])
"""
# Properties of the light common to all the layers of the system
self.__workingWavelength = None
self.__polarization = None
self.__minMaxWlength = None
# List of the mediums conforming the multilayer system. Each
# element is a dictionary with the following keys:
# - medium: a reference to the corresponding Medium instance.
# - position: position of the lower interface of the layer.
# This is calculated automatically. The origin is at the
# boundary between the lower medium and the next layer.
# - thickness: thickness of the layer.
# - propangle: propagation angle of the light.
# - matrix: characteristic matrix of the layer.
self.__stack = []
# The following instance variables contain the characteristic
# matrices of the system (one for the up->down direction and
# another for the opposite) and the coefficients of the system
# (also for both directions). The coefficients are stored in a
# dictionary with the following keys:
# - r: reflection coefficient
# - t: transmission coefficient
# - R: reflectivity
# - T: transmittivity
self.__charMatrixUpDown = None
self.__charMatrixDownUp = None
self.__coefficientsUpDown = {
'r': None, 't': None, 'R': None, 'T': None}
self.__coefficientsDownUp = {
'r': None, 't': None, 'R': None, 'T': None}
# Check that we get at least two mediums
try:
len(mediums)
except TypeError:
error = "Multilayer creation error: a list of mediums is expected"
print(error)
raise
if len(mediums) < 2:
error = "Multilayer creation error: at least two mediums must " + \
"be given"
print(error)
raise ValueError
# Start the creation of the multilayer
for (index, medium) in enumerate(mediums):
if (index == 0) or (index == len(mediums) - 1):
# First and last mediums.
# Check that we are given a Medium instance.
if not isinstance(medium, Medium):
error = "Multilayer creation error: element " + \
"%i is not a Medium instance" % index
print(error)
raise TypeError
self.__stack.append({
'medium': medium, 'position': None,
'thickness': np.inf, 'propangle': None,
'matrix': None, 'refindex': None})
else:
# Intermediate layers.
# If we have a Medium instance we consider the
# thickness to be zero. Otherwise we expect a list
# [medium, thickness]
if isinstance(medium, Medium):
self.__stack.append({
'medium': medium, 'position': None,
'thickness': 0.0, 'propangle': None,
'matrix': None, 'refindex': None})
elif isinstance(medium, list):
if len(medium) != 2:
error = "Multilayer creation error: " + \
"element %i must be either a " % index + \
"Medium instance or a list [Medium, thickness]"
print(error)
raise TypeError
if not isinstance(medium[0], Medium):
error = "Multilayer creation error: first " + \
"component of element %i must be " % index + \
"a Medium instance"
print(error)
raise TypeError
try:
thick = float(medium[1])
except TypeError:
error = "Multilayer creation error: element " + \
"%i, thickness must be an 'int' " % index + \
"or 'float'"
print(error)
raise
except ValueError:
error = "Multilayer creation error: element " + \
"%i, thickness must be an 'int' " % index + \
"or 'float'"
print(error)
raise
if medium[1] < 0:
error = "Multilayer creation error: element " + \
"%i, thickness must be >= 0" % index
print(error)
raise ValueError
self.__stack.append({
'medium': medium[0], 'position': None,
'thickness': thick, 'propangle': None,
'matrix': None, 'refindex': None})
else:
error = "Multilayer creation error: element " + \
"%i must be either a Medium instance " % index + \
"or a list [Medium, thickness]"
print(error)
raise TypeError
# Calculate the positions of each layer
self.calcPositions()
# Make sure that there is a common range of wavelengths where
# the refractive index can be calculated.
# What we have to do is find the shortest and longest wavelength
# for each medium of the multilayer and then find the longest of
# the shortest and the shortest of the longest.
minimums = np.empty(self.numLayers())
maximums = np.empty(self.numLayers())
for index, layer in enumerate(self.__stack):
minimums[index] = layer['medium'].getMinMaxWlength()[0]
maximums[index] = layer['medium'].getMinMaxWlength()[1]
minimum = np.max(minimums)
maximum = np.min(maximums)
# Check that minimum is lower than maximum. Otherwise the
# intersection of the ranges of all the mediums is zero and
# therefore we do not have any refractive index common to all
# the layers
if minimum >= maximum:
error = "Fatal error: it is not possible to calculate any " + \
"refractive index common to all mediums"
print(error)
raise ValueError
self.__minMaxWlength = (minimum, maximum)
def calcPositions(self):
"""
This method calculates the positions of each layer along the
z axis (the direction perpendicular to the interfaces between
layers).
The position of a layer corresponds to the z coordinate of its
lower surface. The origin is located at the interface between
the last layer and the bottom medium. Therefore, the position of
the bottom medium is -infinity, the position of the last layer
is 0, the one above is at 0 + the thickness of the one below,
and so on.
This method does not return anything, it just sets the position
of each layer. It is automatically executed during instantiation
of a multilayer. The user typically does not need to call it.
"""
# We start setting the positions from below
reverseIndex = list(range(len(self.__stack)))
reverseIndex.reverse()
self.__stack[reverseIndex[0]]['position'] = -np.inf
self.__stack[reverseIndex[1]]['position'] = 0.0
for layerIndex in reverseIndex[2:]:
self.__stack[layerIndex]['position'] = self.__stack[
layerIndex + 1]['position'] + \
self.getThickness(layerIndex + 1)
def getPosition(self, layerIndex):
"""
This method returns the position of the layer with index
'layerIndex'.
Parameters
----------
layerIndex : int
The index of the layer. Index 0 corresponds to the top
medium.
Returns
-------
out : float
The position of the layer, which corresponds to the z
coordinate of its lower surface.
"""
if layerIndex < 0:
error = "Negative index not accepted"
print(error)
raise IndexError
return self.__stack[layerIndex]['position']
def setThickness(self, thickness, layerIndex):
"""
This method changes the thickness of the layer with index
'layerIndex' to a new value.
Index 0 corresponds to the top medium. The positions of the
layers above the one being changed will be recalculated
accordingly.
Note that the thickness of the top and bottom mediums cannot be
changed because they are infinite.
The characteristic matrices of the system and the coefficients
will be reset to None because they must be recalculated after
the thickness change. However, only the individual matrix of
the layer being modified will be reset to None. The individual
matrices of all other layers remain the same.
Parameters
----------
thickness : float
The thickness of the layer. In the same units as the
wavelengths.
layerIndex : int
The index of the layer. Index 0 corresponds to the top
medium.
"""
# Change the thickness of the layer
if thickness < 0:
error = "Negative thickness not accepted"
print(error)
raise ValueError
if layerIndex < 0:
error = "Negative index not accepted"
print(error)
raise IndexError
if (layerIndex == 0) or (layerIndex == len(self.__stack) - 1):
error = "Error setting thickness: the thickness of the top " + \
"and bottom mediums cannot be changed"
print(error)
raise IndexError
self.__stack[layerIndex]['thickness'] = float(thickness)
# Recalculate the z coordinates of the layers and reset matrices
# and coefficients.
self.calcPositions()
self.__stack[layerIndex]['matrix'] = None
self.__charMatrixUpDown = None
self.__charMatrixDownUp = None
self.__coefficientsUpDown['r'] = None
self.__coefficientsUpDown['t'] = None
self.__coefficientsUpDown['R'] = None
self.__coefficientsUpDown['T'] = None
self.__coefficientsDownUp['r'] = None
self.__coefficientsDownUp['t'] = None
self.__coefficientsDownUp['R'] = None
self.__coefficientsDownUp['T'] = None
def getThickness(self, layerIndex):
"""
This method returns the thickness of the layer with index
'layerindex'. Index 0 corresponds to the top medium.
Parameters
----------
layerIndex : int
The index of the layer. Index 0 corresponds to the top
medium.
Returns
-------
out : float
The thickness of the layer. In the same units as the
wavelengths.
"""
if layerIndex < 0:
error = "Negative index not accepted"
print(error)
raise IndexError
return self.__stack[layerIndex]['thickness']
def getMinMaxWlength(self):
"""
This method returns a tuple (min, max) with the shortest and
longest wavelengths for which the refractive index can be
calculated in all the layers forming the multilayer system.
Returns
-------
out : tuple
A tuple containing the minimum and maximum wavelengths
within which the refractive index can be interpolated in all
the mediums of the multilayer. In the same units as in the
file from which the refractive indices were loaded.
"""
return self.__minMaxWlength
def numLayers(self):
"""
This method returns the number of layers of the multilayer
system including the top and bottom mediums.
Returns
-------
out : int
The number of layers of the multilayer system, including the
top and bottom mediums.
"""
return len(self.__stack)
def getIndexAtPos(self, z):
"""
Returns the index of the layer within which z lies.
Parameters
----------
z : float
A z coordinate. z = 0 is at the surface between the bottom
medium and the next layer. The position of a layer is the z
coordinate of its lower interface. The units are the same as
the thickness and wavelengths.
Returns
-------
out : int
The index of the layer within which z lies.
"""
# For each layer starting at the upper medium, check if the
# given z is larger or equal than the position of the layer. If
# it is, then z lies in the current layer, otherwise move to the
# next one.
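# Example: with positions [25, 15, 0, -inf] (top to bottom),
# getIndexAtPos(7) returns 2 because 7 >= 0 but 7 < 15.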
for index in range(self.numLayers()):
if z >= self.getPosition(index):
return index
def setWlength(self, wavelength, rilist=None):
"""
This method sets the wavelength of the light going through the
multilayer system and sets the actual refractive index in each
layer.
Since changing the working wavelength also changes the
refractive index in effect, the propagation angles must be
recalculated. For that reason, this method will reset the
propagation angle in all the layers to 'None'. That will force
the user to execute again the setPropAngle() method.
Otherwise the calculation of the characteristic matrices will
raise an error.
Also, the characteristic matrices must be recalculated.
Therefore, they will also be reset to None along with the
coefficients.
Optionally, the refractive indices of the layers can be
passed explicitly with a list. This avoids calculating each
refractive index using the interpolator. Using this is
dangerous and discouraged. Use it only if you know what you are
doing.
Parameters
----------
wavelength : float
The wavelength of the light going across the system. In the
same units as in the file from which the refractive indices
were loaded.
rilist : list, optional
A list containing the refractive index of each layer at the
wavelength being set. The items must be ordered, the first
one corresponding to the top layer and the last one to the
bottom layer. Remember that the refractive indices are
complex numbers. If you pass a real number, it will be
converted to numpy.complex128. The use of this option is
discouraged. Use it only if you know what you are doing.
"""
# Only accept wavelengths within the available range
minimum, maximum = self.getMinMaxWlength()
if wavelength < minimum or wavelength > maximum:
error = "Error: Wavelength out of bounds"
print(error)
raise ValueError
self.__workingWavelength = np.float64(wavelength)
# Calculate the refractive indices of each layer and reset the
# variables that must be recalculated due to the change in the
# wavelength.
if rilist is None:
for index in range(self.numLayers()):
self.__stack[index]['propangle'] = None
self.__stack[index]['matrix'] = None
self.__stack[index]['refindex'] = \
self.__stack[index]['medium'].getRefrIndex(wavelength)
else:
for index in range(self.numLayers()):
self.__stack[index]['propangle'] = None
self.__stack[index]['matrix'] = None
try:
ri = rilist[index]
except:
error = "rilist must be an ordered sequence and have " + \
"as many items as layers in the system"
print(error)
raise TypeError
try:
ri = np.complex128(ri)
except:
error = "The refractive index must be a number"
print(error)
raise TypeError
self.__stack[index]['refindex'] = ri
self.__charMatrixUpDown = None
self.__charMatrixDownUp = None
self.__coefficientsUpDown['r'] = None
self.__coefficientsUpDown['t'] = None
self.__coefficientsUpDown['R'] = None
self.__coefficientsUpDown['T'] = None
self.__coefficientsDownUp['r'] = None
self.__coefficientsDownUp['t'] = None
self.__coefficientsDownUp['R'] = None
self.__coefficientsDownUp['T'] = None
def getWlength(self):
"""
This method returns the current wavelength of the light going
through the multilayer.
Returns
-------
out : float
The wavelength of the light going across the system. In the
same units as in the file from which the refractive indices
were loaded.
"""
return self.__workingWavelength
def setPolarization(self, polarization):
"""
Sets the polarization of the light going through the multilayer
system.
Since the characteristic matrices will change, they will be
reset to None in order to force the user to calculate them again
with the corresponding methods. The same goes for the
coefficients.
Parameters
----------
polarization : str
The polarization of the light going across the system. It
may be "te" or "tm", case insensitive.
"""
try:
polarization = polarization.upper()
if (polarization != 'TE') and (polarization != 'TM'):
raise ValueError
except ValueError:
error = "Error setting polarization: polarization must be " + \
"'te' or 'tm'"
print(error)
raise ValueError
except AttributeError:
error = "Error setting polarization: polarization must be " + \
"'te' or 'tm'"
print(error)
raise AttributeError
self.__polarization = polarization
# Reset the characteristic matrices and coefficients
for index in range(self.numLayers()):
self.__stack[index]['matrix'] = None
self.__charMatrixUpDown = None
self.__charMatrixDownUp = None
self.__coefficientsUpDown['r'] = None
self.__coefficientsUpDown['t'] = None
self.__coefficientsUpDown['R'] = None
self.__coefficientsUpDown['T'] = None
self.__coefficientsDownUp['r'] = None
self.__coefficientsDownUp['t'] = None
self.__coefficientsDownUp['R'] = None
self.__coefficientsDownUp['T'] = None
def getPolarization(self):
"""
Returns the polarization of the light going through the
multilayer system.
Returns
-------
out : str
The polarization of the light going across the system. It
may be "TE" or "TM".
"""
return self.__polarization
def setPropAngle(self, angle, index=0):
"""
Sets the propagation angle of light in the layer of given
index.
The propagation angle in all other layers is automatically
calculated using Snell's Law. The angle must be given in
radians.
If a list is given instead of a single angle, the angles will
be set to those found in the list following the natural order
(first angle to top medium, last angle to bottom medium). Use
of this feature is strongly discouraged. Use it only if you
know what you are doing.
Since the characteristic matrices and coefficients must be
recalculated, they will be reset to None to force the user to
execute again the relevant methods for its calculation.
Parameters
----------
angle : float or complex or list of floats or complexes
Propagation angle in radians. Use of a list instead of a
single angle is strongly discouraged. Use it only if you
know what you are doing.
index : int, optional
The index of the layer at which light propagates with the
given angle. If not specified, it will be assumed that the
angle corresponds to the propagation in the upper medium
(index = 0).
"""
# Do not accept a negative index.
if index < 0:
error = "Negative index not accepted"
print(error)
raise IndexError
if index >= self.numLayers():
error = "Layer %i does not exist" % index
print(error)
raise IndexError
# We want to work always with complex angles for when we have
# propagation beyond the critical angle.
angle = np.complex128(angle)
if self.getWlength() is None:
error = "Error setting propagation angle: a working " + \
"wavelength has not been set"
print(error)
raise ValueError
wavelength = self.getWlength()
if type(angle) == np.complex128:
# We set the angle in the layer specified in the argument. All
# other layers get the appropriate angle calculated using Snell's
# law
sine_i = np.sin(angle)
n_i = self.getRefrIndex(index)
for layerIndex in range(self.numLayers()):
if layerIndex == index:
self.__stack[layerIndex]['propangle'] = angle
else:
n_f = self.getRefrIndex(layerIndex)
if n_f == n_i:
self.__stack[layerIndex]['propangle'] = angle
else:
self.__stack[layerIndex]['propangle'] = np.arcsin(
n_i * sine_i / n_f)
else:
# In this case we have a list of angles. We copy them
# directly to the layer.
for layerIndex in range(self.numLayers()):
try:
self.__stack[layerIndex]['propangle'] = angle[layerIndex]
except:
error = "angle must be a number or a list of numbers " + \
"with as many items as layers in the system"
print(error)
raise TypeError
# Reset the characteristic matrices and the coefficients
for index in range(self.numLayers()):
self.__stack[index]['matrix'] = None
self.__charMatrixUpDown = None
self.__charMatrixDownUp = None
self.__coefficientsUpDown['r'] = None
self.__coefficientsUpDown['t'] = None
self.__coefficientsUpDown['R'] = None
self.__coefficientsUpDown['T'] = None
self.__coefficientsDownUp['r'] = None
self.__coefficientsDownUp['t'] = None
self.__coefficientsDownUp['R'] = None
self.__coefficientsDownUp['T'] = None
def getPropAngle(self, index):
"""
Returns the propagation angle of the light in the layer with the
given index.
Parameters
----------
index : int
The index of the layer. Index 0 corresponds to the upper
medium.
Returns
-------
out : complex
The propagation angle. It may be complex if there has been
total internal reflection in a lower interface.
"""
if index < 0:
error = "Negative index not accepted"
print(error)
raise IndexError
if index >= self.numLayers():
error = "Layer %i does not exist" % index
print(error)
raise IndexError
return self.__stack[index]['propangle']
def getRefrIndex(self, index):
"""
Returns the complex refractive index at the current wavelength
within the layer with the given index.
For example, multilayer.getRefrIndex(0) would return the complex
refractive index of the top medium at the current working
wavelength.
Parameters
----------
index : int
The index of the layer. Index 0 corresponds to the upper
medium.
Returns
-------
out : complex128
The complex refractive index for the given wavelength at the
medium of given index.
"""
if index < 0:
error = "Negative index not accepted"
print(error)
raise IndexError
if index >= self.numLayers():
error = "Layer %i does not exist" % index
print(error)
raise IndexError
return self.__stack[index]['refindex']
def calcMatrices(self, layerIndexes=[]):
"""
This method calculates the characteristic matrix of the
specified layers.
Note that the top and bottom medium do not have characteristic
matrices. An error will be raised if you try to calculate their
characteristic matrices.
The matrix is stored in a numpy.ndarray variable. Note that this
method does not return anything, it just stores the calculated
matrices in the corresponding field of the multilayer.
Parameters
----------
layerIndexes : list, optional
A list of the indices of the layers whose characteristic
matrix should be calculated. If the list is empty, the
matrices of all the layers (except top and bottom mediums)
will be calculated. Same if the parameter is skipped.
See Also
--------
getMatrix
"""
if not isinstance(layerIndexes, list):
error = "Error: the argument of calcMatrices must be a list"
print(error)
raise ValueError
if len(layerIndexes) == 0:
# Calculate all the characteristic matrices
layerList = range(1, self.numLayers() - 1)
else:
# Calculate only the characteristic matrices of the given
# layers.
layerList = layerIndexes
# Perform here the actual calculation
for layerIndex in layerList:
if not isinstance(layerIndex, int):
error = "Error: the layer index must be an integer"
print(error)
raise ValueError
if (layerIndex == 0) or (layerIndex == self.numLayers() - 1):
error = "Error: the characteristic matrix of the top and " + \
"bottom mediums cannot be calculated"
print(error)
raise ValueError
if (layerIndex >= self.numLayers()) or (layerIndex < 0):
error = "Error: valid layer indices from %i to %i" % \
(1, self.numLayers() - 2)
print(error)
raise IndexError
lambda0 = self.getWlength()
if lambda0 is None:
error = "Error: the wavelength is not set"
print(error)
raise ValueError
angle = self.getPropAngle(layerIndex)
if angle is None:
error = "Error: the propagation angle is not set"
print(error)
raise ValueError
pol = self.getPolarization()
if pol is None:
error = "Error: the polarization is not set"
print(error)
raise ValueError
n = self.getRefrIndex(layerIndex)
cosineAngle = np.cos(angle)
d = self.getThickness(layerIndex)
if pol == 'TE':
p = n * cosineAngle
else:
p = cosineAngle / n
b = 2 * np.pi * n * d * cosineAngle / lambda0
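# Characteristic matrix of a homogeneous layer (transfer-matrix method):
# M = [[cos(b), -i*sin(b)/p], [-i*p*sin(b), cos(b)]], with b the phase
# thickness computed above and p the polarization-dependent admittance.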
m11 = np.cos(b)
m12 = -1j * np.sin(b) / p
m21 = -1j * p * np.sin(b)
m22 = m11
self.__stack[layerIndex]['matrix'] = \
np.matrix([[m11, m12], [m21, m22]])
def getMatrix(self, layerIndex):
"""
This method returns the characteristic matrix of a given layer
in the stack.
Parameters
----------
layerIndex : int
The index of the layer. Index 0 corresponds to the upper
medium.
Returns
-------
out : numpy.ndarray
The characteristic matrix of the layer with the given index.
"""
if (layerIndex >= self.numLayers()) or (layerIndex < 0):
error = "Error: valid layer indices from %i to %i" % \
(1, self.numLayers() - 2)
print(error)
raise IndexError
return self.__stack[layerIndex]['matrix']
def updateCharMatrix(self):
"""
This method calculates the characteristic matrix of the
multilayer in the up-down direction and the down-up direction.
Then the coefficients r, t, R and T (reflection coefficient,
transmission coefficient, reflectance and transmittance,
respectively) are calculated from the characteristic matrix.
Note that both the reflection and transmission coefficients
refer to the ratios between reflected (transmitted) ELECTRIC
fields to the incident ELECTRIC field regardless of the
polarization of the wave (TE or TM).
The coefficients are calculated for both possible directions of
propagation (top-down and down-top). In the former case, the top
medium is considered to be the input medium and the bottom
medium is considered to be the exit medium. The reverse holds
for the down-top direction.
Before executing this method, the calcMatrices method must
be invoked in order to calculate the characteristic matrices of
each individual layer.
Note that this method does not return anything, it just stores
the global characteristic matrix in the corresponding attribute
of the multilayer.
"""
# Calculation of the characteristic matrices
# Up-down direction
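# The global characteristic matrix is the ordered product
# M = M_1 * M_2 * ... * M_N of the individual layer matrices.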
charMatrixUD = np.eye(2, 2)
for index in range(1, self.numLayers() - 1):
matrix = self.getMatrix(index)
if matrix is None:
error = "Error: the characteristic matrix cannot be " + \
"calculated because one of the individual " + \
"matrices has not been calculated"
print(error)
raise ValueError
charMatrixUD = charMatrixUD * matrix
self.__charMatrixUpDown = charMatrixUD
# Down-up direction
charMatrixDU = np.eye(2, 2)
for index in range(self.numLayers() - 2, 0, -1):
matrix = self.getMatrix(index)
if matrix is None:
error = "Error: the characteristic matrix cannot be " + \
"calculated because one of the individual " + \
"matrices has not been calculated"
print(error)
raise ValueError
charMatrixDU = charMatrixDU * matrix
self.__charMatrixDownUp = charMatrixDU
# Calculation of the coefficients
# Auxiliary variables
bottom_index = self.numLayers() - 1
n_top = self.getRefrIndex(0)
n_bottom = self.getRefrIndex(bottom_index)
cos_top = np.cos(self.getPropAngle(0))
cos_bottom = np.cos(self.getPropAngle(bottom_index))
# Up-down direction
# Determine the value of p according to the polarization
if self.getPolarization() == 'TE':
p_i = n_top * cos_top
p_l = n_bottom * cos_bottom
else:
p_i = cos_top / n_top
p_l = cos_bottom / n_bottom
# Calculate the coefficients
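# With a = (m11 + m12*p_l)*p_i and b = m21 + m22*p_l, the standard
# transfer-matrix expressions are r = (a - b)/(a + b) and
# t = 2*p_i/(a + b); R and T follow from |r|^2 and |t|^2.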
m11 = charMatrixUD[0, 0]
m12 = charMatrixUD[0, 1]
m21 = charMatrixUD[1, 0]
m22 = charMatrixUD[1, 1]
a = (m11 + m12 * p_l) * p_i
b = (m21 + m22 * p_l)
r = (a - b) / (a + b)
reflectivity = np.absolute(r) ** 2
# Attention: in the case of TM waves, the coefficients r and t
# refer to the ratio of the reflected (transmitted) MAGNETIC
# field to the incident MAGNETIC field. r is in fact equal to
# the ratio of the electric field amplitudes, but t must be
# modified to put it in terms of the electric field.
if self.getPolarization() == 'TE':
t = 2 * p_i / (a + b)
else:
t = (n_top / n_bottom) * 2 * p_i / (a + b)
p_i = n_top * cos_top
p_l = n_bottom * cos_bottom
# Note that, when p_l or p_i are complex (for instance because
# we are beyond the critical angle or because the medium has
# nonzero extinction coefficient) the transmittivity will be a
# complex number.
transmittivity = np.absolute(t) ** 2 * p_l / p_i
self.__coefficientsUpDown['r'] = r
self.__coefficientsUpDown['t'] = t
self.__coefficientsUpDown['R'] = reflectivity
self.__coefficientsUpDown['T'] = transmittivity
# Down-up direction
# Determine the value of p according to the polarization
if self.getPolarization() == 'TE':
p_i = n_bottom * cos_bottom
p_l = n_top * cos_top
else:
p_i = cos_bottom / n_bottom
p_l = cos_top / n_top
# Calculate the coefficients
m11 = charMatrixDU[0, 0]
m12 = charMatrixDU[0, 1]
m21 = charMatrixDU[1, 0]
m22 = charMatrixDU[1, 1]
a = (m11 + m12 * p_l) * p_i
b = (m21 + m22 * p_l)
r = (a - b) / (a + b)
reflectivity = np.absolute(r) ** 2
# Attention: in the case of TM waves, the coefficients r and t
# refer to the ratio of the reflected (transmitted) MAGNETIC
# field to the incident MAGNETIC field. r is in fact equal to
# the ratio of the electric field amplitudes, but t must be
# modified to put it in terms of the electric field.
if self.getPolarization() == 'TE':
t = 2 * p_i / (a + b)
else:
t = (n_bottom / n_top) * 2 * p_i / (a + b)
p_i = n_bottom * cos_bottom
p_l = n_top * cos_top
# Note that, when p_l or p_i are complex (for instance because
# we are beyond the critical angle or because the medium has
# nonzero extinction coefficient) the transmittivity will be a
# complex number.
transmittivity = np.absolute(t) ** 2 * p_l / p_i
self.__coefficientsDownUp['r'] = r
self.__coefficientsDownUp['t'] = t
self.__coefficientsDownUp['R'] = reflectivity
self.__coefficientsDownUp['T'] = transmittivity
def getCharMatrixUpDown(self):
"""
This method returns the characteristic matrix in the up-down
direction.
Returns
-------
out : numpy.ndarray
The characteristic matrix of the system in the top-down
direction of propagation.
"""
return self.__charMatrixUpDown
def getCharMatrixDownUp(self):
"""
This method returns the characteristic matrix in the down-up
direction
Returns
-------
out : numpy.ndarray
The characteristic matrix of the system in the down-top
direction of propagation.
"""
return self.__charMatrixDownUp
def getCoefficientsUpDown(self):
"""
This method returns a dictionary with the reflection and
transmission coefficients, the reflectance and the transmittance
of the multilayer system in the up-down direction of
propagation.
Returns
-------
out : dictionary
A dictionary with the reflection and transmission
coefficients, the reflectance and the transmittance of the
multilayer system. The keys are {'r', 't', 'R', 'T'}.
"""
return self.__coefficientsUpDown
def getCoefficientsDownUp(self):
"""
This method returns a dictionary with the reflection and
transmission coefficients, the reflectance and the transmittance
of the multilayer system in the down-up direction of
propagation.
Returns
-------
out : dictionary
A dictionary with the reflection and transmission
coefficients, the reflectance and the transmittance of the
multilayer system. The keys are {'r', 't', 'R', 'T'}.
"""
return self.__coefficientsDownUp
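# Typical workflow (a minimal sketch; assumes air, sio2 and si are Medium
# instances built from hypothetical index files):
#
#     system = Multilayer([air, [sio2, 100.0], si])
#     system.setWlength(632.8)
#     system.setPolarization('te')
#     system.setPropAngle(0.0)          # normal incidence in the top medium
#     system.calcMatrices()
#     system.updateCharMatrix()
#     R = system.getCoefficientsUpDown()['R']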
def calculateFx(self, z, wlength, angle, index=0):
"""
Calculates Fx(z; lambda, theta) of the multilayer.
The direction x is parallel to both the layer interfaces and the
plane of incidence of the light (or the direction of the
intersection between plane of incidence and interfaces). The
plane of incidence is always perpendicular to the interfaces.
The direction z is perpendicular to the interfaces.
The state of the multilayer will be changed according to the
parameters passed to the method and then F(z) will be
calculated.
Fx is defined only for TM waves. If the multilayer is currently
in TE, it will be changed to TM to perform the calculations.
Parameters
----------
z : float
The z coordinate of the emitting dipole.
wlength : float
The wavelength of the light across the multilayer. In the
same units as in the file from which the refractive
indices were loaded.
angle : float
The propagation angle in radians
index : int
The index of the layer where we are fixing the propagation
angle.
Returns
-------
out : complex128
The value of Fx(z, lambda, angle)
"""
# Determine what has to be changed and whether or not to update
# the matrices
if self.getPolarization() != 'TM':
self.setPolarization('TM')
if wlength != self.getWlength():
self.setWlength(wlength)
self.setPropAngle(angle, index)
if self.getPropAngle(index) != angle:
self.setPropAngle(angle, index)
if self.getCharMatrixUpDown() is None:
self.calcMatrices()
self.updateCharMatrix()
# Calculate Fx(z)
# Find out in which layer the dipole is located
dipole_layer_index = self.getIndexAtPos(z)
# Calculate Fx according to the position of the dipole
if dipole_layer_index == 0:
# Fx(z) in case the dipole is in the top medium
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
n0 = self.getRefrIndex(0)
# Calculate parameters
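# eta is the z-component of the wavevector in a layer:
# eta = (2*pi/lambda) * sqrt(n**2 - (n0*sin(theta0))**2); the quantity
# n0*sin(theta0) is conserved across layers (Snell's law).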
z0 = self.getPosition(0)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients
r01 = self.getCoefficientsUpDown()['r']
# Calculate function
fx = 1 - r01 * np.exp(2 * eta0 * (z - z0) * 1j)
elif dipole_layer_index == self.numLayers() - 1:
# Fx(z) in case the dipole is in the bottom medium
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
thetaN = self.getPropAngle(dipole_layer_index)
n0 = self.getRefrIndex(0)
nN = self.getRefrIndex(dipole_layer_index)
# Calculate parameters
z0 = self.getPosition(0)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
etaN = 2 * np.pi * np.sqrt(nN ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients
t1N = self.getCoefficientsUpDown()['t']
# Calculate function. We handle separately the case
# where theta0 is pi/2 to avoid division by zero. Bear
# in mind that if we have a dipole oscillating along x
# there is no light propagating along x.
if theta0 == np.pi / 2:
fx = 1 + 0j
else:
fx = t1N * np.exp(eta0 * (z - z0) * 1j - etaN * z * 1j) * \
np.cos(thetaN) / np.cos(theta0)
else:
# Fx(z) in case the dipole is within any of the layers
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
thetaj = self.getPropAngle(dipole_layer_index)
n0 = self.getRefrIndex(0)
nj = self.getRefrIndex(dipole_layer_index)
# Calculate parameters
z0 = self.getPosition(0)
zj = self.getPosition(dipole_layer_index)
zj1 = self.getPosition(dipole_layer_index - 1)
dj = self.getThickness(dipole_layer_index)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
etaj = 2 * np.pi * np.sqrt(nj ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients. We have to build some
# submultilayers first.
# Submultilayer from the top medium to dipole_layer_index.
rilist = [self.getRefrIndex(0)]
alist = [self.getPropAngle(0)]
layers = [self.__stack[0]['medium']]
for index in range(1, dipole_layer_index):
layers.append([self.__stack[index]['medium'],
self.getThickness(index)])
rilist.append(self.getRefrIndex(index))
alist.append(self.getPropAngle(index))
layers.append(self.__stack[dipole_layer_index]['medium'])
rilist.append(self.getRefrIndex(dipole_layer_index))
alist.append(self.getPropAngle(dipole_layer_index))
sub_above = Multilayer(layers)
sub_above.setWlength(wavelength, rilist)
sub_above.setPropAngle(alist)
sub_above.setPolarization('TM')
sub_above.calcMatrices()
sub_above.updateCharMatrix()
# Submultilayer from dipole_layer_index to the bottom
# medium.
rilist = [self.getRefrIndex(dipole_layer_index)]
alist = [self.getPropAngle(dipole_layer_index)]
layers = [self.__stack[dipole_layer_index]['medium']]
for index in range(dipole_layer_index + 1,
self.numLayers() - 1):
layers.append([self.__stack[index]['medium'],
self.getThickness(index)])
rilist.append(self.getRefrIndex(index))
alist.append(self.getPropAngle(index))
layers.append(self.__stack[self.numLayers() - 1]['medium'])
rilist.append(self.getRefrIndex(self.numLayers() - 1))
alist.append(self.getPropAngle(self.numLayers() - 1))
sub_below = Multilayer(layers)
sub_below.setWlength(wavelength, rilist)
sub_below.setPropAngle(alist)
sub_below.setPolarization(self.getPolarization())
sub_below.calcMatrices()
sub_below.updateCharMatrix()
# Now we can retrieve the relevant coefficients
t1j = sub_above.getCoefficientsUpDown()['t']
rjjp1 = sub_below.getCoefficientsUpDown()['r']
rjjm1 = sub_above.getCoefficientsDownUp()['r']
# Calculate function. We handle separately the case
# where theta0 is pi/2 to avoid division by zero. Bear
# in mind that if we have a dipole oscillating along x
# there is no light propagating along x.
if theta0 == np.pi / 2:
fx = 1 + 0j
else:
numerator = t1j * \
(1 - rjjp1 * np.exp(2 * etaj * (z - zj) * 1j))
denominator = \
1 - rjjp1 * rjjm1 * np.exp(2 * etaj * dj * 1j)
factor = np.exp(eta0 * (z - z0) * 1j - etaj * \
(z - zj1) * 1j) * np.cos(thetaj) / np.cos(theta0)
fx = numerator * factor / denominator
return np.complex128(fx)
def calculateFy(self, z, wlength, angle, index=0):
"""
Calculates Fy(z) of the multilayer.
The direction y is parallel to the layer interfaces and
perpendicular to the plane of incidence. The plane of incidence
is always perpendicular to the interfaces. The direction z is
perpendicular to the interfaces.
The state of the multilayer will be changed according to the
parameters passed to the method and then F(z) will be
calculated.
Fy is defined only for TE waves. If the multilayer is currently
in TM, it will be changed to TE to perform the calculations.
Parameters
----------
z : float
The z coordinate of the emitting dipole.
wlength : float
The wavelength of the light across the multilayer. In the
same units as in the file from which the refractive indices
were loaded.
angle : float
The propagation angle in radians
index : int
The index of the layer where we are fixing the propagation
angle.
Returns
-------
out : complex128
The value of Fy(z, lambda, angle)
"""
# Determine what has to be changed and update matrices
if self.getPolarization() != 'TE':
self.setPolarization('TE')
if wlength != self.getWlength():
self.setWlength(wlength)
self.setPropAngle(angle, index)
if self.getPropAngle(index) != angle:
self.setPropAngle(angle, index)
if self.getCharMatrixUpDown() is None:
self.calcMatrices()
self.updateCharMatrix()
# Calculate Fy(z)
# Find out in which layer the dipole is located
dipole_layer_index = self.getIndexAtPos(z)
# Calculate Fy according to the position of the dipole
if dipole_layer_index == 0:
# Fy(z) in case the dipole is in the top medium
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
n0 = self.getRefrIndex(0)
# Calculate parameters
z0 = self.getPosition(0)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients
r01 = self.getCoefficientsUpDown()['r']
# Calculate function
fy = 1 + r01 * np.exp(2 * eta0 * (z - z0) * 1j)
elif dipole_layer_index == self.numLayers() - 1:
# Fy(z) in case the dipole is in the bottom
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
n0 = self.getRefrIndex(0)
nN = self.getRefrIndex(dipole_layer_index)
# Calculate parameters
z0 = self.getPosition(0)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
etaN = 2 * np.pi * np.sqrt(nN ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients
t1N = self.getCoefficientsUpDown()['t']
# Calculate function
fy = t1N * np.exp(eta0 * (z - z0) * 1j - etaN * z * 1j)
else:
# Fy(z) in case the dipole is within any of the layers
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
n0 = self.getRefrIndex(0)
nj = self.getRefrIndex(dipole_layer_index)
# Calculate parameters
z0 = self.getPosition(0)
zj = self.getPosition(dipole_layer_index)
zj1 = self.getPosition(dipole_layer_index - 1)
dj = self.getThickness(dipole_layer_index)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
etaj = 2 * np.pi * np.sqrt(nj ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients. We have to build some
# submultilayers first
# Submultilayer from the top medium to dipole_layer_index
rilist = [self.getRefrIndex(0)]
alist = [self.getPropAngle(0)]
layers = [self.__stack[0]['medium']]
for index in range(1, dipole_layer_index):
layers.append([self.__stack[index]['medium'],
self.getThickness(index)])
rilist.append(self.getRefrIndex(index))
alist.append(self.getPropAngle(index))
layers.append(self.__stack[dipole_layer_index]['medium'])
rilist.append(self.getRefrIndex(dipole_layer_index))
alist.append(self.getPropAngle(dipole_layer_index))
sub_above = Multilayer(layers)
sub_above.setWlength(wavelength, rilist)
sub_above.setPropAngle(alist)
sub_above.setPolarization(self.getPolarization())
sub_above.calcMatrices()
sub_above.updateCharMatrix()
# Submultilayer from dipole_layer_index to the bottom
# medium.
rilist = [self.getRefrIndex(dipole_layer_index)]
alist = [self.getPropAngle(dipole_layer_index)]
layers = [self.__stack[dipole_layer_index]['medium']]
for index in range(dipole_layer_index + 1,
self.numLayers() - 1):
layers.append([self.__stack[index]['medium'],
self.getThickness(index)])
rilist.append(self.getRefrIndex(index))
alist.append(self.getPropAngle(index))
layers.append(self.__stack[self.numLayers() - 1]['medium'])
rilist.append(self.getRefrIndex(self.numLayers() - 1))
alist.append(self.getPropAngle(self.numLayers() - 1))
sub_below = Multilayer(layers)
sub_below.setWlength(wavelength, rilist)
sub_below.setPropAngle(alist)
sub_below.setPolarization(self.getPolarization())
sub_below.calcMatrices()
sub_below.updateCharMatrix()
# Now we can retrieve the relevant coefficients
t1j = sub_above.getCoefficientsUpDown()['t']
rjjp1 = sub_below.getCoefficientsUpDown()['r']
rjjm1 = sub_above.getCoefficientsDownUp()['r']
# Calculate function
numerator = t1j * \
(1 + rjjp1 * np.exp(2 * etaj * (z - zj) * 1j))
denominator = 1 - \
rjjp1 * rjjm1 * np.exp(2 * etaj * dj * 1j)
factor = np.exp(eta0 * (z - z0) * 1j - etaj * (z - zj1) * 1j)
fy = numerator * factor / denominator
return np.complex128(fy)
def calculateFz(self, z, wlength, angle, index=0):
"""
Calculates Fz(z) of the multilayer.
The direction z is perpendicular to the interfaces.
The state of the multilayer will be changed according to the
parameters passed to the method and then F(z) will be
calculated.
Fz is defined only for TM waves. If the multilayer is currently
in TE, it will be changed to TM to perform the calculations.
Parameters
----------
z : float
The z coordinate of the emitting dipole.
wlength : float
The wavelength of the light across the multilayer. In the
same units as in the file from which the refractive
indices were loaded.
angle : float
The propagation angle in radians.
index : int
The index of the layer where we are fixing the propagation
angle.
Returns
-------
out : complex128
The value of Fz(z, lambda, angle)
"""
# Calculate Fz(z)
# Determine what has to be changed and update matrices
if self.getPolarization() != 'TM':
self.setPolarization('TM')
if wlength != self.getWlength():
self.setWlength(wlength)
self.setPropAngle(angle, index)
if self.getPropAngle(index) != angle:
self.setPropAngle(angle, index)
if self.getCharMatrixUpDown() is None:
self.calcMatrices()
self.updateCharMatrix()
# Find out in which layer the dipole is located
dipole_layer_index = self.getIndexAtPos(z)
# Calculate Fz according to the position of the dipole
if dipole_layer_index == 0:
# Fz(z) in case the dipole is in the top medium
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
n0 = self.getRefrIndex(0)
# Calculate parameters
z0 = self.getPosition(0)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients
r01 = self.getCoefficientsUpDown()['r']
# Calculate function
fz = 1 + r01 * np.exp(2 * eta0 * (z - z0) * 1j)
elif dipole_layer_index == self.numLayers() - 1:
# Fz(z) in case the dipole is in the bottom medium
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
thetaN = self.getPropAngle(dipole_layer_index)
n0 = self.getRefrIndex(0)
nN = self.getRefrIndex(dipole_layer_index)
# Calculate parameters
z0 = self.getPosition(0)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
etaN = 2 * np.pi * np.sqrt(nN ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients
t1N = self.getCoefficientsUpDown()['t']
# Calculate function. We handle separately the case
# where theta0 is 0 to avoid division by zero. Bear in
# mind that if we have a dipole oscillating along z
# there is no light propagating along z.
if theta0 == 0:
fz = 1 + 0j
else:
fz = t1N * np.exp(eta0 * (z - z0) * 1j - etaN * z * 1j) * \
np.sin(thetaN) / np.sin(theta0)
else:
# Fz(z) in case the dipole is within any of the layers
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
thetaj = self.getPropAngle(dipole_layer_index)
n0 = self.getRefrIndex(0)
nj = self.getRefrIndex(dipole_layer_index)
# Calculate parameters
z0 = self.getPosition(0)
zj = self.getPosition(dipole_layer_index)
zj1 = self.getPosition(dipole_layer_index - 1)
dj = self.getThickness(dipole_layer_index)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
etaj = 2 * np.pi * np.sqrt(nj ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retrieve coefficients. We have to build some
# submultilayers first.
# Submultilayer from the top medium to dipole_layer_index.
rilist = [self.getRefrIndex(0)]
alist = [self.getPropAngle(0)]
layers = [self.__stack[0]['medium']]
for index in range(1, dipole_layer_index):
layers.append([self.__stack[index]['medium'],
self.getThickness(index)])
rilist.append(self.getRefrIndex(index))
alist.append(self.getPropAngle(index))
layers.append(self.__stack[dipole_layer_index]['medium'])
rilist.append(self.getRefrIndex(dipole_layer_index))
alist.append(self.getPropAngle(dipole_layer_index))
sub_above = Multilayer(layers)
sub_above.setWlength(wavelength, rilist)
sub_above.setPropAngle(alist)
sub_above.setPolarization(self.getPolarization())
sub_above.calcMatrices()
sub_above.updateCharMatrix()
# Submultilayer from the dipole_layer_index to the bottom
# medium.
rilist = [self.getRefrIndex(dipole_layer_index)]
alist = [self.getPropAngle(dipole_layer_index)]
layers = [self.__stack[dipole_layer_index]['medium']]
for index in range(dipole_layer_index + 1,
self.numLayers() - 1):
layers.append([self.__stack[index]['medium'],
self.getThickness(index)])
rilist.append(self.getRefrIndex(index))
alist.append(self.getPropAngle(index))
layers.append(self.__stack[self.numLayers() - 1]['medium'])
rilist.append(self.getRefrIndex(self.numLayers() - 1))
alist.append(self.getPropAngle(self.numLayers() - 1))
sub_below = Multilayer(layers)
sub_below.setWlength(wavelength, rilist)
sub_below.setPropAngle(alist)
sub_below.setPolarization(self.getPolarization())
sub_below.calcMatrices()
sub_below.updateCharMatrix()
# Now we can retrieve the relevant coefficients
t1j = sub_above.getCoefficientsUpDown()['t']
rjjp1 = sub_below.getCoefficientsUpDown()['r']
rjjm1 = sub_above.getCoefficientsDownUp()['r']
# Calculate function. We handle separately the case
# where theta0 is 0 to avoid division by zero. Bear in
# mind that if we have a dipole oscillating along z
# there is no light propagating along z.
if theta0 == 0:
fz = 1 + 0j
else:
numerator = t1j * \
(1 + rjjp1 * np.exp(2 * etaj * (z - zj) * 1j))
denominator = 1 - rjjp1 * rjjm1 * \
np.exp(2 * etaj * dj * 1j)
factor = np.exp(
eta0 * (z - z0) * 1j - etaj * (z - zj1) * 1j) * \
np.sin(thetaj) / np.sin(theta0)
fz = numerator * factor / denominator
return np.complex128(fz)
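# Example of the dipole field factors (a minimal sketch; assumes the same
# hypothetical air/sio2/si Medium instances as above):
#
#     system = Multilayer([air, [sio2, 100.0], si])
#     fy = system.calculateFy(z=50.0, wlength=632.8, angle=0.3)
#     fz = system.calculateFz(z=50.0, wlength=632.8, angle=0.3)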
|
tortugueta/multilayers
|
multilayers.py
|
Python
|
gpl-3.0
| 70,824
| 0.000706
|
import sys
import operator
import collections
import random
import string
import heapq
# @include
def find_student_with_highest_best_of_three_scores(name_score_data):
student_scores = collections.defaultdict(list)
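# Keep a min-heap of at most three scores per student: once three scores
# are stored, heappushpop() pushes the new score and pops the smallest,
# so each heap always holds the three highest scores seen so far.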
for line in name_score_data:
name, score = line.split()
if len(student_scores[name]) < 3:
heapq.heappush(student_scores[name], int(score))
else:
heapq.heappushpop(student_scores[name], int(score))
return max([(sum(scores), name) for name, scores in student_scores.items()
if len(scores) == 3],
key=operator.itemgetter(0),
default=(0, 'no such student'))[1]
# @exclude
def rand_string(length):
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def simple_test():
with open('scores.txt', 'w') as ofs:
ofs.write('''adnan 100
amit 99
adnan 98
thl 90
adnan 10
amit 100
thl 99
thl 95
dd 100
dd 100
adnan 95''')
with open('scores.txt') as name_score_data:
result = find_student_with_highest_best_of_three_scores(name_score_data)
print('result =', result)
assert result == 'adnan'
def main():
simple_test()
n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 10000)
with open('scores.txt', 'w') as ofs:
for i in range(n):
test_num = random.randint(0, 20)
name = rand_string(random.randint(5, 10))
for _ in range(test_num):
print(name, random.randint(0, 100), file=ofs)
with open('scores.txt') as name_score_data:
name = find_student_with_highest_best_of_three_scores(name_score_data)
name_score_data.seek(0)
print('top student is', name)
if __name__ == '__main__':
main()
|
meisamhe/GPLshared
|
Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/average_top_3_scores.py
|
Python
|
gpl-3.0
| 1,767
| 0.001132
|
from pathlib import Path
import os
import structlog
log = structlog.get_logger()
_config = None
def get():
global _config
if not isinstance(_config, _build_config):
_config = _build_config()
return _config
class _build_config:
def __init__(self):
self._config = {}
self.dos_install_dir = os.environ["DOS_BIN"]
self.dos_log_dir = os.environ["DOS_LOG"]
self.env_var_contexts = ["dos"]
# load from toml file
self._load_toml_config()
# load from env variables
self._load_env_vars()
def get(self, key, default=None):
return self._config.get(key, default)
def put(self, key, value, context="default"):
self.add_config_value(key, value, context=context)
def check(self, key):
return key in self._config
def add_config_value(self, key, value, context="default"):
ctx_key = f"{context}_{key}"
self._config[ctx_key] = value
log.debug("set config", context=context, key=key, ctx_key=ctx_key)
def add_path_value(self, key, value, context):
self.add_config_value(key, Path(value), context=context)
def _load_toml_config(self):
# potentially add env var contexts
log.debug("loading toml config", file_name="TODO <> TODO")
def _load_env_vars(self):
log.debug("loading environ config")
for key in os.environ:
parts = key.lower().split("_")
ctx = parts[0]
if ctx not in self.env_var_contexts:
continue
log.info(f"discovered environ config", key=key)
if len(parts) == 2:
self.add_config_value(
parts[1], # key
os.environ[key], # value from env
context=ctx, # give context
)
elif len(parts) == 3:
k = parts[2]
t = parts[1]
if t == "path":
self.add_path_value(k, os.environ[key], context=ctx)
else:
raise ValueError(f'unrecognized key type "{t}" for "{key}"')
else:
raise ValueError(
f"incorrect number of parts for env var: {key}, expected 2 or 3"
)
def dos_bin(self):
log.info(f"dev ops shell bin: {self.dos_install_dir}")
dos_bin = Path(self.dos_install_dir)
dos_bin.mkdir(parents=True, exist_ok=True)
return dos_bin
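# Example usage (a minimal sketch; assumes DOS_BIN and DOS_LOG are set in
# the environment):
#
#     cfg = get()
#     cfg.put("editor", "vim", context="dos")   # stored under "dos_editor"
#     print(cfg.get("dos_editor"))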
|
meantheory/dotfiles
|
dos/src/dos/config.py
|
Python
|
mit
| 2,504
| 0.000799
|