| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page
from telemetry import story
class ServiceWorkerBenchmarkPage(page.Page):
"""Page for workload to measure some specific functions in JS"""
def RunNavigateSteps(self, action_runner):
super(ServiceWorkerBenchmarkPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition('window.done')
class ServiceWorkerMicroBenchmarkPageSet(story.StorySet):
"""Page set for micro benchmarking of each functions with ServiceWorker"""
def __init__(self):
super(ServiceWorkerMicroBenchmarkPageSet, self).__init__(
archive_data_file='data/service_worker_micro_benchmark.json',
cloud_storage_bucket=story.PUBLIC_BUCKET)
# pylint: disable=line-too-long
# The latest code of localhost:8091 is from:
# https://github.com/horo-t/Service-Worker-Performance/tree/fix-flakyness
# (rev: 0cc35c2398526665399ca99fe53147ff81101408)
# TODO(falken): House the code in GoogleChrome's GitHub repository.
# pylint: enable=C0301
# Why: to measure performance of many concurrent fetches
self.AddStory(ServiceWorkerBenchmarkPage(
'http://localhost:8091/index.html', self,
make_javascript_deterministic=False))
| axinging/chromium-crosswalk | tools/perf/page_sets/service_worker_micro_benchmark.py | Python | bsd-3-clause | 1,389 |
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2314
try:
import unittest2 as unittest
except ImportError:
import unittest
class CertificationRequestTestCase(unittest.TestCase):
pem_text = """\
MIIDATCCAekCAQAwgZkxCzAJBgNVBAYTAlJVMRYwFAYDVQQIEw1Nb3Njb3cgUmVn
aW9uMQ8wDQYDVQQHEwZNb3Njb3cxGjAYBgNVBAoTEVNOTVAgTGFib3JhdG9yaWVz
MQwwCgYDVQQLFANSJkQxFTATBgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3
DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQC9n2NfGS98JDBmAXQn+vNUyPB3QPYC1cwpX8UMYh9MdAmBZJCnvXrQ
Pp14gNAv6AQKxefmGES1b+Yd+1we9HB8AKm1/8xvRDUjAvy4iO0sqFCPvIfSujUy
pBcfnR7QE2itvyrMxCDSEVnMhKdCNb23L2TptUmpvLcb8wfAMLFsSu2yaOtJysep
oH/mvGqlRv2ti2+E2YA0M7Pf83wyV1XmuEsc9tQ225rprDk2uyshUglkDD2235rf
0QyONq3Aw3BMrO9ss1qj7vdDhVHVsxHnTVbEgrxEWkq2GkVKh9QReMZ2AKxe40j4
og+OjKXguOCggCZHJyXKxccwqCaeCztbAgMBAAGgIjAgBgkqhkiG9w0BCQIxExMR
U05NUCBMYWJvcmF0b3JpZXMwDQYJKoZIhvcNAQEFBQADggEBAAihbwmN9M2bsNNm
9KfxqiGMqqcGCtzIlpDz/2NVwY93cEZsbz3Qscc0QpknRmyTSoDwIG+1nUH0vzkT
Nv8sBmp9I1GdhGg52DIaWwL4t9O5WUHgfHSJpPxZ/zMP2qIsdPJ+8o19BbXRlufc
73c03H1piGeb9VcePIaulSHI622xukI6f4Sis49vkDaoi+jadbEEb6TYkJQ3AMRD
WdApGGm0BePdLqboW1Yv70WRRFFD8sxeT7Yw4qrJojdnq0xMHPGfKpf6dJsqWkHk
b5DRbjil1Zt9pJuF680S9wtBzSi0hsMHXR9TzS7HpMjykL2nmCVY6A78MZapsCzn
GGbx7DI=
"""
def setUp(self):
self.asn1Spec = rfc2314.CertificationRequest()
def testDerCodec(self):
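# Round-trip check: decode the PEM-extracted DER substrate against the
# CertificationRequest spec, then re-encode and verify it reproduces the
# original bytes with nothing left over.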
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert der_encoder.encode(asn1Object) == substrate
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| catapult-project/catapult | third_party/gsutil/third_party/pyasn1-modules/tests/test_rfc2314.py | Python | bsd-3-clause | 2,078 |
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import (
PeriodArray,
period_array,
)
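# Note: the expected values below are PeriodArray ordinals (asi8); for daily
# frequency they count days since the Unix epoch, so 17167 is 2017-01-01, and
# for quarterly frequency they count quarters since 1970Q1.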
@pytest.mark.parametrize(
"data, freq, expected",
[
([pd.Period("2017", "D")], None, [17167]),
([pd.Period("2017", "D")], "D", [17167]),
([2017], "D", [17167]),
(["2017"], "D", [17167]),
([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]),
([pd.Period("2017", "D"), None], None, [17167, iNaT]),
(pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]),
(pd.date_range("2017", periods=3), None, [17167, 17168, 17169]),
(pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]),
],
)
def test_period_array_ok(data, freq, expected):
result = period_array(data, freq=freq).asi8
expected = np.asarray(expected, dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_period_array_readonly_object():
# https://github.com/pandas-dev/pandas/issues/25403
pa = period_array([pd.Period("2019-01-01")])
arr = np.asarray(pa, dtype="object")
arr.setflags(write=False)
result = period_array(arr)
tm.assert_period_array_equal(result, pa)
result = pd.Series(arr)
tm.assert_series_equal(result, pd.Series(pa))
result = pd.DataFrame({"A": arr})
tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
def test_from_datetime64_freq_changes():
# https://github.com/pandas-dev/pandas/issues/23438
arr = pd.date_range("2017", periods=3, freq="D")
result = PeriodArray._from_datetime64(arr, freq="M")
expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M")
tm.assert_period_array_equal(result, expected)
@pytest.mark.parametrize(
"data, freq, msg",
[
(
[pd.Period("2017", "D"), pd.Period("2017", "A")],
None,
"Input has different freq",
),
([pd.Period("2017", "D")], "A", "Input has different freq"),
],
)
def test_period_array_raises(data, freq, msg):
with pytest.raises(IncompatibleFrequency, match=msg):
period_array(data, freq)
def test_period_array_non_period_series_raies():
ser = pd.Series([1, 2, 3])
with pytest.raises(TypeError, match="dtype"):
PeriodArray(ser, freq="D")
def test_period_array_freq_mismatch():
arr = period_array(["2000", "2001"], freq="D")
with pytest.raises(IncompatibleFrequency, match="freq"):
PeriodArray(arr, freq="M")
with pytest.raises(IncompatibleFrequency, match="freq"):
PeriodArray(arr, freq=pd.tseries.offsets.MonthEnd())
def test_from_sequence_disallows_i8():
arr = period_array(["2000", "2001"], freq="D")
msg = str(arr[0].ordinal)
with pytest.raises(TypeError, match=msg):
PeriodArray._from_sequence(arr.asi8, dtype=arr.dtype)
with pytest.raises(TypeError, match=msg):
PeriodArray._from_sequence(list(arr.asi8), dtype=arr.dtype)
| rs2/pandas | pandas/tests/arrays/period/test_constructors.py | Python | bsd-3-clause | 3,116 |
# Do not edit this file, pipeline versioning is governed by git tags
__version__ = "0.0.0"
| ecolell/aquire | version.py | Python | mit | 86 |
# Copyright (C) 2009 Red Hat, Inc., Joey Boggs <jboggs@redhat.com>
# Copyright (C) 2012 Rackspace US, Inc.,
# Justin Shepherd <jshepher@rackspace.com>
# Copyright (C) 2013 Red Hat, Inc., Flavio Percoco <fpercoco@redhat.com>
# Copyright (C) 2013 Red Hat, Inc., Jeremy Agee <jagee@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class OpenStackCinder(Plugin):
"""OpenStack cinder
"""
plugin_name = "openstack_cinder"
profiles = ('openstack', 'openstack_controller')
option_list = [("db", "gathers openstack cinder db version", "slow",
False)]
def setup(self):
if self.get_option("db"):
self.add_cmd_output(
"cinder-manage db version",
suggest_filename="cinder_db_version")
self.add_copy_spec(["/etc/cinder/"])
self.limit = self.get_option("log_size")
if self.get_option("all_logs"):
self.add_copy_spec_limit("/var/log/cinder/",
sizelimit=self.limit)
else:
self.add_copy_spec_limit("/var/log/cinder/*.log",
sizelimit=self.limit)
def postproc(self):
protect_keys = [
"admin_password", "backup_tsm_password", "chap_password",
"nas_password", "cisco_fc_fabric_password", "coraid_password",
"eqlx_chap_password", "fc_fabric_password",
"hitachi_auth_password", "hitachi_horcm_password",
"hp3par_password", "hplefthand_password", "memcache_secret_key",
"netapp_password", "netapp_sa_password", "nexenta_password",
"password", "qpid_password", "rabbit_password", "san_password",
"ssl_key_password", "vmware_host_password", "zadara_password",
"zfssa_initiator_password", "connection", "zfssa_target_password",
"os_privileged_user_password", "hmac_keys"
]
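# Build a multiline regex matching "<key> = <value>" lines for any protected
# key; group 1 captures the key and '=', so the substitution below keeps the
# key and masks only the value.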
regexp = r"((?m)^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys)
self.do_path_regex_sub("/etc/cinder/*", regexp, r"\1*********")
class DebianCinder(OpenStackCinder, DebianPlugin, UbuntuPlugin):
cinder = False
packages = (
'cinder-api',
'cinder-backup',
'cinder-common',
'cinder-scheduler',
'cinder-volume',
'python-cinder',
'python-cinderclient'
)
def check_enabled(self):
self.cinder = self.is_installed("cinder-common")
return self.cinder
def setup(self):
super(DebianCinder, self).setup()
class RedHatCinder(OpenStackCinder, RedHatPlugin):
cinder = False
packages = ('openstack-cinder',
'python-cinder',
'python-cinderclient')
def check_enabled(self):
self.cinder = self.is_installed("openstack-cinder")
return self.cinder
def setup(self):
super(RedHatCinder, self).setup()
self.add_copy_spec(["/etc/sudoers.d/cinder"])
# vim: set et ts=4 sw=4 :
| csutherl/sos | sos/plugins/openstack_cinder.py | Python | gpl-2.0 | 3,713 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.HIGHEST
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces instances like 'CONCAT(A, B)' with 'CONCAT_WS(MID(CHAR(0), 0, 0), A, B)'
Requirement:
* MySQL
Tested against:
* MySQL 5.0
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that filter the CONCAT() function
>>> tamper('CONCAT(1,2)')
'CONCAT_WS(MID(CHAR(0),0,0),1,2)'
"""
if payload:
payload = payload.replace("CONCAT(", "CONCAT_WS(MID(CHAR(0),0,0),")
return payload
| pwnieexpress/raspberry_pwn | src/pentest/sqlmap/tamper/concat2concatws.py | Python | gpl-3.0 | 766 |
"""
Command to migrate transcripts to django storage.
"""
import logging
from django.core.management import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from cms.djangoapps.contentstore.tasks import (
DEFAULT_ALL_COURSES,
DEFAULT_FORCE_UPDATE,
DEFAULT_COMMIT,
enqueue_async_migrate_transcripts_tasks
)
from openedx.core.lib.command_utils import get_mutually_exclusive_required_option, parse_course_keys
from openedx.core.djangoapps.video_config.models import TranscriptMigrationSetting
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Example usage:
$ ./manage.py cms migrate_transcripts --all-courses --force-update --commit
$ ./manage.py cms migrate_transcripts --course-id 'Course1' --course-id 'Course2' --commit
$ ./manage.py cms migrate_transcripts --from-settings
"""
help = 'Migrates transcripts to S3 for one or more courses.'
def add_arguments(self, parser):
"""
Add arguments to the command parser.
"""
parser.add_argument(
'--course-id', '--course_id',
dest='course_ids',
action='append',
help=u'Migrates transcripts for the list of courses.'
)
parser.add_argument(
'--all-courses', '--all', '--all_courses',
dest='all_courses',
action='store_true',
default=DEFAULT_ALL_COURSES,
help=u'Migrates transcripts to the configured django storage for all courses.'
)
parser.add_argument(
'--from-settings', '--from_settings',
dest='from_settings',
help='Migrate Transcripts with settings set via django admin',
action='store_true',
default=False,
)
parser.add_argument(
'--force-update', '--force_update',
dest='force_update',
action='store_true',
default=DEFAULT_FORCE_UPDATE,
help=u'Force migrate transcripts for the requested courses, overwrite if already present.'
)
parser.add_argument(
'--commit',
dest='commit',
action='store_true',
default=DEFAULT_COMMIT,
help=u'Commits the discovered video transcripts to django storage. '
u'Without this flag, the command will return the transcripts discovered for migration.'
)
def _parse_course_key(self, raw_value):
""" Parses course key from string """
try:
result = CourseKey.from_string(raw_value)
except InvalidKeyError:
raise CommandError("Invalid course_key: '%s'." % raw_value)
if not isinstance(result, CourseLocator):
raise CommandError(u"Argument {0} is not a course key".format(raw_value))
return result
def _get_migration_options(self, options):
"""
Returns the command arguments configured via django admin.
"""
force_update = options['force_update']
commit = options['commit']
courses_mode = get_mutually_exclusive_required_option(options, 'course_ids', 'all_courses', 'from_settings')
if courses_mode == 'all_courses':
course_keys = [course.id for course in modulestore().get_course_summaries()]
elif courses_mode == 'course_ids':
course_keys = map(self._parse_course_key, options['course_ids'])
else:
if self._latest_settings().all_courses:
course_keys = [course.id for course in modulestore().get_course_summaries()]
else:
course_keys = parse_course_keys(self._latest_settings().course_ids.split())
force_update = self._latest_settings().force_update
commit = self._latest_settings().commit
return course_keys, force_update, commit
def _latest_settings(self):
"""
Return the latest version of the TranscriptMigrationSetting
"""
return TranscriptMigrationSetting.current()
def handle(self, *args, **options):
"""
Invokes the migrate transcripts enqueue function.
"""
course_keys, force_update, commit = self._get_migration_options(options)
command_run = self._latest_settings().increment_run() if commit else -1
enqueue_async_migrate_transcripts_tasks(
course_keys=course_keys, commit=commit, command_run=command_run, force_update=force_update
)
| Stanford-Online/edx-platform | cms/djangoapps/contentstore/management/commands/migrate_transcripts.py | Python | agpl-3.0 | 4,653 |
# -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import tensorflow as tf
class IoOpsTest(tf.test.TestCase):
def testReadFile(self):
cases = ['', 'Some contents', 'Неки садржаји на српском']
for contents in cases:
contents = tf.compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(prefix='ReadFileTest',
dir=self.get_temp_dir(),
delete=False) as temp:
temp.write(contents)
with self.test_session():
read = tf.read_file(temp.name)
self.assertEqual([], read.get_shape())
self.assertEqual(read.eval(), contents)
os.remove(temp.name)
def testWriteFile(self):
cases = ['', 'Some contents']
for contents in cases:
contents = tf.compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(prefix='WriteFileTest',
dir=self.get_temp_dir(),
delete=False) as temp:
pass
with self.test_session() as sess:
w = tf.write_file(temp.name, contents)
sess.run(w)
with open(temp.name, 'rb') as f:
file_contents = f.read()
self.assertEqual(file_contents, contents)
os.remove(temp.name)
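# Helper: return the set of temp file names (as bytes) whose index is in `indices`.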
def _subset(self, files, indices):
return set(tf.compat.as_bytes(files[i].name)
for i in range(len(files)) if i in indices)
def testMatchingFiles(self):
cases = ['ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH',
'AB4DEF.GH', 'ABDEF.GH', 'XYZ']
files = [tempfile.NamedTemporaryFile(
prefix=c, dir=self.get_temp_dir(), delete=True) for c in cases]
with self.test_session():
# Test exact match without wildcards.
for f in files:
self.assertEqual(tf.matching_files(f.name).eval(),
tf.compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
pos = files[0].name.find(cases[0])
pattern = files[0].name[:pos] + 'AB%sDEF.GH*'
self.assertEqual(set(tf.matching_files(pattern % 'z').eval()),
self._subset(files, [1]))
self.assertEqual(set(tf.matching_files(pattern % '?').eval()),
self._subset(files, [0, 1, 3, 4]))
self.assertEqual(set(tf.matching_files(pattern % '*').eval()),
self._subset(files, [0, 1, 2, 3, 4, 5]))
# NOTE(mrry): Windows uses PathMatchSpec to match file patterns, which
# does not support the following expressions.
if os.name != 'nt':
self.assertEqual(set(tf.matching_files(pattern % '[cxz]').eval()),
self._subset(files, [0, 1]))
self.assertEqual(set(tf.matching_files(pattern % '[0-9]').eval()),
self._subset(files, [3, 4]))
for f in files:
f.close()
if __name__ == '__main__':
tf.test.main()
| sandeepdsouza93/TensorFlow-15712 | tensorflow/python/kernel_tests/io_ops_test.py | Python | apache-2.0 | 3,819 |
import logging
from flask import request, flash, abort, Response
from flask_admin import expose
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import wrap_fields_in_fieldlist
from flask_admin.model.fields import ListEditableFieldList
from flask_admin._compat import iteritems, string_types
import mongoengine
import gridfs
from mongoengine.connection import get_db
from bson.objectid import ObjectId
from flask_admin.actions import action
from .filters import FilterConverter, BaseMongoEngineFilter
from .form import get_form, CustomModelConverter
from .typefmt import DEFAULT_FORMATTERS
from .tools import parse_like_term
from .helpers import format_error
from .ajax import process_ajax_references, create_ajax_loader
from .subdoc import convert_subdocuments
# Set up logger
log = logging.getLogger("flask-admin.mongo")
SORTABLE_FIELDS = set((
mongoengine.StringField,
mongoengine.IntField,
mongoengine.FloatField,
mongoengine.BooleanField,
mongoengine.DateTimeField,
mongoengine.ComplexDateTimeField,
mongoengine.ObjectIdField,
mongoengine.DecimalField,
mongoengine.ReferenceField,
mongoengine.EmailField,
mongoengine.UUIDField,
mongoengine.URLField
))
class ModelView(BaseModelView):
"""
MongoEngine model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.mongoengine.filters.BaseFilter`
classes.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'))
"""
model_form_converter = CustomModelConverter
"""
Model form conversion class. Use this to implement custom
field conversion logic.
Custom class should be derived from the
`flask_admin.contrib.mongoengine.form.CustomModelConverter`.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
object_id_converter = ObjectId
"""
Mongodb ``_id`` value conversion function. Default is `bson.ObjectId`.
Use this if you are using String, Binary, etc.
For example::
class MyModelView(BaseModelView):
object_id_converter = int
or::
class MyModelView(BaseModelView):
object_id_converter = str
"""
filter_converter = FilterConverter()
"""
Field to filter converter.
Override this attribute to use a non-default converter.
"""
column_type_formatters = DEFAULT_FORMATTERS
"""
Customized type formatters for MongoEngine backend
"""
allowed_search_types = (mongoengine.StringField,
mongoengine.URLField,
mongoengine.EmailField)
"""
List of allowed search field types.
"""
form_subdocuments = None
"""
Subdocument configuration options.
This field accepts a dictionary, where the key is a field name and the value is either a dictionary or an instance of
`flask_admin.contrib.EmbeddedForm`.
Consider the following example::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.EmbeddedDocumentField(Comment)
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_columns': ('name',)
}
}
In this example, the `Post` model has a child `Comment` subdocument. When generating a form for the `Comment` embedded
document, Flask-Admin will only create the `name` field.
It is also possible to use class-based embedded document configuration::
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
Arbitrary depth nesting is supported::
class SomeEmbed(EmbeddedForm):
form_excluded_columns = ('test',)
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
form_subdocuments = {
'inner': SomeEmbed()
}
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
There's also support for forms embedded into `ListField`. All you have
to do is create a nested rule with `None` as the name. It is slightly
confusing, but that's how Flask-MongoEngine creates form fields
embedded into a ListField::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.ListField(db.EmbeddedDocumentField(Comment))
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_subdocuments': {
None: {
'form_columns': ('name',)
}
}
}
}
"""
def __init__(self, model, name=None,
category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor
:param model:
Model class
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self._search_fields = []
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._primary_key = self.scaffold_pk()
def _refresh_cache(self):
"""
Refresh cache.
"""
# Process subdocuments
if self.form_subdocuments is None:
self.form_subdocuments = {}
self._form_subdocuments = convert_subdocuments(self.form_subdocuments)
# Cache other properties
super(ModelView, self)._refresh_cache()
def _process_ajax_references(self):
"""
AJAX endpoint is exposed by top-level admin view class, but
subdocuments might have AJAX references too.
This method will recursively go over subdocument configuration
and will precompute AJAX references for them ensuring that
subdocuments can also use AJAX to populate their ReferenceFields.
"""
references = super(ModelView, self)._process_ajax_references()
return process_ajax_references(references, self)
def _get_model_fields(self, model=None):
"""
Inspect model and return list of model fields
:param model:
Model to inspect
"""
if model is None:
model = self.model
return sorted(iteritems(model._fields), key=lambda n: n[1].creation_counter)
def scaffold_pk(self):
# MongoEngine models have predefined 'id' as a key
return 'id'
def get_pk_value(self, model):
"""
Return the primary key value from the model instance
:param model:
Model instance
"""
return model.pk
def scaffold_list_columns(self):
"""
Scaffold list columns
"""
columns = []
for n, f in self._get_model_fields():
# Verify type
field_class = type(f)
if (field_class == mongoengine.ListField and
isinstance(f.field, mongoengine.EmbeddedDocumentField)):
continue
if field_class == mongoengine.EmbeddedDocumentField:
continue
if self.column_display_pk or field_class != mongoengine.ObjectIdField:
columns.append(n)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns (name, field)
"""
columns = {}
for n, f in self._get_model_fields():
if type(f) in SORTABLE_FIELDS:
if self.column_display_pk or type(f) != mongoengine.ObjectIdField:
columns[n] = f
return columns
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if isinstance(p, string_types):
p = self.model._fields.get(p)
if p is None:
raise Exception('Invalid search field')
field_type = type(p)
# Check type
if (field_type not in self.allowed_search_types):
raise Exception('Can only search on text columns. ' +
'Failed to setup search for "%s"' % p)
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, name):
"""
Return filter object(s) for the field
:param name:
Either field name or field instance
"""
if isinstance(name, string_types):
attr = self.model._fields.get(name)
else:
attr = name
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Find name
visible_name = None
if not isinstance(name, string_types):
visible_name = self.get_column_name(attr.name)
if not visible_name:
visible_name = self.get_column_name(name)
# Convert filter
type_name = type(attr).__name__
flt = self.filter_converter.convert(type_name,
attr,
visible_name)
return flt
def is_valid_filter(self, filter):
"""
Validate if the provided filter is a valid MongoEngine filter
:param filter:
Filter object
"""
return isinstance(filter, BaseMongoEngineFilter)
def scaffold_form(self):
"""
Create form from the model.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
return form_class
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return wrap_fields_in_fieldlist(self.form_base_class,
form_class,
custom_fieldlist)
# AJAX foreignkey support
def _create_ajax_loader(self, name, opts):
return create_ajax_loader(self.model, name, name, opts)
def get_query(self):
"""
Returns the QuerySet for this view. By default, it returns all the
objects for the current model.
"""
return self.model.objects
def _search(self, query, search_term):
# TODO: Unfortunately, MongoEngine contains bug which
# prevents running complex Q queries and, as a result,
# Flask-Admin does not support per-word searching like
# in other backends
op, term = parse_like_term(search_term)
criteria = None
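# OR together a "<field>__<op>" term for every searchable field so a match in
# any one of them satisfies the search.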
for field in self._search_fields:
flt = {'%s__%s' % (field.name, op): term}
q = mongoengine.Q(**flt)
if criteria is None:
criteria = q
else:
criteria |= q
return query.filter(criteria)
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True):
"""
Get list of objects from MongoEngine
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
"""
query = self.get_query()
# Filters
if self._filters:
for flt, flt_name, value in filters:
f = self._filters[flt]
query = f.apply(query, f.clean(value))
# Search
if self._search_supported and search:
query = self._search(query, search)
# Get count
count = query.count() if not self.simple_list_pager else None
# Sorting
if sort_column:
query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column))
else:
order = self._get_default_order()
if order:
query = query.order_by('%s%s' % ('-' if order[1] else '', order[0]))
# Pagination
if page is not None:
query = query.skip(page * self.page_size)
query = query.limit(self.page_size)
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model instance by its ID
:param id:
Model ID
"""
try:
return self.get_query().filter(pk=id).first()
except mongoengine.ValidationError as ex:
flash(gettext('Failed to get model. %(error)s',
error=format_error(ex)),
'error')
return None
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self._on_model_change(form, model, True)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to create record.')
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to update record.')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
self.on_model_delete(model)
model.delete()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to delete record.')
return False
else:
self.after_model_delete(model)
return True
# FileField access API
@expose('/api/file/')
def api_file_view(self):
pk = request.args.get('id')
coll = request.args.get('coll')
db = request.args.get('db', 'default')
if not pk or not coll or not db:
abort(404)
fs = gridfs.GridFS(get_db(db), coll)
data = fs.get(self.object_id_converter(pk))
if not data:
abort(404)
return Response(data.read(),
content_type=data.content_type,
headers={
'Content-Length': data.length
})
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
count = 0
all_ids = [self.object_id_converter(pk) for pk in ids]
for obj in self.get_query().in_bulk(all_ids).values():
count += self.delete_model(obj)
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete records. %(error)s', error=str(ex)),
'error')
| hexlism/css_platform | sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py | Python | apache-2.0 | 20,150 |
import logging
import multiprocessing
import os
from mimetypes import guess_type
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from zerver.lib.avatar_hash import user_avatar_path
from zerver.lib.upload import S3UploadBackend, upload_image_to_s3
from zerver.models import Attachment, RealmEmoji, UserProfile
s3backend = S3UploadBackend()
def transfer_uploads_to_s3(processes: int) -> None:
# TODO: Eventually, we'll want to add realm icon and logo
transfer_avatars_to_s3(processes)
transfer_message_files_to_s3(processes)
transfer_emoji_to_s3(processes)
def _transfer_avatar_to_s3(user: UserProfile) -> None:
avatar_path = user_avatar_path(user)
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path) + ".original"
try:
with open(file_path, "rb") as f:
s3backend.upload_avatar_image(f, user, user)
logging.info("Uploaded avatar for %s in realm %s", user.id, user.realm.name)
except FileNotFoundError:
pass
def transfer_avatars_to_s3(processes: int) -> None:
users = list(UserProfile.objects.all())
if processes == 1:
for user in users:
_transfer_avatar_to_s3(user)
else: # nocoverage
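# Close the shared database connection and cache client before forking,
# presumably so each worker process re-establishes its own connections.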
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(_transfer_avatar_to_s3, users):
pass
def _transfer_message_files_to_s3(attachment: Attachment) -> None:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", attachment.path_id)
try:
with open(file_path, "rb") as f:
guessed_type = guess_type(attachment.file_name)[0]
upload_image_to_s3(
s3backend.uploads_bucket,
attachment.path_id,
guessed_type,
attachment.owner,
f.read(),
)
logging.info("Uploaded message file in path %s", file_path)
except FileNotFoundError: # nocoverage
pass
def transfer_message_files_to_s3(processes: int) -> None:
attachments = list(Attachment.objects.all())
if processes == 1:
for attachment in attachments:
_transfer_message_files_to_s3(attachment)
else: # nocoverage
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(_transfer_message_files_to_s3, attachments):
pass
def _transfer_emoji_to_s3(realm_emoji: RealmEmoji) -> None:
if not realm_emoji.file_name or not realm_emoji.author:
return # nocoverage
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=realm_emoji.realm.id,
emoji_file_name=realm_emoji.file_name,
)
emoji_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", emoji_path) + ".original"
try:
with open(emoji_path, "rb") as f:
s3backend.upload_emoji_image(f, realm_emoji.file_name, realm_emoji.author)
logging.info("Uploaded emoji file in path %s", emoji_path)
except FileNotFoundError: # nocoverage
pass
def transfer_emoji_to_s3(processes: int) -> None:
realm_emojis = list(RealmEmoji.objects.filter())
if processes == 1:
for realm_emoji in realm_emojis:
_transfer_emoji_to_s3(realm_emoji)
else: # nocoverage
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(_transfer_emoji_to_s3, realm_emojis):
pass
| andersk/zulip | zerver/lib/transfer.py | Python | apache-2.0 | 3,678 |
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
from time import sleep
import pexpect as pexpect
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.config import GPDBConfig
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.walrepl.run import StandbyRunMixin
from mpp.gpdb.tests.storage.walrepl.lib.verify import StandbyVerify
from mpp.gpdb.tests.storage.walrepl.lib import WalReplException
from mpp.gpdb.tests.storage.walrepl.lib.pg_util import GpUtility
class GpinitStandby(object):
'''Class for gpinitstandby operations
Disclaimer: Some of these may repeat with the mpp/lib version'''
def __init__(self):
self.stdby = StandbyVerify()
self.runmixin = StandbyRunMixin()
self.runmixin.createdb(dbname='walrepl')
self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
self.config = GPDBConfig()
self.pgutil = GpUtility()
self.host = socket.gethostname()
def run(self, option = ''):
'''Runs gpinitstandby and returns True if successful'''
gpinitstandby_cmd = 'gpinitstandby -a %s' % option
cmd = Command(name='Running Gpinitstandby', cmdStr="%s" % gpinitstandby_cmd)
tinctest.logger.info(" %s" % cmd)
cmd.run(validateAfter=False)
result = cmd.get_results()
if result.rc != 0:
return False
return True
def verify_gpinitstandby(self, primary_pid):
'''Verify the presence of standby in recovery mode '''
if (self.stdby.check_gp_segment_config() and self.stdby.check_pg_stat_replication() and self.stdby.check_standby_processes() and self.compare_primary_pid(primary_pid)):
return True
return False
def get_masterhost(self):
std_sql = "select hostname from gp_segment_configuration where content=-1 and role='p';"
master_host = PSQL.run_sql_command(std_sql, flags = '-q -t', dbname= 'postgres')
return master_host.strip()
def get_standbyhost(self):
std_sql = "select hostname from gp_segment_configuration where content='-1' and role='m';"
standby_host = PSQL.run_sql_command(std_sql, flags = '-q -t', dbname= 'postgres')
return standby_host.strip()
def get_filespace_location(self):
fs_sql = "select fselocation from pg_filespace_entry where fselocation like '%fs_walrepl_a%' and fsedbid=1;"
filespace_loc = PSQL.run_sql_command(fs_sql, flags = '-q -t', dbname= 'postgres')
return filespace_loc.strip()
def get_standbyhostnode(self):
'''
Function used to obtain the hostname of one of the segment nodes in order to use it as the standby master node
@return : returns the hostname of the segment node which can be used as the standby master node
'''
hostlist = self.config.get_hosts()
standby = ''
for host in hostlist:
if host.strip() != self.host:
standby = host.strip()
if len(standby) > 0 :
return standby
else:
tinctest.logger.error('No segment host other than master available to have remote standby')
def get_primary_pid(self):
pid = self.pgutil.get_pid_by_keyword(pgport=os.environ.get('PGPORT'), keyword=self.mdd)
if int(pid) == -1:
raise WalReplException('Unable to get pid of primary master process')
else:
return int(pid)
def compare_primary_pid(self, initial_pid):
final_pid = self.get_primary_pid()
if initial_pid == final_pid :
return True
return False
def create_dir_on_standby(self, standby, location):
fs_cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s' " % (standby, location, location)
cmd = Command(name='Make directory on standby before running the command', cmdStr = fs_cmd)
tinctest.logger.info('%s' % cmd)
cmd.run(validateAfter=True)
result = cmd.get_results()
if result.rc != 0:
raise WalReplException('Unable to create directory on standby')
else:
return True
def initstand_by_with_default(self):
master_host = self.get_masterhost()
gp_cmd = "/bin/bash -c 'gpinitstandby -s %s'" % (master_host)
logfile = open(local_path('install.log'),'w')
child = pexpect.spawn(gp_cmd, timeout=400)
child.logfile = logfile
sleep(2)
check = child.expect(['.* Enter standby filespace location for filespace pg_system .*', ' '])
if check != 0:
child.close()
l_file = open(local_path('install.log'),'r')
lines = l_file.readlines()
for line in lines:
if 'default: NA' in line:
return True
return False
def init_with_prompt(self,filespace_loc):
standby = self.get_standbyhostnode()
gp_cmd = "/bin/bash -c 'gpinitstandby -s %s -a'" % (standby)
logfile = open(local_path('install2.log'),'w')
child = pexpect.spawn(gp_cmd, timeout=400)
child.logfile = logfile
sleep(5)
check = child.expect(['.* Enter standby filespace location for filespace.*', ' '])
child.sendline(filespace_loc)
sleep(10)
check = child.expect(['.*Successfully created standby master.*'])
if check != 0:
tinctest.logger.error('gpinitstandy failed')
return False
child.close()
return True
| edespino/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/gpinitstandby/__init__.py | Python | apache-2.0 | 6,141 |
import functools
from django import http
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from olympia import amo
from olympia.access import acl
from olympia.addons.decorators import addon_view_factory
from olympia.addons.models import Addon
from olympia.amo.decorators import login_required
def dev_required(owner_for_post=False, allow_reviewers=False, theme=False,
submitting=False):
"""Requires user to be add-on owner or admin.
When allow_reviewers is True, reviewers can view the page.
"""
def decorator(f):
@addon_view_factory(qs=Addon.objects.all)
@login_required
@functools.wraps(f)
def wrapper(request, addon, *args, **kw):
if theme:
kw['theme'] = addon.is_persona()
elif addon.is_persona():
# Don't allow theme views if theme not passed in.
raise http.Http404
def fun():
return f(request, addon_id=addon.id, addon=addon, *args, **kw)
if allow_reviewers:
if acl.is_reviewer(request, addon):
return fun()
# Require an owner or dev for POST requests.
if request.method == 'POST':
if acl.check_addon_ownership(request, addon,
dev=not owner_for_post):
return fun()
# Ignore disabled so they can view their add-on.
elif acl.check_addon_ownership(request, addon, dev=True,
ignore_disabled=True):
# Redirect to the submit flow if they're not done.
if (not submitting and addon.should_redirect_to_submit_flow()):
return redirect('devhub.submit.details', addon.slug)
return fun()
raise PermissionDenied
return wrapper
# The arg will be a function if they didn't pass owner_for_post.
if callable(owner_for_post):
f = owner_for_post
owner_for_post = False
return decorator(f)
else:
return decorator
def no_admin_disabled(f):
"""Requires the addon not be STATUS_DISABLED (mozilla admin disabled)."""
@functools.wraps(f)
def wrapper(*args, **kw):
addon = kw.get('addon')
if addon and addon.status == amo.STATUS_DISABLED:
raise http.Http404()
return f(*args, **kw)
return wrapper
| aviarypl/mozilla-l10n-addons-server | src/olympia/devhub/decorators.py | Python | bsd-3-clause | 2,494 |
# -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast, implementation of
the recurrence rules documented in the
`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_,
including support for caching of results.
"""
import itertools
import datetime
import calendar
import re
import sys
try:
from math import gcd
except ImportError:
from fractions import gcd
from six import advance_iterator, integer_types
from six.moves import _thread, range
import heapq
from ._common import weekday as weekdaybase
from .tz import tzutc, tzlocal
# For warning about deprecation of until and count
from warnings import warn
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
parser = None
class weekday(weekdaybase):
"""
This version of weekday does not allow n = 0.
"""
def __init__(self, wkday, n=None):
if n == 0:
raise ValueError("Can't create weekday with n==0")
super(weekday, self).__init__(wkday, n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
def _invalidates_cache(f):
"""
Decorator for rruleset methods which may invalidate the
cached length.
"""
def inner_func(self, *args, **kwargs):
rv = f(self, *args, **kwargs)
self._invalidate_cache()
return rv
return inner_func
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._invalidate_cache()
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _invalidate_cache(self):
if self._cache is not None:
self._cache = []
self._cache_complete = False
self._cache_gen = self._iter()
if self._cache_lock.locked():
self._cache_lock.release()
self._len = None
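# Yield items from the cache, extending it ten entries at a time from the
# underlying generator (under the cache lock) until that generator is
# exhausted, then serve any remaining cached items.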
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
# __len__() introduces a large performance penalty.
def count(self):
""" Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. """
if self._len is None:
for x in self:
pass
return self._len
def before(self, dt, inc=False):
""" Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
""" Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def xafter(self, dt, count=None, inc=False):
"""
Generator which yields up to `count` recurrences after the given
datetime instance, equivalent to `after`.
:param dt:
The datetime at which to start generating recurrences.
:param count:
The maximum number of recurrences to generate. If `None` (default),
dates are generated until the recurrence rule is exhausted.
:param inc:
If `dt` is an instance of the rule and `inc` is `True`, it is
included in the output.
:yields: Yields a sequence of `datetime` objects.
"""
if self._cache_complete:
gen = self._cache
else:
gen = self
# Select the comparison function
if inc:
comp = lambda dc, dtc: dc >= dtc
else:
comp = lambda dc, dtc: dc > dtc
# Generate dates
n = 0
for d in gen:
if comp(d, dt):
if count is not None:
n += 1
if n > count:
break
yield d
def between(self, after, before, inc=False, count=1):
""" Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. """
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
.. note::
Per RFC section 3.3.10, recurrence instances falling on invalid dates
and times are ignored rather than coerced:
Recurrence rules may generate recurrence instances with an invalid
date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
on a day where the local time is moved forward by an hour at 1:00
AM). Such recurrence instances MUST be ignored and MUST NOT be
counted as part of the recurrence set.
This can lead to possibly surprising behavior when, for example, the
start date occurs at the end of the month:
>>> from dateutil.rrule import rrule, MONTHLY
>>> from datetime import datetime
>>> start_date = datetime(2014, 12, 31)
>>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
... # doctest: +NORMALIZE_WHITESPACE
[datetime.datetime(2014, 12, 31, 0, 0),
datetime.datetime(2015, 1, 31, 0, 0),
datetime.datetime(2015, 3, 31, 0, 0),
datetime.datetime(2015, 5, 31, 0, 0)]
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week start is retrieved
from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
How many occurrences will be generated.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
:param until:
If given, this must be a datetime instance, that will specify the
limit of the recurrence. The last recurrence in the rule is the greatest
datetime that is less than or equal to the value specified in the
``until`` parameter.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
"""
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
# Cache the original byxxx rules, if they are provided, as the _byxxx
# attributes do not necessarily map to the inputs, and this can be
# a problem in generating the strings. Only store things if they've
# been supplied (the string retrieval will just use .get())
self._original_rule = {}
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if self._dtstart and self._until:
if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None):
# According to RFC5545 Section 3.3.10:
# https://tools.ietf.org/html/rfc5545#section-3.3.10
#
# > If the "DTSTART" property is specified as a date with UTC
# > time or a date with local time and time zone reference,
# > then the UNTIL rule part MUST be specified as a date with
# > UTC time.
raise ValueError(
'RRULE UNTIL values must be specified in UTC when DTSTART '
'is timezone-aware'
)
if count is not None and until:
warn("Using both 'count' and 'until' is inconsistent with RFC 5545"
" and has been deprecated in dateutil. Future versions will "
"raise an error.", DeprecationWarning)
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if self._bysetpos:
self._original_rule['bysetpos'] = self._bysetpos
if (byweekno is None and byyearday is None and bymonthday is None and
byweekday is None and byeaster is None):
if freq == YEARLY:
if bymonth is None:
bymonth = dtstart.month
self._original_rule['bymonth'] = None
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == MONTHLY:
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == WEEKLY:
byweekday = dtstart.weekday()
self._original_rule['byweekday'] = None
# bymonth
if bymonth is None:
self._bymonth = None
else:
if isinstance(bymonth, integer_types):
bymonth = (bymonth,)
self._bymonth = tuple(sorted(set(bymonth)))
if 'bymonth' not in self._original_rule:
self._original_rule['bymonth'] = self._bymonth
# byyearday
if byyearday is None:
self._byyearday = None
else:
if isinstance(byyearday, integer_types):
byyearday = (byyearday,)
self._byyearday = tuple(sorted(set(byyearday)))
self._original_rule['byyearday'] = self._byyearday
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(sorted(byeaster))
self._original_rule['byeaster'] = self._byeaster
else:
self._byeaster = None
# bymonthday
if bymonthday is None:
self._bymonthday = ()
self._bynmonthday = ()
else:
if isinstance(bymonthday, integer_types):
bymonthday = (bymonthday,)
bymonthday = set(bymonthday) # Ensure it's unique
self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
# Storing positive numbers first, then negative numbers
if 'bymonthday' not in self._original_rule:
self._original_rule['bymonthday'] = tuple(
itertools.chain(self._bymonthday, self._bynmonthday))
# byweekno
if byweekno is None:
self._byweekno = None
else:
if isinstance(byweekno, integer_types):
byweekno = (byweekno,)
self._byweekno = tuple(sorted(set(byweekno)))
self._original_rule['byweekno'] = self._byweekno
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
else:
# If it's one of the valid non-sequence types, convert to a
# single-element sequence before the iterator that builds the
# byweekday set.
if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
byweekday = (byweekday,)
self._byweekday = set()
self._bynweekday = set()
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.add(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.add(wday.weekday)
else:
self._bynweekday.add((wday.weekday, wday.n))
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
if self._byweekday is not None:
self._byweekday = tuple(sorted(self._byweekday))
orig_byweekday = [weekday(x) for x in self._byweekday]
else:
orig_byweekday = ()
if self._bynweekday is not None:
self._bynweekday = tuple(sorted(self._bynweekday))
orig_bynweekday = [weekday(*x) for x in self._bynweekday]
else:
orig_bynweekday = ()
if 'byweekday' not in self._original_rule:
self._original_rule['byweekday'] = tuple(itertools.chain(
orig_byweekday, orig_bynweekday))
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = {dtstart.hour}
else:
self._byhour = None
else:
if isinstance(byhour, integer_types):
byhour = (byhour,)
if freq == HOURLY:
self._byhour = self.__construct_byset(start=dtstart.hour,
byxxx=byhour,
base=24)
else:
self._byhour = set(byhour)
self._byhour = tuple(sorted(self._byhour))
self._original_rule['byhour'] = self._byhour
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = {dtstart.minute}
else:
self._byminute = None
else:
if isinstance(byminute, integer_types):
byminute = (byminute,)
if freq == MINUTELY:
self._byminute = self.__construct_byset(start=dtstart.minute,
byxxx=byminute,
base=60)
else:
self._byminute = set(byminute)
self._byminute = tuple(sorted(self._byminute))
self._original_rule['byminute'] = self._byminute
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = ((dtstart.second,))
else:
self._bysecond = None
else:
if isinstance(bysecond, integer_types):
bysecond = (bysecond,)
            if freq == SECONDLY:
self._bysecond = self.__construct_byset(start=dtstart.second,
byxxx=bysecond,
base=60)
else:
self._bysecond = set(bysecond)
self._bysecond = tuple(sorted(self._bysecond))
self._original_rule['bysecond'] = self._bysecond
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def __str__(self):
"""
Output a string that would generate this RRULE if passed to rrulestr.
This is mostly compatible with RFC5545, except for the
dateutil-specific extension BYEASTER.
"""
output = []
h, m, s = [None] * 3
if self._dtstart:
output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
h, m, s = self._dtstart.timetuple()[3:6]
parts = ['FREQ=' + FREQNAMES[self._freq]]
if self._interval != 1:
parts.append('INTERVAL=' + str(self._interval))
if self._wkst:
parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
if self._count is not None:
parts.append('COUNT=' + str(self._count))
if self._until:
parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
if self._original_rule.get('byweekday') is not None:
# The str() method on weekday objects doesn't generate
# RFC5545-compliant strings, so we should modify that.
original_rule = dict(self._original_rule)
wday_strings = []
for wday in original_rule['byweekday']:
if wday.n:
wday_strings.append('{n:+d}{wday}'.format(
n=wday.n,
wday=repr(wday)[0:2]))
else:
wday_strings.append(repr(wday))
original_rule['byweekday'] = wday_strings
else:
original_rule = self._original_rule
partfmt = '{name}={vals}'
for name, key in [('BYSETPOS', 'bysetpos'),
('BYMONTH', 'bymonth'),
('BYMONTHDAY', 'bymonthday'),
('BYYEARDAY', 'byyearday'),
('BYWEEKNO', 'byweekno'),
('BYDAY', 'byweekday'),
('BYHOUR', 'byhour'),
('BYMINUTE', 'byminute'),
('BYSECOND', 'bysecond'),
('BYEASTER', 'byeaster')]:
value = original_rule.get(key)
if value:
parts.append(partfmt.format(name=name, vals=(','.join(str(v)
for v in value))))
output.append('RRULE:' + ';'.join(parts))
return '\n'.join(output)
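    # Illustrative sketch (not part of the original module): assuming the
    # default calendar first weekday, str(rrule(DAILY, count=3,
    # dtstart=datetime.datetime(2000, 1, 1))) produces
    #
    #     DTSTART:20000101T000000
    #     RRULE:FREQ=DAILY;COUNT=3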
def replace(self, **kwargs):
"""Return new rrule with same attributes except for those attributes given new
values by whichever keyword arguments are specified."""
new_kwargs = {"interval": self._interval,
"count": self._count,
"dtstart": self._dtstart,
"freq": self._freq,
"until": self._until,
"wkst": self._wkst,
"cache": False if self._cache is None else True }
new_kwargs.update(self._original_rule)
new_kwargs.update(kwargs)
return rrule(**new_kwargs)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY: ii.ydayset,
MONTHLY: ii.mdayset,
WEEKLY: ii.wdayset,
DAILY: ii.ddayset,
HOURLY: ii.ddayset,
MINUTELY: ii.ddayset,
SECONDLY: ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY: ii.htimeset,
MINUTELY: ii.mtimeset,
SECONDLY: ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday and
-ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
-ii.nextyearlen+i-ii.yearlen not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
if count is not None:
count -= 1
if count < 0:
self._len = total
return
total += 1
yield res
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal + i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
if count is not None:
count -= 1
if count < 0:
self._len = total
return
total += 1
yield res
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
if byhour:
ndays, hour = self.__mod_distance(value=hour,
byxxx=self._byhour,
base=24)
else:
ndays, hour = divmod(hour+interval, 24)
if ndays:
day += ndays
fixday = True
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
valid = False
rep_rate = (24*60)
for j in range(rep_rate // gcd(interval, rep_rate)):
if byminute:
nhours, minute = \
self.__mod_distance(value=minute,
byxxx=self._byminute,
base=60)
else:
nhours, minute = divmod(minute+interval, 60)
div, hour = divmod(hour+nhours, 24)
if div:
day += div
fixday = True
filtered = False
if not byhour or hour in byhour:
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval and ' +
'byhour resulting in empty rule.')
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399 - (hour * 3600 + minute * 60 + second))
// interval) * interval)
rep_rate = (24 * 3600)
valid = False
for j in range(0, rep_rate // gcd(interval, rep_rate)):
if bysecond:
nminutes, second = \
self.__mod_distance(value=second,
byxxx=self._bysecond,
base=60)
else:
nminutes, second = divmod(second+interval, 60)
div, minute = divmod(minute+nminutes, 60)
if div:
hour += div
div, hour = divmod(hour, 24)
if div:
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval, ' +
'byhour and byminute resulting in empty' +
' rule.')
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
def __construct_byset(self, start, byxxx, base):
"""
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
        This occurs whenever the interval is not coprime with the base of a
        given unit and the difference between the starting position and the
        target value is not a multiple of the greatest common divisor of the
        interval and the base. For example, with a FREQ of hourly starting at
        17:00 and an interval of 4, the only valid values for BYHOUR would be
        {21, 1, 5, 9, 13, 17}, because 4 and 24 are not coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
        This does not preserve the type of the iterable: it returns a set,
        since the values should be unique and the order is irrelevant, which
        speeds up later lookups.
In the event of an empty set, raises a :exception:`ValueError`, as this
results in an empty rrule.
"""
cset = set()
# Support a single byxxx value.
if isinstance(byxxx, integer_types):
byxxx = (byxxx, )
for num in byxxx:
i_gcd = gcd(self._interval, base)
# Use divmod rather than % because we need to wrap negative nums.
if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
cset.add(num)
if len(cset) == 0:
raise ValueError("Invalid rrule byxxx generates an empty set.")
return cset
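    # Worked example for __construct_byset (illustrative, not part of the
    # original module): with start=17, interval=4 and base=24, gcd(4, 24) == 4,
    # so a BYHOUR value is kept only if (value - 17) is a multiple of 4; the
    # resulting set is {1, 5, 9, 13, 17, 21}, while e.g. 2 or 7 are rejected.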
def __mod_distance(self, value, byxxx, base):
"""
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
        The search is bounded by `base` iterations (the maximum number before
        the sequence would start to repeat); a `byxxx` set built by
        `__construct_byset` is guaranteed to contain a reachable value.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
"""
accumulator = 0
for ii in range(1, base + 1):
# Using divmod() over % to account for negative intervals
div, value = divmod(value + self._interval, base)
accumulator += div
if value in byxxx:
return (accumulator, value)
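    # Worked example for __mod_distance (illustrative, not part of the
    # original module): with value=17, interval=4, base=24 and byxxx={5},
    # the loop steps 17 -> 21 -> 1 -> 5, wrapping past 24 once, and
    # returns (1, 5).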
class _iterinfo(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365 + calendar.isleap(year)
self.nextyearlen = 365 + calendar.isleap(year + 1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
# no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1, 1, 1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst) % 7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen +
(lyearweekday-rr._wkst) % 7) % 7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and (month != self.lastmonth or
year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday) % 7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday) % 7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return list(range(self.yearlen)), 0, self.yearlen
def mdayset(self, year, month, day):
dset = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
dset[i] = i
return dset, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
dset = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
dset[i] = i
i += 1
# if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return dset, start, i
def ddayset(self, year, month, day):
dset = [None] * self.yearlen
i = datetime.date(year, month, day).toordinal() - self.yearordinal
dset[i] = i
return dset, i, i + 1
def htimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
tset.sort()
return tset
def mtimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
tset.sort()
return tset
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
""" The rruleset type allows more complex recurrence setups, mixing
multiple rules, dates, exclusion rules, and exclusion dates. The type
constructor takes the following keyword arguments:
:param cache: If True, caching of results will be enabled, improving
performance of multiple queries considerably. """
class _genitem(object):
def __init__(self, genlist, gen):
try:
self.dt = advance_iterator(gen)
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def __next__(self):
try:
self.dt = advance_iterator(self.gen)
except StopIteration:
if self.genlist[0] is self:
heapq.heappop(self.genlist)
else:
self.genlist.remove(self)
heapq.heapify(self.genlist)
next = __next__
def __lt__(self, other):
return self.dt < other.dt
def __gt__(self, other):
return self.dt > other.dt
def __eq__(self, other):
return self.dt == other.dt
def __ne__(self, other):
return self.dt != other.dt
def __init__(self, cache=False):
super(rruleset, self).__init__(cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
@_invalidates_cache
def rrule(self, rrule):
""" Include the given :py:class:`rrule` instance in the recurrence set
generation. """
self._rrule.append(rrule)
@_invalidates_cache
def rdate(self, rdate):
""" Include the given :py:class:`datetime` instance in the recurrence
set generation. """
self._rdate.append(rdate)
@_invalidates_cache
def exrule(self, exrule):
""" Include the given rrule instance in the recurrence set exclusion
list. Dates which are part of the given recurrence rules will not
be generated, even if some inclusive rrule or rdate matches them.
"""
self._exrule.append(exrule)
@_invalidates_cache
def exdate(self, exdate):
""" Include the given datetime instance in the recurrence set
exclusion list. Dates included that way will not be generated,
even if some inclusive rrule or rdate matches them. """
self._exdate.append(exdate)
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate))
for gen in [iter(x) for x in self._rrule]:
self._genitem(rlist, gen)
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate))
for gen in [iter(x) for x in self._exrule]:
self._genitem(exlist, gen)
lastdt = None
total = 0
heapq.heapify(rlist)
heapq.heapify(exlist)
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
exitem = exlist[0]
advance_iterator(exitem)
if exlist and exlist[0] is exitem:
heapq.heapreplace(exlist, exitem)
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
advance_iterator(ritem)
if rlist and rlist[0] is ritem:
heapq.heapreplace(rlist, ritem)
self._len = total
class _rrulestr(object):
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
"FR": 4, "SA": 5, "SU": 6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError("invalid until date")
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
"""
Two ways to specify this: +1MO or MO(+1)
"""
l = []
for wday in value.split(','):
if '(' in wday:
# If it's of the form TH(+1), etc.
splt = wday.split('(')
w = splt[0]
n = int(splt[1][:-1])
elif len(wday):
# If it's of the form +1MO
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n:
n = int(n)
else:
raise ValueError("Invalid (empty) BYDAY specification.")
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError("unknown parameter name")
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError("unknown parameter '%s'" % name)
except (KeyError, ValueError):
raise ValueError("invalid '%s': %s" % (name, value))
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzids=None,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
TZID_NAMES = dict(map(
lambda x: (x.upper(), x),
re.findall('TZID=(?P<name>[^:]+):', s)
))
s = s.upper()
if not s.strip():
raise ValueError("empty string")
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError("unsupported RRULE parm: "+parm)
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError("unsupported EXRULE parm: "+parm)
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported EXDATE parm: "+parm)
exdatevals.append(value)
elif name == "DTSTART":
                    # RFC 5545 3.8.2.4: The VALUE parameter is optional, but
# may be found only once.
value_found = False
TZID = None
valid_values = {"VALUE=DATE-TIME", "VALUE=DATE"}
for parm in parms:
if parm.startswith("TZID="):
try:
tzkey = TZID_NAMES[parm.split('TZID=')[-1]]
except KeyError:
continue
if tzids is None:
from . import tz
tzlookup = tz.gettz
elif callable(tzids):
tzlookup = tzids
else:
tzlookup = getattr(tzids, 'get', None)
if tzlookup is None:
msg = ('tzids must be a callable, ' +
'mapping, or None, ' +
'not %s' % tzids)
raise ValueError(msg)
TZID = tzlookup(tzkey)
continue
if parm not in valid_values:
raise ValueError("unsupported DTSTART parm: "+parm)
else:
if value_found:
msg = ("Duplicate value parameter found in " +
"DTSTART: " + parm)
raise ValueError(msg)
value_found = True
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
if TZID is not None:
if dtstart.tzinfo is None:
dtstart = dtstart.replace(tzinfo=TZID)
else:
raise ValueError('DTSTART specifies multiple timezones')
else:
raise ValueError("unsupported property: "+name)
if (forceset or len(rrulevals) > 1 or rdatevals
or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
rset = rruleset(cache=cache)
for value in rrulevals:
rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
rset.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
rset.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
rset.rdate(dtstart)
return rset
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
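# Illustrative usage sketch (not part of the original module); the dates used
# below are arbitrary examples.
if __name__ == "__main__":
    start = datetime.datetime(2000, 1, 1, 9, 0)
    # Every other day, three occurrences.
    print(list(rrule(DAILY, interval=2, count=3, dtstart=start)))
    # Combine an inclusion rule with a single excluded date.
    rset = rruleset()
    rset.rrule(rrule(DAILY, count=5, dtstart=start))
    rset.exdate(start + datetime.timedelta(days=1))
    print(list(rset))
    # Round-trip through the RFC string parser.
    print(list(rrulestr("DTSTART:20000101T090000\nRRULE:FREQ=DAILY;COUNT=3")))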
|
ledtvavs/repository.ledtv
|
script.tvguide.Vader/resources/lib/dateutil/rrule.py
|
Python
|
gpl-3.0
| 64,642
|
""" API v0 views. """
import logging
from django.http import Http404
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from lms.djangoapps.ccx.utils import prep_course_for_grading
from lms.djangoapps.courseware import courses
from lms.djangoapps.grades.api.serializers import GradingPolicySerializer
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin
log = logging.getLogger(__name__)
class GradeViewMixin(DeveloperErrorViewMixin):
"""
Mixin class for Grades related views.
"""
authentication_classes = (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthentication,
)
permission_classes = (IsAuthenticated,)
def _get_course(self, course_key_string, user, access_action):
"""
Returns the course for the given course_key_string after
verifying the requested access to the course by the given user.
"""
try:
course_key = CourseKey.from_string(course_key_string)
except InvalidKeyError:
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The provided course key cannot be parsed.',
error_code='invalid_course_key'
)
try:
return courses.get_course_with_access(
user,
access_action,
course_key,
check_if_enrolled=True
)
except Http404:
log.info('Course with ID "%s" not found', course_key_string)
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The user, the course or both do not exist.',
error_code='user_or_course_does_not_exist'
)
def perform_authentication(self, request):
"""
        Ensures that the user is authenticated (e.g. not an AnonymousUser).
"""
super(GradeViewMixin, self).perform_authentication(request)
if request.user.is_anonymous():
raise AuthenticationFailed
class UserGradeView(GradeViewMixin, GenericAPIView):
"""
**Use Case**
* Get the current course grades for users in a course.
          Currently, only getting the grade for an individual user is supported.
**Example Request**
GET /api/grades/v0/course_grade/{course_id}/users/?username={username}
**GET Parameters**
A GET request must include the following parameters.
* course_id: A string representation of a Course ID.
* username: A string representation of a user's username.
**GET Response Values**
If the request for information about the course grade
is successful, an HTTP 200 "OK" response is returned.
The HTTP 200 response has the following values.
* username: A string representation of a user's username passed in the request.
* course_id: A string representation of a Course ID.
* passed: Boolean representing whether the course has been
          passed according to the course's grading policy.
* percent: A float representing the overall grade for the course
* letter_grade: A letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None
**Example GET Response**
[{
"username": "bob",
"course_key": "edX/DemoX/Demo_Course",
"passed": false,
"percent": 0.03,
"letter_grade": None,
}]
"""
def get(self, request, course_id):
"""
Gets a course progress status.
Args:
request (Request): Django request object.
course_id (string): URI element specifying the course location.
Return:
A JSON serialized representation of the requesting user's current grade status.
"""
username = request.GET.get('username')
# only the student can access her own grade status info
if request.user.username != username:
log.info(
'User %s tried to access the grade for user %s.',
request.user.username,
username
)
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The user requested does not match the logged in user.',
error_code='user_mismatch'
)
course = self._get_course(course_id, request.user, 'load')
if isinstance(course, Response):
return course
prep_course_for_grading(course, request)
course_grade = CourseGradeFactory().create(request.user, course)
return Response([{
'username': username,
'course_key': course_id,
'passed': course_grade.passed,
'percent': course_grade.percent,
'letter_grade': course_grade.letter_grade,
}])
class CourseGradingPolicy(GradeViewMixin, ListAPIView):
"""
**Use Case**
Get the course grading policy.
**Example requests**:
GET /api/grades/v0/policy/{course_id}/
**Response Values**
* assignment_type: The type of the assignment, as configured by course
staff. For example, course staff might make the assignment types Homework,
Quiz, and Exam.
* count: The number of assignments of the type.
* dropped: Number of assignments of the type that are dropped.
* weight: The weight, or effect, of the assignment type on the learner's
final grade.
"""
allow_empty = False
def get(self, request, course_id, **kwargs):
course = self._get_course(course_id, request.user, 'staff')
if isinstance(course, Response):
return course
return Response(GradingPolicySerializer(course.raw_grader, many=True).data)
|
naresh21/synergetics-edx-platform
|
lms/djangoapps/grades/api/views.py
|
Python
|
agpl-3.0
| 6,482
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.python_requirement import PythonRequirement
from pants.base.payload import Payload
from pants.base.payload_field import PythonRequirementsField
from pants.base.validation import assert_list
from pants.build_graph.target import Target
class PythonRequirementLibrary(Target):
"""A set of pip requirements.
:API: public
"""
def __init__(self, payload=None, requirements=None, **kwargs):
"""
:param requirements: pip requirements as `python_requirement <#python_requirement>`_\s.
:type requirements: List of python_requirement calls
"""
payload = payload or Payload()
assert_list(requirements, expected_type=PythonRequirement, key_arg='requirements')
payload.add_fields({
'requirements': PythonRequirementsField(requirements or []),
})
super(PythonRequirementLibrary, self).__init__(payload=payload, **kwargs)
self.add_labels('python')
@property
def requirements(self):
return self.payload.requirements
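# Illustrative BUILD usage sketch (the target and requirement names below are
# assumptions for illustration, not taken from this repository):
#
#     python_requirement_library(
#         name='requests',
#         requirements=[
#             python_requirement('requests>=2.0,<3.0'),
#         ],
#     )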
|
cevaris/pants
|
src/python/pants/backend/python/targets/python_requirement_library.py
|
Python
|
apache-2.0
| 1,293
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Classes and functions for building TensorFlow graphs.
## Core graph data structures
@@Graph
@@Operation
@@Tensor
## Tensor types
@@DType
@@as_dtype
## Utility functions
@@device
@@name_scope
@@control_dependencies
@@convert_to_tensor
@@convert_to_tensor_or_indexed_slices
@@get_default_graph
@@reset_default_graph
@@import_graph_def
@@load_file_system_library
@@load_op_library
## Graph collections
@@add_to_collection
@@get_collection
@@get_collection_ref
@@GraphKeys
## Defining new operations
@@RegisterGradient
@@NoGradient
@@RegisterShape
@@TensorShape
@@Dimension
@@op_scope
@@get_seed
## For libraries building on TensorFlow
@@register_tensor_conversion_function
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Classes used when building a Graph.
from tensorflow.python.framework.device import DeviceSpec
from tensorflow.python.framework.ops import Graph
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.framework.ops import SparseTensor
from tensorflow.python.framework.ops import SparseTensorValue
from tensorflow.python.framework.ops import IndexedSlices
# Utilities used when building a Graph.
from tensorflow.python.framework.ops import device
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.framework.ops import op_scope
from tensorflow.python.framework.ops import control_dependencies
from tensorflow.python.framework.ops import get_default_graph
from tensorflow.python.framework.ops import reset_default_graph
from tensorflow.python.framework.ops import GraphKeys
from tensorflow.python.framework.ops import add_to_collection
from tensorflow.python.framework.ops import get_collection
from tensorflow.python.framework.ops import get_collection_ref
from tensorflow.python.framework.ops import convert_to_tensor
from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices
from tensorflow.python.framework.random_seed import get_seed
from tensorflow.python.framework.random_seed import set_random_seed
from tensorflow.python.framework.importer import import_graph_def
# Needed when you defined a new Op in C++.
from tensorflow.python.framework.ops import RegisterGradient
from tensorflow.python.framework.ops import NoGradient
from tensorflow.python.framework.ops import RegisterShape
from tensorflow.python.framework.tensor_shape import Dimension
from tensorflow.python.framework.tensor_shape import TensorShape
# Needed when interfacing tensorflow to new array libraries
from tensorflow.python.framework.ops import register_tensor_conversion_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.framework.dtypes import *
# Load a TensorFlow plugin
from tensorflow.python.framework.load_library import *
# pylint: enable=wildcard-import
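# Illustrative sketch (not part of the original file): a minimal use of the
# core names re-exported above, kept as a comment so importing this module
# stays side-effect free.
#
#     g = Graph()
#     with g.as_default():
#         t = convert_to_tensor([1.0, 2.0])
#         assert get_default_graph() is g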
|
peterbraden/tensorflow
|
tensorflow/python/framework/framework_lib.py
|
Python
|
apache-2.0
| 3,634
|
#! /usr/bin/env python
# Selectively preprocess #ifdef / #ifndef statements.
# Usage:
# ifdef [-Dname] ... [-Uname] ... [file] ...
#
# This scans the file(s), looking for #ifdef and #ifndef preprocessor
# commands that test for one of the names mentioned in the -D and -U
# options. On standard output it writes a copy of the input file(s)
# minus those code sections that are suppressed by the selected
# combination of defined/undefined symbols. The #if(n)def/#else/#endif
# lines themselves (if the #if(n)def tests for one of the mentioned
# names) are removed as well.
# Features: Arbitrary nesting of recognized and unrecognized
# preprocessor statements works correctly. Unrecognized #if* commands
# are left in place, so it will never remove too much, only too
# little. It does accept whitespace around the '#' character.
# Restrictions: There should be no comments or other symbols on the
# #if(n)def lines. The effect of #define/#undef commands in the input
# file or in included files is not taken into account. Tests using
# #if and the defined() pseudo function are not recognized. The #elif
# command is not recognized. Improper nesting is not detected.
# Lines that look like preprocessor commands but which are actually
# part of comments or string literals will be mistaken for
# preprocessor commands.
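# Example (illustrative): running `ifdef.py -DUNIX input.c` on input containing
#
#     #ifdef UNIX
#     unix_code();
#     #else
#     other_code();
#     #endif
#
# writes only `unix_code();` to standard output; the #ifdef/#else/#endif lines
# themselves are removed because UNIX was mentioned with -D.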
import sys
import getopt
defs = []
undefs = []
def main():
opts, args = getopt.getopt(sys.argv[1:], 'D:U:')
for o, a in opts:
if o == '-D':
defs.append(a)
if o == '-U':
undefs.append(a)
if not args:
args = ['-']
for filename in args:
if filename == '-':
process(sys.stdin, sys.stdout)
else:
f = open(filename, 'r')
process(f, sys.stdout)
f.close()
def process(fpi, fpo):
keywords = ('if', 'ifdef', 'ifndef', 'else', 'endif')
ok = 1
stack = []
while 1:
line = fpi.readline()
if not line: break
while line[-2:] == '\\\n':
nextline = fpi.readline()
if not nextline: break
line = line + nextline
tmp = line.strip()
if tmp[:1] != '#':
if ok: fpo.write(line)
continue
tmp = tmp[1:].strip()
words = tmp.split()
keyword = words[0]
if keyword not in keywords:
if ok: fpo.write(line)
continue
if keyword in ('ifdef', 'ifndef') and len(words) == 2:
if keyword == 'ifdef':
ko = 1
else:
ko = 0
word = words[1]
if word in defs:
stack.append((ok, ko, word))
if not ko: ok = 0
elif word in undefs:
stack.append((ok, not ko, word))
if ko: ok = 0
else:
stack.append((ok, -1, word))
if ok: fpo.write(line)
elif keyword == 'if':
stack.append((ok, -1, ''))
if ok: fpo.write(line)
elif keyword == 'else' and stack:
s_ok, s_ko, s_word = stack[-1]
if s_ko < 0:
if ok: fpo.write(line)
else:
s_ko = not s_ko
ok = s_ok
if not s_ko: ok = 0
stack[-1] = s_ok, s_ko, s_word
elif keyword == 'endif' and stack:
s_ok, s_ko, s_word = stack[-1]
if s_ko < 0:
if ok: fpo.write(line)
del stack[-1]
ok = s_ok
else:
sys.stderr.write('Unknown keyword %s\n' % keyword)
if stack:
sys.stderr.write('stack: %s\n' % stack)
if __name__ == '__main__':
main()
|
google/google-ctf
|
third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/ifdef.py
|
Python
|
apache-2.0
| 3,829
|
"""Auto-generated file, do not edit by hand. IE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IE = PhoneMetadata(id='IE', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[159]\\d{2,4}', possible_number_pattern='\\d{3,5}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='112|999', possible_number_pattern='\\d{3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='112|51210|999', possible_number_pattern='\\d{3,5}', example_number='112'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='51210', possible_number_pattern='\\d{5}'),
short_data=True)
|
dongguangming/python-phonenumbers
|
python/phonenumbers/shortdata/region_IE.py
|
Python
|
apache-2.0
| 993
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'image_anchor05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.image_dir = test_dir + 'images/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
'D7', self.image_dir + 'yellow.png',
{'x_offset': 1, 'y_offset': 2, 'positioning': 2})
workbook.close()
self.assertExcelEqual()
|
liukaijv/XlsxWriter
|
xlsxwriter/test/comparison/test_image_anchor05.py
|
Python
|
bsd-2-clause
| 1,181
|
""" Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_
import pytest
from scipy._lib.six import xrange
from scipy.optimize import nonlin, root
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
from .test_minpack import pressure_network
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
'krylov': nonlin.newton_krylov}
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
x = np.asmatrix(x).T
d = matrix(diag([3,2,1.5,1,0.5]))
c = 0.01
f = -d*x - c*float(x.T*x)*x
return f
F.xin = [1,1,1,1,1]
F.KNOWN_BAD = {}
def F2(x):
return x
F2.xin = [1,2,3,4,5,6]
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing}
def F2_lucky(x):
return x
F2_lucky.xin = [0,0,0,0,0,0]
F2_lucky.KNOWN_BAD = {}
def F3(x):
A = np.mat('-2 1 0; 1 -2 1; 0 1 -2')
b = np.mat('1 2 3')
return np.dot(A, x) - b
F3.xin = [1,2,3]
F3.KNOWN_BAD = {}
def F4_powell(x):
A = 1e4
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing,
'diagbroyden': nonlin.diagbroyden}
def F5(x):
return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
def F6(x):
x1, x2 = x
J0 = np.array([[-4.256, 14.7],
[0.8394989, 0.59964207]])
v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
np.sin(x2 * np.exp(x1) - 1)])
return -np.linalg.solve(J0, v)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
"""
Check the Broyden methods for a few test problems.
broyden1, broyden2, and newton_krylov must succeed for
all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
"""
def _check_nonlin_func(self, f, func, f_tol=1e-2):
x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
assert_(np.absolute(f(x)).max() < f_tol)
def _check_root(self, f, method, f_tol=1e-2):
res = root(f, f.xin, method=method,
options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
assert_(np.absolute(res.fun).max() < f_tol)
@pytest.mark.xfail
def _check_func_fail(self, *a, **kw):
pass
def test_problem_nonlin(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for func in SOLVERS.values():
if func in f.KNOWN_BAD.values():
if func in MUST_WORK.values():
self._check_func_fail(f, func)
continue
self._check_nonlin_func(f, func)
def test_tol_norm_called(self):
# Check that supplying tol_norm keyword to nonlin_solve works
self._tol_norm_used = False
def local_norm_func(x):
self._tol_norm_used = True
return np.absolute(x).max()
nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0,
tol_norm=local_norm_func)
assert_(self._tol_norm_used)
def test_problem_root(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for meth in SOLVERS:
if meth in f.KNOWN_BAD:
if meth in MUST_WORK:
self._check_func_fail(f, meth)
continue
self._check_root(f, meth)
class TestSecant(object):
"""Check that some Jacobian approximations satisfy the secant condition"""
xs = [np.array([1,2,3,4,5], float),
np.array([2,3,4,5,1], float),
np.array([3,4,5,1,2], float),
np.array([4,5,1,2,3], float),
np.array([9,1,9,1,3], float),
np.array([0,1,9,1,3], float),
np.array([5,5,7,1,1], float),
np.array([1,2,7,5,1], float),]
fs = [x**2 - 1 for x in xs]
def _check_secant(self, jac_cls, npoints=1, **kw):
"""
Check that the given Jacobian approximation satisfies secant
conditions for last `npoints` points.
"""
jac = jac_cls(**kw)
jac.setup(self.xs[0], self.fs[0], None)
for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
jac.update(x, f)
for k in xrange(min(npoints, j+1)):
dx = self.xs[j-k+1] - self.xs[j-k]
df = self.fs[j-k+1] - self.fs[j-k]
assert_(np.allclose(dx, jac.solve(df)))
# Check that the `npoints` secant bound is strict
if j >= npoints:
dx = self.xs[j-npoints+1] - self.xs[j-npoints]
df = self.fs[j-npoints+1] - self.fs[j-npoints]
assert_(not np.allclose(dx, jac.solve(df)))
def test_broyden1(self):
self._check_secant(nonlin.BroydenFirst)
def test_broyden2(self):
self._check_secant(nonlin.BroydenSecond)
def test_broyden1_update(self):
# Check that BroydenFirst update works as for a dense matrix
jac = nonlin.BroydenFirst(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
B = np.identity(5) * (-1/0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
jac.update(x, f)
assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
def test_broyden2_update(self):
# Check that BroydenSecond update works as for a dense matrix
jac = nonlin.BroydenSecond(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
H = np.identity(5) * (-0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
jac.update(x, f)
assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
def test_anderson(self):
# Anderson mixing (with w0=0) satisfies secant conditions
# for the last M iterates, see [Ey]_
#
# .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(object):
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
np.random.seed(123)
A = np.random.randn(N, N)
if complex:
A = A + 1j*np.random.randn(N, N)
b = np.random.randn(N)
if complex:
b = b + 1j*np.random.randn(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
"""Check that solve/dot methods in Jacobian approximations are consistent"""
def _func(self, x):
return x**2 - 1 + np.dot(self.A, x)
def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
np.random.seed(123)
N = 7
def rand(*a):
q = np.random.rand(*a)
if complex:
q = q + 1j*np.random.rand(*a)
return q
def assert_close(a, b, msg):
d = abs(a - b).max()
f = tol + abs(b).max()*tol
if d > f:
raise AssertionError('%s: err %g' % (msg, d))
self.A = rand(N, N)
# initialize
x0 = np.random.rand(N)
jac = jac_cls(**kw)
jac.setup(x0, self._func(x0), self._func)
# check consistency
for k in xrange(2*N):
v = rand(N)
if hasattr(jac, '__array__'):
Jd = np.array(jac)
if hasattr(jac, 'solve'):
Gv = jac.solve(v)
Gv2 = np.linalg.solve(Jd, v)
assert_close(Gv, Gv2, 'solve vs array')
if hasattr(jac, 'rsolve'):
Gv = jac.rsolve(v)
Gv2 = np.linalg.solve(Jd.T.conj(), v)
assert_close(Gv, Gv2, 'rsolve vs array')
if hasattr(jac, 'matvec'):
Jv = jac.matvec(v)
Jv2 = np.dot(Jd, v)
assert_close(Jv, Jv2, 'dot vs array')
if hasattr(jac, 'rmatvec'):
Jv = jac.rmatvec(v)
Jv2 = np.dot(Jd.T.conj(), v)
assert_close(Jv, Jv2, 'rmatvec vs array')
if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
Jv = jac.matvec(v)
Jv2 = jac.solve(jac.matvec(Jv))
assert_close(Jv, Jv2, 'dot vs solve')
if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
Jv = jac.rmatvec(v)
Jv2 = jac.rmatvec(jac.rsolve(Jv))
assert_close(Jv, Jv2, 'rmatvec vs rsolve')
x = rand(N)
jac.update(x, self._func(x))
def test_broyden1(self):
self._check_dot(nonlin.BroydenFirst, complex=False)
self._check_dot(nonlin.BroydenFirst, complex=True)
def test_broyden2(self):
self._check_dot(nonlin.BroydenSecond, complex=False)
self._check_dot(nonlin.BroydenSecond, complex=True)
def test_anderson(self):
self._check_dot(nonlin.Anderson, complex=False)
self._check_dot(nonlin.Anderson, complex=True)
def test_diagbroyden(self):
self._check_dot(nonlin.DiagBroyden, complex=False)
self._check_dot(nonlin.DiagBroyden, complex=True)
def test_linearmixing(self):
self._check_dot(nonlin.LinearMixing, complex=False)
self._check_dot(nonlin.LinearMixing, complex=True)
def test_excitingmixing(self):
self._check_dot(nonlin.ExcitingMixing, complex=False)
self._check_dot(nonlin.ExcitingMixing, complex=True)
def test_krylov(self):
self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3)
self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3)
class TestNonlinOldTests(object):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def test_broyden1(self):
x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_broyden2(self):
x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_anderson(self):
x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
assert_(nonlin.norm(x) < 0.33)
def test_linearmixing(self):
x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
assert_(nonlin.norm(x) < 1e-7)
assert_(nonlin.norm(F(x)) < 1e-7)
def test_exciting(self):
x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
assert_(nonlin.norm(x) < 1e-5)
assert_(nonlin.norm(F(x)) < 1e-5)
def test_diagbroyden(self):
x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
assert_(nonlin.norm(x) < 1e-8)
assert_(nonlin.norm(F(x)) < 1e-8)
def test_root_broyden1(self):
res = root(F, F.xin, method='broyden1',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_broyden2(self):
res = root(F, F.xin, method='broyden2',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_anderson(self):
res = root(F, F.xin, method='anderson',
options={'nit': 12,
'jac_options': {'alpha': 0.03, 'M': 5}})
assert_(nonlin.norm(res.x) < 0.33)
def test_root_linearmixing(self):
res = root(F, F.xin, method='linearmixing',
options={'nit': 60,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-7)
assert_(nonlin.norm(res.fun) < 1e-7)
def test_root_excitingmixing(self):
res = root(F, F.xin, method='excitingmixing',
options={'nit': 20,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-5)
assert_(nonlin.norm(res.fun) < 1e-5)
def test_root_diagbroyden(self):
res = root(F, F.xin, method='diagbroyden',
options={'nit': 11,
'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-8)
assert_(nonlin.norm(res.fun) < 1e-8)
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/optimize/tests/test_nonlin.py
|
Python
|
mit
| 15,054
|
#!/usr/bin/python
import ldns
pkt = ldns.ldns_pkt.new_query_frm_str("www.google.com",ldns.LDNS_RR_TYPE_ANY, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_QR | ldns.LDNS_AA)
rra = ldns.ldns_rr.new_frm_str("www.google.com. IN A 192.168.1.1", 300)
# Raw string keeps the backslash-escaped space literal without a DeprecationWarning.
rrb = ldns.ldns_rr.new_frm_str(r"www.google.com. IN TXT Some\ Description", 300)
rr_list = ldns.ldns_rr_list()  # renamed from `list` to avoid shadowing the builtin
if rra: rr_list.push_rr(rra)
if rrb: rr_list.push_rr(rrb)
pkt.push_rr_list(ldns.LDNS_SECTION_ANSWER, rr_list)
print("Packet:")
print(pkt)
|
fangdingjun/dnsproxy
|
third-part/ldns-1.6.17/contrib/python/examples/python3/ldns-newpkt.py
|
Python
|
gpl-3.0
| 476
|
"""add message column to event
Revision ID: 211e93aff1e1
Revises: 2f3c8fa3fc3a
Create Date: 2015-03-20 18:50:29.961734
"""
# revision identifiers, used by Alembic.
revision = '211e93aff1e1'
down_revision = '2f3c8fa3fc3a'
from alembic import op
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE event ADD COLUMN message_id int(11) DEFAULT NULL"))
conn.execute(text("ALTER TABLE event ADD CONSTRAINT message_ifbk FOREIGN KEY "
"(`message_id`) REFERENCES `message` (`id`) ON DELETE CASCADE"))
def downgrade():
conn = op.get_bind()
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE event DROP FOREIGN KEY message_ifbk"))
conn.execute(text("ALTER TABLE event DROP COLUMN message_id"))
|
nylas/sync-engine
|
migrations/versions/152_add_message_id_to_event.py
|
Python
|
agpl-3.0
| 867
|
"""
Test cases for tabs.
"""
from mock import MagicMock, Mock, patch
from courseware.courses import get_course_by_id
from courseware.views import get_static_tab_contents
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from student.tests.factories import UserFactory
from xmodule.tabs import CourseTabList
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from courseware.tests.helpers import get_request_for_user, LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from opaque_keys.edx.locations import SlashSeparatedCourseKey
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StaticTabDateTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""Test cases for Static Tab Dates."""
def setUp(self):
self.course = CourseFactory.create()
self.page = ItemFactory.create(
category="static_tab", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="new_tab"
)
self.toy_course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def test_logged_in(self):
self.setup_user()
url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_anonymous_user(self):
url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
def test_get_static_tab_contents(self):
course = get_course_by_id(self.toy_course_key)
request = get_request_for_user(UserFactory.create())
tab = CourseTabList.get_tab_by_slug(course.tabs, 'resources')
# Test render works okay
tab_content = get_static_tab_contents(request, course, tab)
self.assertIn(self.toy_course_key.to_deprecated_string(), tab_content)
self.assertIn('static_tab', tab_content)
# Test when render raises an exception
with patch('courseware.views.get_module') as mock_module_render:
mock_module_render.return_value = MagicMock(
render=Mock(side_effect=Exception('Render failed!'))
)
static_tab = get_static_tab_contents(request, course, tab)
self.assertIn("this module is temporarily unavailable", static_tab)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StaticTabDateTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
# The following XML test course (which lives at common/test/data/2014)
# is closed; we're testing that tabs still appear when
# the course is already closed
xml_course_key = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')
# this text appears in the test course's tab
# common/test/data/2014/tabs/8e4cce2b4aaf4ba28b1220804619e41f.html
xml_data = "static 463139"
xml_url = "8e4cce2b4aaf4ba28b1220804619e41f"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
|
huchoi/edx-platform
|
lms/djangoapps/courseware/tests/test_tabs.py
|
Python
|
agpl-3.0
| 4,042
|
# Copyright 2014 Scality
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import testtools
from tempest.common import waiters
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
class TestShelveInstance(manager.ScenarioTest):
"""
This test shelves then unshelves a Nova instance
The following is the scenario outline:
    * boot an instance and create a timestamp file in it
* shelve the instance
* unshelve the instance
* check the existence of the timestamp file in the unshelved instance
"""
def _write_timestamp(self, server_or_ip):
ssh_client = self.get_remote_client(server_or_ip)
ssh_client.exec_command('date > /tmp/timestamp; sync')
self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
def _check_timestamp(self, server_or_ip):
ssh_client = self.get_remote_client(server_or_ip)
got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
self.assertEqual(self.timestamp, got_timestamp)
def _shelve_then_unshelve_server(self, server):
self.servers_client.shelve_server(server['id'])
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
waiters.wait_for_server_status(self.servers_client, server['id'],
'SHELVED_OFFLOADED',
extra_timeout=offload_time)
else:
waiters.wait_for_server_status(self.servers_client,
server['id'], 'SHELVED')
self.servers_client.shelve_offload_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
'SHELVED_OFFLOADED')
self.servers_client.unshelve_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
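    # Added commentary (not in the original test): a non-negative
    # shelved_offload_time means the cloud offloads the shelved instance on its
    # own, so the test only waits for SHELVED_OFFLOADED with an extended timeout;
    # a negative value disables automatic offloading, hence the explicit
    # shelve_offload_server() call above before unshelving.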
@test.idempotent_id('1164e700-0af0-4a4c-8792-35909a88743c')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.services('compute', 'network', 'image')
def test_shelve_instance(self):
self.keypair = self.create_keypair()
self.security_group = self._create_security_group()
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': security_groups
}
server = self.create_server(image=CONF.compute.image_ref,
create_kwargs=create_kwargs)
if CONF.compute.use_floatingip_for_ssh:
floating_ip = self.floating_ips_client.create_floating_ip()
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], server['id'])
self._write_timestamp(floating_ip['ip'])
else:
self._write_timestamp(server)
# Prevent bug #1257594 from coming back
# Unshelve used to boot the instance with the original image, not
# with the instance snapshot
self._shelve_then_unshelve_server(server)
if CONF.compute.use_floatingip_for_ssh:
self._check_timestamp(floating_ip['ip'])
else:
self._check_timestamp(server)
|
varunarya10/tempest
|
tempest/scenario/test_shelve_instance.py
|
Python
|
apache-2.0
| 4,194
|
# Lint as: python3
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The core data types ctexplain manipulates."""
from typing import Mapping
from typing import Optional
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from dataclasses import dataclass
from dataclasses import field
from frozendict import frozendict
@dataclass(frozen=True)
class Configuration():
"""Stores a build configuration as a collection of fragments and options."""
# Mapping of each BuildConfiguration.Fragment in this configuration to the
# FragmentOptions it requires.
#
# All names are qualified up to the base file name, without package prefixes.
# For example, foo.bar.BazConfiguration appears as "BazConfiguration".
  # foo.bar.BazConfiguration$Options appears as "BazConfiguration$Options".
fragments: Mapping[str, Tuple[str, ...]]
# Mapping of FragmentOptions to option key/value pairs. For example:
# {"CoreOptions": {"action_env": "[]", "cpu": "x86", ...}, ...}.
#
# Option values are stored as strings of whatever "bazel config" outputs.
#
# Note that Fragment and FragmentOptions aren't the same thing.
options: Mapping[str, Mapping[str, str]]
@dataclass(frozen=True)
class ConfiguredTarget():
"""Encapsulates a target + configuration + required fragments."""
# Label of the target this represents.
label: str
# Configuration this target is applied to. May be None.
config: Optional[Configuration]
# The hash of this configuration as reported by Bazel.
config_hash: str
# Fragments required by this configured target and its transitive
# dependencies. Stored as base names without packages. For example:
# "PlatformOptions" or "FooConfiguration$Options".
transitive_fragments: Tuple[str, ...]
@dataclass(frozen=True)
class HostConfiguration(Configuration):
"""Special marker for the host configuration.
There's exactly one host configuration per build, so we shouldn't suggest
merging it with other configurations.
TODO(gregce): suggest host configuration trimming once we figure out the right
criteria. Even if Bazel's not technically equipped to do the trimming, it's
still theoretically valuable information. Note that moving from host to exec
configurations make this all a little less relevant, since exec configurations
aren't "special" compared to normal configurations.
"""
# We don't currently read the host config's fragments or option values.
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))
@dataclass(frozen=True)
class NullConfiguration(Configuration):
"""Special marker for the null configuration.
By definition this has no fragments or options.
"""
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))
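# Illustrative construction sketch (added example, not part of the original module);
# the fragment and option names below are invented for demonstration only.
#
#   config = Configuration(
#       fragments=frozendict({"CppConfiguration": ("CppOptions",)}),
#       options=frozendict({"CppOptions": frozendict({"copt": "[]"})}))
#   target = ConfiguredTarget(label="//foo:bar", config=config,
#                             config_hash="a1b2c3d4",
#                             transitive_fragments=("CppOptions",))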
|
twitter-forks/bazel
|
tools/ctexplain/types.py
|
Python
|
apache-2.0
| 3,539
|
'''
Created on Jun 27, 2010
@author: jnaous
'''
from openflow.dummyom.models import DummyOM
def run():
for om in DummyOM.objects.all():
om.delete()
for i in xrange(3):
om = DummyOM.objects.create()
om.populate_links(10, 20)
|
ict-felix/stack
|
vt_manager_kvm/src/python/scripts/create_oms.py
|
Python
|
apache-2.0
| 273
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for feature in MLlib.
"""
import sys
import warnings
from py4j.protocol import Py4JJavaError
from pyspark import since
from pyspark.rdd import RDD
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Vectors, _convert_to_vector
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['Normalizer', 'StandardScalerModel', 'StandardScaler',
'HashingTF', 'IDFModel', 'IDF', 'Word2Vec', 'Word2VecModel',
'ChiSqSelector', 'ChiSqSelectorModel', 'ElementwiseProduct']
class VectorTransformer(object):
"""
Base class for transformation of a vector or RDD of vector
"""
def transform(self, vector):
"""
Applies transformation on a vector.
Parameters
----------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
vector or convertible or RDD to be transformed.
"""
raise NotImplementedError
class Normalizer(VectorTransformer):
r"""
Normalizes samples individually to unit L\ :sup:`p`\ norm
For any 1 <= `p` < float('inf'), normalizes samples using
sum(abs(vector) :sup:`p`) :sup:`(1/p)` as norm.
For `p` = float('inf'), max(abs(vector)) will be used as norm for
normalization.
.. versionadded:: 1.2.0
Parameters
----------
p : float, optional
Normalization in L^p^ space, p = 2 by default.
Examples
--------
>>> from pyspark.mllib.linalg import Vectors
>>> v = Vectors.dense(range(3))
>>> nor = Normalizer(1)
>>> nor.transform(v)
DenseVector([0.0, 0.3333, 0.6667])
>>> rdd = sc.parallelize([v])
>>> nor.transform(rdd).collect()
[DenseVector([0.0, 0.3333, 0.6667])]
>>> nor2 = Normalizer(float("inf"))
>>> nor2.transform(v)
DenseVector([0.0, 0.5, 1.0])
"""
def __init__(self, p=2.0):
        assert p >= 1.0, "p should be greater than or equal to 1.0"
self.p = float(p)
def transform(self, vector):
"""
Applies unit length normalization on a vector.
.. versionadded:: 1.2.0
Parameters
----------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
vector or RDD of vector to be normalized.
Returns
-------
:py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
normalized vector(s). If the norm of the input is zero, it
will return the input vector.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return callMLlibFunc("normalizeVector", self.p, vector)
class JavaVectorTransformer(JavaModelWrapper, VectorTransformer):
"""
Wrapper for the model in JVM
"""
def transform(self, vector):
"""
Applies transformation on a vector or an RDD[Vector].
Parameters
----------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
Input vector(s) to be transformed.
Notes
-----
In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return self.call("transform", vector)
class StandardScalerModel(JavaVectorTransformer):
"""
Represents a StandardScaler model that can transform vectors.
.. versionadded:: 1.2.0
"""
def transform(self, vector):
"""
Applies standardization transformation on a vector.
.. versionadded:: 1.2.0
Parameters
----------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
Input vector(s) to be standardized.
Returns
-------
:py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
Standardized vector(s). If the variance of a column is
zero, it will return default `0.0` for the column with
zero variance.
Notes
-----
In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
"""
return JavaVectorTransformer.transform(self, vector)
@since('1.4.0')
def setWithMean(self, withMean):
"""
Setter of the boolean which decides
whether it uses mean or not
"""
self.call("setWithMean", withMean)
return self
@since('1.4.0')
def setWithStd(self, withStd):
"""
Setter of the boolean which decides
whether it uses std or not
"""
self.call("setWithStd", withStd)
return self
@property
@since('2.0.0')
def withStd(self):
"""
Returns if the model scales the data to unit standard deviation.
"""
return self.call("withStd")
@property
@since('2.0.0')
def withMean(self):
"""
Returns if the model centers the data before scaling.
"""
return self.call("withMean")
@property
@since('2.0.0')
def std(self):
"""
Return the column standard deviation values.
"""
return self.call("std")
@property
@since('2.0.0')
def mean(self):
"""
Return the column mean values.
"""
return self.call("mean")
class StandardScaler(object):
"""
Standardizes features by removing the mean and scaling to unit
variance using column summary statistics on the samples in the
training set.
.. versionadded:: 1.2.0
Parameters
----------
withMean : bool, optional
False by default. Centers the data with mean
before scaling. It will build a dense output, so take
care when applying to sparse input.
withStd : bool, optional
True by default. Scales the data to unit
standard deviation.
Examples
--------
>>> vs = [Vectors.dense([-2.0, 2.3, 0]), Vectors.dense([3.8, 0.0, 1.9])]
>>> dataset = sc.parallelize(vs)
>>> standardizer = StandardScaler(True, True)
>>> model = standardizer.fit(dataset)
>>> result = model.transform(dataset)
>>> for r in result.collect(): r
DenseVector([-0.7071, 0.7071, -0.7071])
DenseVector([0.7071, -0.7071, 0.7071])
>>> int(model.std[0])
4
>>> int(model.mean[0]*10)
9
>>> model.withStd
True
>>> model.withMean
True
"""
def __init__(self, withMean=False, withStd=True):
if not (withMean or withStd):
warnings.warn("Both withMean and withStd are false. The model does nothing.")
self.withMean = withMean
self.withStd = withStd
def fit(self, dataset):
"""
Computes the mean and variance and stores as a model to be used
for later scaling.
.. versionadded:: 1.2.0
Parameters
----------
dataset : :py:class:`pyspark.RDD`
The data used to compute the mean and variance
to build the transformation model.
Returns
-------
:py:class:`StandardScalerModel`
"""
dataset = dataset.map(_convert_to_vector)
jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd, dataset)
return StandardScalerModel(jmodel)
class ChiSqSelectorModel(JavaVectorTransformer):
"""
Represents a Chi Squared selector model.
.. versionadded:: 1.4.0
"""
def transform(self, vector):
"""
Applies transformation on a vector.
.. versionadded:: 1.4.0
Examples
--------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
Input vector(s) to be transformed.
Returns
-------
:py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
transformed vector(s).
"""
return JavaVectorTransformer.transform(self, vector)
class ChiSqSelector(object):
"""
Creates a ChiSquared feature selector.
The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
`fdr`, `fwe`.
* `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
* `percentile` is similar but chooses a fraction of all features
instead of a fixed number.
* `fpr` chooses all features whose p-values are below a threshold,
thus controlling the false positive rate of selection.
* `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
to choose all features whose false discovery rate is below a threshold.
* `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
1/numFeatures, thus controlling the family-wise error rate of selection.
By default, the selection method is `numTopFeatures`, with the default number of top features
set to 50.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector, DenseVector
>>> from pyspark.mllib.regression import LabeledPoint
>>> data = sc.parallelize([
... LabeledPoint(0.0, SparseVector(3, {0: 8.0, 1: 7.0})),
... LabeledPoint(1.0, SparseVector(3, {1: 9.0, 2: 6.0})),
... LabeledPoint(1.0, [0.0, 9.0, 8.0]),
... LabeledPoint(2.0, [7.0, 9.0, 5.0]),
... LabeledPoint(2.0, [8.0, 7.0, 3.0])
... ])
>>> model = ChiSqSelector(numTopFeatures=1).fit(data)
>>> model.transform(SparseVector(3, {1: 9.0, 2: 6.0}))
SparseVector(1, {})
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
>>> model = ChiSqSelector(selectorType="fpr", fpr=0.2).fit(data)
>>> model.transform(SparseVector(3, {1: 9.0, 2: 6.0}))
SparseVector(1, {})
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
>>> model = ChiSqSelector(selectorType="percentile", percentile=0.34).fit(data)
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
"""
def __init__(self, numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
self.numTopFeatures = numTopFeatures
self.selectorType = selectorType
self.percentile = percentile
self.fpr = fpr
self.fdr = fdr
self.fwe = fwe
@since('2.1.0')
def setNumTopFeatures(self, numTopFeatures):
"""
        set numTopFeatures for feature selection by number of top features.
Only applicable when selectorType = "numTopFeatures".
"""
self.numTopFeatures = int(numTopFeatures)
return self
@since('2.1.0')
def setPercentile(self, percentile):
"""
set percentile [0.0, 1.0] for feature selection by percentile.
Only applicable when selectorType = "percentile".
"""
self.percentile = float(percentile)
return self
@since('2.1.0')
def setFpr(self, fpr):
"""
set FPR [0.0, 1.0] for feature selection by FPR.
Only applicable when selectorType = "fpr".
"""
self.fpr = float(fpr)
return self
@since('2.2.0')
def setFdr(self, fdr):
"""
set FDR [0.0, 1.0] for feature selection by FDR.
Only applicable when selectorType = "fdr".
"""
self.fdr = float(fdr)
return self
@since('2.2.0')
def setFwe(self, fwe):
"""
set FWE [0.0, 1.0] for feature selection by FWE.
Only applicable when selectorType = "fwe".
"""
self.fwe = float(fwe)
return self
@since('2.1.0')
def setSelectorType(self, selectorType):
"""
set the selector type of the ChisqSelector.
Supported options: "numTopFeatures" (default), "percentile", "fpr", "fdr", "fwe".
"""
self.selectorType = str(selectorType)
return self
def fit(self, data):
"""
Returns a ChiSquared feature selector.
.. versionadded:: 1.4.0
Parameters
----------
data : :py:class:`pyspark.RDD` of :py:class:`pyspark.mllib.regression.LabeledPoint`
containing the labeled dataset with categorical features.
Real-valued features will be treated as categorical for each
distinct value. Apply feature discretizer before using this function.
"""
jmodel = callMLlibFunc("fitChiSqSelector", self.selectorType, self.numTopFeatures,
self.percentile, self.fpr, self.fdr, self.fwe, data)
return ChiSqSelectorModel(jmodel)
class PCAModel(JavaVectorTransformer):
"""
Model fitted by [[PCA]] that can project vectors to a low-dimensional space using PCA.
.. versionadded:: 1.5.0
"""
class PCA(object):
"""
A feature transformer that projects vectors to a low-dimensional space using PCA.
.. versionadded:: 1.5.0
Examples
--------
>>> data = [Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),
... Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),
... Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0])]
>>> model = PCA(2).fit(sc.parallelize(data))
>>> pcArray = model.transform(Vectors.sparse(5, [(1, 1.0), (3, 7.0)])).toArray()
>>> pcArray[0]
1.648...
>>> pcArray[1]
-4.013...
"""
def __init__(self, k):
"""
Parameters
----------
k : int
number of principal components.
"""
self.k = int(k)
def fit(self, data):
"""
Computes a [[PCAModel]] that contains the principal components of the input vectors.
.. versionadded:: 1.5.0
Parameters
----------
data : :py:class:`pyspark.RDD`
source vectors
"""
jmodel = callMLlibFunc("fitPCA", self.k, data)
return PCAModel(jmodel)
class HashingTF(object):
"""
Maps a sequence of terms to their term frequencies using the hashing
trick.
.. versionadded:: 1.2.0
Parameters
----------
numFeatures : int, optional
number of features (default: 2^20)
Notes
-----
    The terms must be hashable (so they cannot be a dict, set, list, ...).
Examples
--------
>>> htf = HashingTF(100)
>>> doc = "a a b b c d".split(" ")
>>> htf.transform(doc)
SparseVector(100, {...})
"""
def __init__(self, numFeatures=1 << 20):
self.numFeatures = numFeatures
self.binary = False
@since("2.0.0")
def setBinary(self, value):
"""
If True, term frequency vector will be binary such that non-zero
term counts will be set to 1
(default: False)
"""
self.binary = value
return self
@since('1.2.0')
def indexOf(self, term):
""" Returns the index of the input term. """
return hash(term) % self.numFeatures
@since('1.2.0')
def transform(self, document):
"""
Transforms the input document (list of terms) to term frequency
vectors, or transform the RDD of document to RDD of term
frequency vectors.
"""
if isinstance(document, RDD):
return document.map(self.transform)
freq = {}
for term in document:
i = self.indexOf(term)
freq[i] = 1.0 if self.binary else freq.get(i, 0) + 1.0
return Vectors.sparse(self.numFeatures, freq.items())
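# Worked sketch of the hashing trick above (added commentary, not part of the
# module): each term is mapped to hash(term) % numFeatures, so with numFeatures=100
# the document "a a b b c d" becomes a 100-dimensional sparse vector whose non-zero
# entries are the per-bucket counts (2.0 for 'a' and 'b', 1.0 for 'c' and 'd',
# barring hash collisions), or all ones when binary=True.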
class IDFModel(JavaVectorTransformer):
"""
Represents an IDF model that can transform term frequency vectors.
.. versionadded:: 1.2.0
"""
def transform(self, x):
"""
Transforms term frequency (TF) vectors to TF-IDF vectors.
If `minDocFreq` was set for the IDF calculation,
the terms which occur in fewer than `minDocFreq`
documents will have an entry of 0.
.. versionadded:: 1.2.0
Parameters
----------
x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
an RDD of term frequency vectors or a term frequency
vector
Returns
-------
:py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
an RDD of TF-IDF vectors or a TF-IDF vector
Notes
-----
In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
"""
return JavaVectorTransformer.transform(self, x)
@since('1.4.0')
def idf(self):
"""
Returns the current IDF vector.
"""
return self.call('idf')
@since('3.0.0')
def docFreq(self):
"""
Returns the document frequency.
"""
return self.call('docFreq')
@since('3.0.0')
def numDocs(self):
"""
Returns number of documents evaluated to compute idf
"""
return self.call('numDocs')
class IDF(object):
"""
Inverse document frequency (IDF).
The standard formulation is used: `idf = log((m + 1) / (d(t) + 1))`,
where `m` is the total number of documents and `d(t)` is the number
of documents that contain term `t`.
This implementation supports filtering out terms which do not appear
in a minimum number of documents (controlled by the variable
`minDocFreq`). For terms that are not in at least `minDocFreq`
documents, the IDF is found as 0, resulting in TF-IDFs of 0.
.. versionadded:: 1.2.0
Parameters
----------
minDocFreq : int
minimum of documents in which a term should appear for filtering
Examples
--------
>>> n = 4
>>> freqs = [Vectors.sparse(n, (1, 3), (1.0, 2.0)),
... Vectors.dense([0.0, 1.0, 2.0, 3.0]),
... Vectors.sparse(n, [1], [1.0])]
>>> data = sc.parallelize(freqs)
>>> idf = IDF()
>>> model = idf.fit(data)
>>> tfidf = model.transform(data)
>>> for r in tfidf.collect(): r
SparseVector(4, {1: 0.0, 3: 0.5754})
DenseVector([0.0, 0.0, 1.3863, 0.863])
SparseVector(4, {1: 0.0})
>>> model.transform(Vectors.dense([0.0, 1.0, 2.0, 3.0]))
DenseVector([0.0, 0.0, 1.3863, 0.863])
>>> model.transform([0.0, 1.0, 2.0, 3.0])
DenseVector([0.0, 0.0, 1.3863, 0.863])
>>> model.transform(Vectors.sparse(n, (1, 3), (1.0, 2.0)))
SparseVector(4, {1: 0.0, 3: 0.5754})
"""
def __init__(self, minDocFreq=0):
self.minDocFreq = minDocFreq
def fit(self, dataset):
"""
Computes the inverse document frequency.
.. versionadded:: 1.2.0
Parameters
----------
dataset : :py:class:`pyspark.RDD`
an RDD of term frequency vectors
"""
if not isinstance(dataset, RDD):
raise TypeError("dataset should be an RDD of term frequency vectors")
jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
return IDFModel(jmodel)
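# Worked check of the formula above (added commentary, not part of the module): in
# the doctest data m = 3 documents; term 3 appears in d(3) = 2 of them, so
# idf(3) = log((3 + 1) / (2 + 1)) ~= 0.2877 and a term frequency of 2.0 yields the
# 0.5754 shown, while term 1 appears in every document and gets idf = log(4/4) = 0.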
class Word2VecModel(JavaVectorTransformer, JavaSaveable, JavaLoader):
"""
class for Word2Vec model
"""
def transform(self, word):
"""
Transforms a word to its vector representation
.. versionadded:: 1.2.0
Parameters
----------
word : str
a word
Returns
-------
:py:class:`pyspark.mllib.linalg.Vector`
vector representation of word(s)
Notes
-----
Local use only
"""
try:
return self.call("transform", word)
except Py4JJavaError:
raise ValueError("%s not found" % word)
def findSynonyms(self, word, num):
"""
Find synonyms of a word
.. versionadded:: 1.2.0
Parameters
----------
word : str or :py:class:`pyspark.mllib.linalg.Vector`
a word or a vector representation of word
num : int
number of synonyms to find
Returns
-------
:py:class:`collections.abc.Iterable`
array of (word, cosineSimilarity)
Notes
-----
Local use only
"""
if not isinstance(word, str):
word = _convert_to_vector(word)
words, similarity = self.call("findSynonyms", word, num)
return zip(words, similarity)
@since('1.4.0')
def getVectors(self):
"""
Returns a map of words to their vector representations.
"""
return self.call("getVectors")
@classmethod
@since('1.5.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
jmodel = sc._jvm.org.apache.spark.mllib.feature \
.Word2VecModel.load(sc._jsc.sc(), path)
model = sc._jvm.org.apache.spark.mllib.api.python.Word2VecModelWrapper(jmodel)
return Word2VecModel(model)
class Word2Vec(object):
"""Word2Vec creates vector representation of words in a text corpus.
The algorithm first constructs a vocabulary from the corpus
and then learns vector representation of words in the vocabulary.
The vector representation can be used as features in
natural language processing and machine learning algorithms.
We used skip-gram model in our implementation and hierarchical
softmax method to train the model. The variable names in the
    implementation match the original C implementation.
For original C implementation,
see https://code.google.com/p/word2vec/
For research papers, see
Efficient Estimation of Word Representations in Vector Space
and Distributed Representations of Words and Phrases and their
Compositionality.
.. versionadded:: 1.2.0
Examples
--------
>>> sentence = "a b " * 100 + "a c " * 10
>>> localDoc = [sentence, sentence]
>>> doc = sc.parallelize(localDoc).map(lambda line: line.split(" "))
>>> model = Word2Vec().setVectorSize(10).setSeed(42).fit(doc)
Querying for synonyms of a word will not return that word:
>>> syms = model.findSynonyms("a", 2)
>>> [s[0] for s in syms]
['b', 'c']
But querying for synonyms of a vector may return the word whose
representation is that vector:
>>> vec = model.transform("a")
>>> syms = model.findSynonyms(vec, 2)
>>> [s[0] for s in syms]
['a', 'b']
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = Word2VecModel.load(sc, path)
>>> model.transform("a") == sameModel.transform("a")
True
>>> syms = sameModel.findSynonyms("a", 2)
>>> [s[0] for s in syms]
['b', 'c']
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
"""
def __init__(self):
"""
Construct Word2Vec instance
"""
self.vectorSize = 100
self.learningRate = 0.025
self.numPartitions = 1
self.numIterations = 1
self.seed = None
self.minCount = 5
self.windowSize = 5
@since('1.2.0')
def setVectorSize(self, vectorSize):
"""
Sets vector size (default: 100).
"""
self.vectorSize = vectorSize
return self
@since('1.2.0')
def setLearningRate(self, learningRate):
"""
Sets initial learning rate (default: 0.025).
"""
self.learningRate = learningRate
return self
@since('1.2.0')
def setNumPartitions(self, numPartitions):
"""
Sets number of partitions (default: 1). Use a small number for
accuracy.
"""
self.numPartitions = numPartitions
return self
@since('1.2.0')
def setNumIterations(self, numIterations):
"""
Sets number of iterations (default: 1), which should be smaller
than or equal to number of partitions.
"""
self.numIterations = numIterations
return self
@since('1.2.0')
def setSeed(self, seed):
"""
Sets random seed.
"""
self.seed = seed
return self
@since('1.4.0')
def setMinCount(self, minCount):
"""
Sets minCount, the minimum number of times a token must appear
to be included in the word2vec model's vocabulary (default: 5).
"""
self.minCount = minCount
return self
@since('2.0.0')
def setWindowSize(self, windowSize):
"""
Sets window size (default: 5).
"""
self.windowSize = windowSize
return self
def fit(self, data):
"""
Computes the vector representation of each word in vocabulary.
.. versionadded:: 1.2.0
Parameters
----------
data : :py:class:`pyspark.RDD`
training data. RDD of list of string
Returns
-------
:py:class:`Word2VecModel`
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD of list of string")
jmodel = callMLlibFunc("trainWord2VecModel", data, int(self.vectorSize),
float(self.learningRate), int(self.numPartitions),
int(self.numIterations), self.seed,
int(self.minCount), int(self.windowSize))
return Word2VecModel(jmodel)
class ElementwiseProduct(VectorTransformer):
"""
Scales each column of the vector, with the supplied weight vector.
i.e the elementwise product.
.. versionadded:: 1.5.0
Examples
--------
>>> weight = Vectors.dense([1.0, 2.0, 3.0])
>>> eprod = ElementwiseProduct(weight)
>>> a = Vectors.dense([2.0, 1.0, 3.0])
>>> eprod.transform(a)
DenseVector([2.0, 2.0, 9.0])
>>> b = Vectors.dense([9.0, 3.0, 4.0])
>>> rdd = sc.parallelize([a, b])
>>> eprod.transform(rdd).collect()
[DenseVector([2.0, 2.0, 9.0]), DenseVector([9.0, 6.0, 12.0])]
"""
def __init__(self, scalingVector):
self.scalingVector = _convert_to_vector(scalingVector)
@since('1.5.0')
def transform(self, vector):
"""
Computes the Hadamard product of the vector.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return callMLlibFunc("elementwiseProductVector", self.scalingVector, vector)
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.feature tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
sys.path.pop(0)
_test()
|
maropu/spark
|
python/pyspark/mllib/feature.py
|
Python
|
apache-2.0
| 28,134
|
"""Tests for the Bond cover device."""
from datetime import timedelta
from bond_api import Action, DeviceType
from homeassistant import core
from homeassistant.components.cover import DOMAIN as COVER_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_STOP_COVER,
)
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import (
help_test_entity_available,
patch_bond_action,
patch_bond_device_state,
setup_platform,
)
from tests.common import async_fire_time_changed
def shades(name: str):
"""Create motorized shades with given name."""
return {"name": name, "type": DeviceType.MOTORIZED_SHADES}
async def test_entity_registry(hass: core.HomeAssistant):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(
hass,
COVER_DOMAIN,
shades("name-1"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.entities["cover.name_1"]
assert entity.unique_id == "test-hub-id_test-device-id"
async def test_open_cover(hass: core.HomeAssistant):
"""Tests that open cover command delegates to API."""
await setup_platform(
hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_open, patch_bond_device_state():
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: "cover.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_open.assert_called_once_with("test-device-id", Action.open())
async def test_close_cover(hass: core.HomeAssistant):
"""Tests that close cover command delegates to API."""
await setup_platform(
hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_close, patch_bond_device_state():
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: "cover.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_close.assert_called_once_with("test-device-id", Action.close())
async def test_stop_cover(hass: core.HomeAssistant):
"""Tests that stop cover command delegates to API."""
await setup_platform(
hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_hold, patch_bond_device_state():
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_STOP_COVER,
{ATTR_ENTITY_ID: "cover.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_hold.assert_called_once_with("test-device-id", Action.hold())
async def test_update_reports_open_cover(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports cover is open."""
await setup_platform(hass, COVER_DOMAIN, shades("name-1"))
with patch_bond_device_state(return_value={"open": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("cover.name_1").state == "open"
async def test_update_reports_closed_cover(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports cover is closed."""
await setup_platform(hass, COVER_DOMAIN, shades("name-1"))
with patch_bond_device_state(return_value={"open": 0}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("cover.name_1").state == "closed"
async def test_cover_available(hass: core.HomeAssistant):
"""Tests that available state is updated based on API errors."""
await help_test_entity_available(
hass, COVER_DOMAIN, shades("name-1"), "cover.name_1"
)
|
sdague/home-assistant
|
tests/components/bond/test_cover.py
|
Python
|
apache-2.0
| 4,207
|
#!/usr/bin/python
# Creator: Daniel Wooten
# License: GPL
# import the python logging utility as log
import logging as log
# Set the root logger level ( what messages it will print )
log.basicConfig( level = 10 )
# Some sample messages for the root logger
log.debug( "This is the debug level reporting in" )
log.info( "This is the info level reporting in " )
log.warning( "This is the warning level reporting in" )
log.error( "This is the error level reporting in" )
log.critical( "This is the critical level reporting in" )
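# Added note (not in the original script): the numeric level 10 corresponds to the
# named constant log.DEBUG, so an equivalent and arguably clearer call would be:
# log.basicConfig( level = log.DEBUG )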
|
jnaulty/berkeley
|
python_logger/log_pres_2.py
|
Python
|
bsd-3-clause
| 529
|
from __future__ import print_function
from django.core.management.base import BaseCommand
from optparse import make_option
from laws.models import Bill
from laws.vote_choices import BILL_STAGE_CHOICES
from mks.models import Knesset
class Command(BaseCommand):
help = "Freeze bills staged in previous knessets"
option_list = BaseCommand.option_list + (
make_option(
'-n', action='store_true', dest="dryrun", default=False,
help='Dry run, changes nothing in the db, just display results'
),
)
def handle(self, *args, **options):
start_date = Knesset.objects.current_knesset().start_date
valid_stages = [key for (key, val) in BILL_STAGE_CHOICES
if key.isnumeric() and 1 < int(key) < 6]
bills = Bill.objects.filter(stage_date__lte=start_date,
stage__in=valid_stages)
total = Bill.objects.count()
found = bills.count()
msg = "Found {0} bills of {1} in stages {2} and dated before {3}"
print(msg.format(found, total, u','.join(valid_stages), start_date))
if options['dryrun']:
print("Not updating the db, dry run was specified")
else:
            print('Setting {0} bills stage to u"0"'.format(found))
bills.update(stage=u'0')
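# Illustrative invocation (added note, assuming the standard Django management
# command layout this file lives in): `python manage.py freeze_bills -n` reports the
# matching bills without touching the database; omitting `-n` performs the update.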
|
noamelf/Open-Knesset
|
laws/management/commands/freeze_bills.py
|
Python
|
bsd-3-clause
| 1,347
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, cos, exp, log, arange, pi, roll, sin, sqrt, sum
from .go_benchmark import Benchmark
class BartelsConn(Benchmark):
r"""
Bartels-Conn objective function.
The BartelsConn [1]_ global optimization problem is a multimodal
minimization problem defined as follows:
.. math::
f_{\text{BartelsConn}}(x) = \lvert {x_1^2 + x_2^2 + x_1x_2} \rvert +
\lvert {\sin(x_1)} \rvert + \lvert {\cos(x_2)} \rvert
with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 1` for :math:`x = [0, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-500.] * self.N, [500.] * self.N)
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 1.0
def fun(self, x, *args):
self.nfev += 1
return (abs(x[0] ** 2.0 + x[1] ** 2.0 + x[0] * x[1]) + abs(sin(x[0]))
+ abs(cos(x[1])))
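# Quick sanity check (added commentary, not part of the benchmark): at the reported
# optimum x = [0, 0] the quadratic and sine terms vanish and |cos(0)| = 1, matching
# self.fglob = 1.0.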
class Beale(Benchmark):
r"""
Beale objective function.
The Beale [1]_ global optimization problem is a multimodal
minimization problem defined as follows:
.. math::
f_{\text{Beale}}(x) = \left(x_1 x_2 - x_1 + 1.5\right)^{2} +
\left(x_1 x_2^{2} - x_1 + 2.25\right)^{2} + \left(x_1 x_2^{3} - x_1 +
2.625\right)^{2}
with :math:`x_i \in [-4.5, 4.5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x=[3, 0.5]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-4.5] * self.N, [4.5] * self.N)
self.global_optimum = [[3.0, 0.5]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return ((1.5 - x[0] + x[0] * x[1]) ** 2
+ (2.25 - x[0] + x[0] * x[1] ** 2) ** 2
+ (2.625 - x[0] + x[0] * x[1] ** 3) ** 2)
class BiggsExp02(Benchmark):
r"""
BiggsExp02 objective function.
The BiggsExp02 [1]_ global optimization problem is a multimodal minimization
problem defined as follows
.. math::
\begin{matrix}
f_{\text{BiggsExp02}}(x) = \sum_{i=1}^{10} (e^{-t_i x_1}
- 5 e^{-t_i x_2} - y_i)^2 \\
t_i = 0.1 i\\
y_i = e^{-t_i} - 5 e^{-10t_i}\\
\end{matrix}
with :math:`x_i \in [0, 20]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([0] * 2,
[20] * 2)
self.global_optimum = [[1., 10.]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
t = arange(1, 11.) * 0.1
y = exp(-t) - 5 * exp(-10 * t)
vec = (exp(-t * x[0]) - 5 * exp(-t * x[1]) - y) ** 2
return sum(vec)
class BiggsExp03(Benchmark):
r"""
BiggsExp03 objective function.
The BiggsExp03 [1]_ global optimization problem is a multimodal minimization
problem defined as follows
.. math::
\begin{matrix}\ f_{\text{BiggsExp03}}(x) = \sum_{i=1}^{10}
(e^{-t_i x_1} - x_3e^{-t_i x_2} - y_i)^2\\
t_i = 0.1i\\
y_i = e^{-t_i} - 5e^{-10 t_i}\\
\end{matrix}
with :math:`x_i \in [0, 20]` for :math:`i = 1, 2, 3`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 5]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = zip([0] * 3,
[20] * 3)
self.global_optimum = [[1., 10., 5.]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
t = arange(1., 11.) * 0.1
y = exp(-t) - 5 * exp(-10 * t)
vec = (exp(-t * x[0]) - x[2] * exp(-t * x[1]) - y) ** 2
return sum(vec)
class BiggsExp04(Benchmark):
r"""
BiggsExp04 objective function.
The BiggsExp04 [1]_ global optimization problem is a multimodal
minimization problem defined as follows
.. math::
\begin{matrix}\ f_{\text{BiggsExp04}}(x) = \sum_{i=1}^{10}
(x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} - y_i)^2\\
t_i = 0.1i\\
y_i = e^{-t_i} - 5 e^{-10 t_i}\\
\end{matrix}
with :math:`x_i \in [0, 20]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 1, 5]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = zip([0.] * 4,
[20.] * 4)
self.global_optimum = [[1., 10., 1., 5.]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
t = arange(1, 11.) * 0.1
y = exp(-t) - 5 * exp(-10 * t)
vec = (x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1]) - y) ** 2
return sum(vec)
class BiggsExp05(Benchmark):
r"""
BiggsExp05 objective function.
The BiggsExp05 [1]_ global optimization problem is a multimodal minimization
problem defined as follows
.. math::
\begin{matrix}\ f_{\text{BiggsExp05}}(x) = \sum_{i=1}^{11}
(x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} + 3 e^{-t_i x_5} - y_i)^2\\
t_i = 0.1i\\
y_i = e^{-t_i} - 5e^{-10 t_i} + 3e^{-4 t_i}\\
\end{matrix}
with :math:`x_i \in [0, 20]` for :math:`i=1, ..., 5`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 1, 5, 4]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self._bounds = zip([0.] * 5,
[20.] * 5)
self.global_optimum = [[1., 10., 1., 5., 4.]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
t = arange(1, 12.) * 0.1
y = exp(-t) - 5 * exp(-10 * t) + 3 * exp(-4 * t)
vec = (x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1])
+ 3 * exp(-t * x[4]) - y) ** 2
return sum(vec)
class Bird(Benchmark):
r"""
Bird objective function.
The Bird global optimization problem is a multimodal minimization
problem defined as follows
.. math::
f_{\text{Bird}}(x) = \left(x_1 - x_2\right)^{2} + e^{\left[1 -
\sin\left(x_1\right) \right]^{2}} \cos\left(x_2\right) + e^{\left[1 -
\cos\left(x_2\right)\right]^{2}} \sin\left(x_1\right)
with :math:`x_i \in [-2\pi, 2\pi]`
*Global optimum*: :math:`f(x) = -106.7645367198034` for :math:`x
= [4.701055751981055, 3.152946019601391]` or :math:`x =
[-1.582142172055011, -3.130246799635430]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-2.0 * pi] * self.N,
[2.0 * pi] * self.N)
self.global_optimum = [[4.701055751981055, 3.152946019601391],
[-1.582142172055011, -3.130246799635430]]
self.fglob = -106.7645367198034
def fun(self, x, *args):
self.nfev += 1
return (sin(x[0]) * exp((1 - cos(x[1])) ** 2)
+ cos(x[1]) * exp((1 - sin(x[0])) ** 2) + (x[0] - x[1]) ** 2)
class Bohachevsky1(Benchmark):
r"""
Bohachevsky 1 objective function.
The Bohachevsky 1 [1]_ global optimization problem is a multimodal
minimization problem defined as follows
.. math::
f_{\text{Bohachevsky}}(x) = \sum_{i=1}^{n-1}\left[x_i^2 + 2 x_{i+1}^2 -
0.3 \cos(3 \pi x_i) - 0.4 \cos(4 \pi x_{i + 1}) + 0.7 \right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-15, 15]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1,
..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: equation needs to be fixed up in the docstring. see Jamil#17
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-100.0] * self.N, [100.0] * self.N)
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0])
- 0.4 * cos(4 * pi * x[1]) + 0.7)
class Bohachevsky2(Benchmark):
r"""
Bohachevsky 2 objective function.
The Bohachevsky 2 [1]_ global optimization problem is a multimodal
minimization problem defined as follows
.. math::
f_{\text{Bohachevsky}}(x) = \sum_{i=1}^{n-1}\left[x_i^2 + 2 x_{i+1}^2 -
0.3 \cos(3 \pi x_i) - 0.4 \cos(4 \pi x_{i + 1}) + 0.7 \right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-15, 15]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1,
..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: equation needs to be fixed up in the docstring. Jamil is also wrong.
There should be no 0.4 factor in front of the cos term
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-100.0] * self.N, [100.0] * self.N)
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0])
* cos(4 * pi * x[1]) + 0.3)
class Bohachevsky3(Benchmark):
r"""
Bohachevsky 3 objective function.
The Bohachevsky 3 [1]_ global optimization problem is a multimodal
minimization problem defined as follows
.. math::
f_{\text{Bohachevsky}}(x) = \sum_{i=1}^{n-1}\left[x_i^2 + 2 x_{i+1}^2 -
0.3 \cos(3 \pi x_i) - 0.4 \cos(4 \pi x_{i + 1}) + 0.7 \right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-15, 15]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1,
..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: equation needs to be fixed up in the docstring. Jamil#19
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-100.0] * self.N, [100.0] * self.N)
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + 2 * x[1] ** 2
- 0.3 * cos(3 * pi * x[0] + 4 * pi * x[1]) + 0.3)
class BoxBetts(Benchmark):
r"""
BoxBetts objective function.
The BoxBetts global optimization problem is a multimodal
minimization problem defined as follows
.. math::
f_{\text{BoxBetts}}(x) = \sum_{i=1}^k g(x_i)^2
Where, in this exercise:
.. math::
g(x) = e^{-0.1i x_1} - e^{-0.1i x_2} - x_3\left[e^{-0.1i}
- e^{-i}\right]
And :math:`k = 10`.
Here, :math:`x_1 \in [0.9, 1.2], x_2 \in [9, 11.2], x_3 \in [0.9, 1.2]`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 1]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2])
self.global_optimum = [[1.0, 10.0, 1.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
i = arange(1, 11)
g = (exp(-0.1 * i * x[0]) - exp(-0.1 * i * x[1])
- (exp(-0.1 * i) - exp(-i)) * x[2])
return sum(g**2)
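# Quick sanity check (added commentary, not part of the benchmark): at the optimum
# x = [1, 10, 1] each g reduces to exp(-0.1*i) - exp(-i) - (exp(-0.1*i) - exp(-i)),
# which is identically zero, so f(x) = 0 as recorded in self.fglob.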
class Branin01(Benchmark):
r"""
Branin01 objective function.
The Branin01 global optimization problem is a multimodal minimization
problem defined as follows
.. math::
f_{\text{Branin01}}(x) = \left(- 1.275 \frac{x_1^{2}}{\pi^{2}} + 5
\frac{x_1}{\pi} + x_2 -6\right)^{2} + \left(10 -\frac{5}{4 \pi} \right)
\cos\left(x_1\right) + 10
with :math:`x_1 \in [-5, 10], x_2 \in [0, 15]`
*Global optimum*: :math:`f(x) = 0.39788735772973816` for :math:`x =
[-\pi, 12.275]` or :math:`x = [\pi, 2.275]` or :math:`x = [3\pi, 2.475]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: Jamil#22, one of the solutions is different
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-5., 10.), (0., 15.)]
self.global_optimum = [[-pi, 12.275], [pi, 2.275], [3 * pi, 2.475]]
self.fglob = 0.39788735772973816
def fun(self, x, *args):
self.nfev += 1
return ((x[1] - (5.1 / (4 * pi ** 2)) * x[0] ** 2
+ 5 * x[0] / pi - 6) ** 2
+ 10 * (1 - 1 / (8 * pi)) * cos(x[0]) + 10)
class Branin02(Benchmark):
r"""
Branin02 objective function.
The Branin02 global optimization problem is a multimodal minimization
problem defined as follows
.. math::
f_{\text{Branin02}}(x) = \left(- 1.275 \frac{x_1^{2}}{\pi^{2}}
+ 5 \frac{x_1}{\pi} + x_2 - 6 \right)^{2} + \left(10 - \frac{5}{4 \pi}
\right) \cos\left(x_1\right) \cos\left(x_2\right)
+ \log(x_1^2+x_2^2 + 1) + 10
with :math:`x_i \in [-5, 15]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 5.559037` for :math:`x = [-3.2, 12.53]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-5.0, 15.0), (-5.0, 15.0)]
self.global_optimum = [[-3.1969884, 12.52625787]]
self.fglob = 5.5589144038938247
def fun(self, x, *args):
self.nfev += 1
return ((x[1] - (5.1 / (4 * pi ** 2)) * x[0] ** 2
+ 5 * x[0] / pi - 6) ** 2
+ 10 * (1 - 1 / (8 * pi)) * cos(x[0]) * cos(x[1])
+ log(x[0] ** 2.0 + x[1] ** 2.0 + 1.0) + 10)
class Brent(Benchmark):
r"""
Brent objective function.
The Brent [1]_ global optimization problem is a multimodal minimization
problem defined as follows:
.. math::
f_{\text{Brent}}(x) = (x_1 + 10)^2 + (x_2 + 10)^2 + e^{(-x_1^2 -x_2^2)}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [-10, -10]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO solution is different to Jamil#24
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-10.0] * self.N, [10.0] * self.N)
self.custom_bounds = ([-10, 2], [-10, 2])
self.global_optimum = [[-10.0, -10.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return ((x[0] + 10.0) ** 2.0 + (x[1] + 10.0) ** 2.0
+ exp(-x[0] ** 2.0 - x[1] ** 2.0))
class Brown(Benchmark):
r"""
Brown objective function.
The Brown [1]_ global optimization problem is a multimodal minimization
problem defined as follows:
.. math::
f_{\text{Brown}}(x) = \sum_{i=1}^{n-1}\left[
\left(x_i^2\right)^{x_{i + 1}^2 + 1}
+ \left(x_{i + 1}^2\right)^{x_i^2 + 1}\right]
with :math:`x_i \in [-1, 4]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for
:math:`i=1,...,n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-1.0] * self.N, [4.0] * self.N)
self.custom_bounds = ([-1.0, 1.0], [-1.0, 1.0])
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
x0 = x[:-1]
x1 = x[1:]
return sum((x0 ** 2.0) ** (x1 ** 2.0 + 1.0)
+ (x1 ** 2.0) ** (x0 ** 2.0 + 1.0))
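# Illustrative sketch of the vectorised pairwise sum used in Brown.fun: for a
# short vector the slice-based form matches an explicit index loop. numpy is
# imported locally under an assumed alias to keep the sketch self-contained.
import numpy as _np
_xs = _np.array([0.5, -0.25, 1.0])
_loop_total = 0.0
for _i in range(len(_xs) - 1):
    _loop_total += ((_xs[_i] ** 2) ** (_xs[_i + 1] ** 2 + 1.0)
                    + (_xs[_i + 1] ** 2) ** (_xs[_i] ** 2 + 1.0))
_vec_total = _np.sum((_xs[:-1] ** 2) ** (_xs[1:] ** 2 + 1.0)
                     + (_xs[1:] ** 2) ** (_xs[:-1] ** 2 + 1.0))
assert abs(_loop_total - _vec_total) < 1e-12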
class Bukin02(Benchmark):
r"""
Bukin02 objective function.
The Bukin02 [1]_ global optimization problem is a multimodal minimization
problem defined as follows:
.. math::
f_{\text{Bukin02}}(x) = 100 (x_2^2 - 0.01x_1^2 + 1)
+ 0.01(x_1 + 10)^2
with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]`
*Global optimum*: :math:`f(x) = -124.75` for :math:`x = [-15, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: I think that Gavana and Jamil are wrong on this function. In both
sources the x[1] term is not squared. As such there will be a minimum at
the smallest value of x[1].
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [[-15.0, 0.0]]
self.fglob = -124.75
def fun(self, x, *args):
self.nfev += 1
return (100 * (x[1] ** 2 - 0.01 * x[0] ** 2 + 1.0)
+ 0.01 * (x[0] + 10.0) ** 2.0)
class Bukin04(Benchmark):
r"""
Bukin04 objective function.
The Bukin04 [1]_ global optimization problem is a multimodal minimization
problem defined as follows:
.. math::
f_{\text{Bukin04}}(x) = 100 x_2^{2} + 0.01 \lvert{x_1 + 10}
\rvert
with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]`
*Global optimum*: :math:`f(x) = 0` for :math:`x = [-10, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [[-10.0, 0.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return 100 * x[1] ** 2 + 0.01 * abs(x[0] + 10)
class Bukin06(Benchmark):
r"""
Bukin06 objective function.
The Bukin06 [1]_ global optimization problem is a multimodal minimization
problem defined as follows:
.. math::
f_{\text{Bukin06}}(x) = 100 \sqrt{ \lvert{x_2 - 0.01 x_1^{2}}
\rvert} + 0.01 \lvert{x_1 + 10} \rvert
with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]`
*Global optimum*: :math:`f(x) = 0` for :math:`x = [-10, 1]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [[-10.0, 1.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return 100 * sqrt(abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * abs(x[0] + 10)
|
chatcannon/scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
|
Python
|
bsd-3-clause
| 21,639
|
from __future__ import unicode_literals
import datetime
import re
import sys
from unittest import skipIf
import warnings
from xml.dom.minidom import parseString
try:
import pytz
except ImportError:
pytz = None
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models import Min, Max
from django.http import HttpRequest
from django.template import Context, RequestContext, Template, TemplateSyntaxError
from django.test import TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import six
from django.utils import timezone
from .forms import EventForm, EventSplitForm, EventLocalizedForm, EventModelForm, EventLocalizedModelForm
from .models import Event, MaybeEvent, Session, SessionEvent, Timestamp, AllDayEvent
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# time zones, which don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
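# Illustrative sketch of the fixed offsets above (not one of the tests): the
# reference datetime maps between zones exactly as described, i.e.
# 13:20:30 EAT == 10:20:30 UTC == 17:20:30 ICT.
_reference_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
assert _reference_dt.astimezone(UTC) == datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
assert _reference_dt.astimezone(ICT) == datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)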
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
        # django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=UTC), dt)
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
        # django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=ICT), dt)
@skipIfDBFeature('supports_timezones')
    def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@skipIf(pytz is None, "this test requires pytz")
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField Event.dt "
"received a naive datetime"))
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(TestCase):
# Backend-specific notes:
# - JSON supports only milliseconds, microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
    #   but when it loads this representation, it subtracts the offset and
# returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
# Tests are adapted to take these quirks into account.
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]['fields']['dt'], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName('field')[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
six.assertRegex(self, yaml,
r"- fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@skipIf(pytz is None, "this test requires pytz")
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({'dt': None, 'tz': ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({'dt': 'not a date', 'tz': ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
@skipIf(pytz is None, "this test requires pytz")
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@skipIf(pytz is None, "this test requires pytz")
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.core.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
self.assertEqual(tpl.render(Context()), "")
self.assertEqual(tpl.render(RequestContext(HttpRequest())), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
def test_localtime_with_time_zone_setting_set_to_none(self):
# Regression for #17274
tpl = Template("{% load tz %}{{ dt }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
with self.settings(TIME_ZONE=None):
# the actual value depends on the system time zone of the host
self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_non_existent_time(self):
form = EventForm({'dt': '2011-03-27 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_ambiguous_time(self):
form = EventForm({'dt': '2011-10-30 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
@requires_tz_support
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_other_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30'})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_explicit_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
        # Datetime input formats don't allow providing a time zone.
self.assertFalse(form.is_valid())
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_non_existent_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-03-27 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-03-27 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_ambiguous_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-10-30 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-10-30 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminTests(TestCase):
urls = 'timezones.urls'
fixtures = ['tz_users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(ICT).isoformat())
@override_settings(TIME_ZONE='Africa/Nairobi')
class UtilitiesTests(TestCase):
def test_make_aware(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
)
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 10, 20, 30), UTC),
datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
)
def test_make_naive(self):
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30)
)
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), UTC),
datetime.datetime(2011, 9, 1, 10, 20, 30)
)
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), UTC),
datetime.datetime(2011, 9, 1, 10, 20, 30)
)
|
deployed/django
|
tests/timezones/tests.py
|
Python
|
bsd-3-clause
| 55,059
|
# Example using PIO to create a UART TX interface
from machine import Pin
from rp2 import PIO, StateMachine, asm_pio
UART_BAUD = 115200
PIN_BASE = 10
NUM_UARTS = 8
@asm_pio(sideset_init=PIO.OUT_HIGH, out_init=PIO.OUT_HIGH, out_shiftdir=PIO.SHIFT_RIGHT)
def uart_tx():
# fmt: off
# Block with TX deasserted until data available
pull()
# Initialise bit counter, assert start bit for 8 cycles
set(x, 7) .side(0) [7]
# Shift out 8 data bits, 8 execution cycles per bit
label("bitloop")
out(pins, 1) [6]
jmp(x_dec, "bitloop")
# Assert stop bit for 8 cycles total (incl 1 for pull())
nop() .side(1) [6]
# fmt: on
# Now we add 8 UART TXs, on pins 10 to 17. Use the same baud rate for all of them.
uarts = []
for i in range(NUM_UARTS):
sm = StateMachine(
i, uart_tx, freq=8 * UART_BAUD, sideset_base=Pin(PIN_BASE + i), out_base=Pin(PIN_BASE + i)
)
sm.active(1)
uarts.append(sm)
# We can print characters from each UART by pushing them to the TX FIFO
def pio_uart_print(sm, s):
for c in s:
sm.put(ord(c))
# Print a different message from each UART
for i, u in enumerate(uarts):
pio_uart_print(u, "Hello from UART {}!\n".format(i))
|
pfalcon/micropython
|
examples/rp2/pio_uart_tx.py
|
Python
|
mit
| 1,250
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_database_facts
description:
- Gather facts for GCP Database
short_description: Gather facts for GCP Database
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The instance to create the database on.
    - 'This field represents a link to an Instance resource in GCP. It can be specified
      in two ways. First, you can place the name of the resource here as a string.
      Alternatively, you can add `register: name-of-resource` to a gcp_spanner_instance
task and then set this instance field to "{{ name-of-resource }}"'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a database facts
gcp_spanner_database_facts:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
returned: success
type: str
extraStatements:
description:
- 'An optional list of DDL statements to run inside the newly created database.
Statements can create tables, indexes, etc. These statements execute atomically
with the creation of the database: if there is an error in any statement,
the database is not created.'
returned: success
type: list
instance:
description:
- The instance to create the database on.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(instance=dict(required=True)))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
items = fetch_list(module, collection(module))
if items.get('databases'):
items = items.get('databases')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res)
def fetch_list(module, link):
auth = GcpSession(module, 'spanner')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/cloud/google/gcp_spanner_database_facts.py
|
Python
|
gpl-3.0
| 4,857
|
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import objects
ALIAS = 'os-pci'
soft_authorize = extensions.os_compute_soft_authorizer(ALIAS + ':pci_servers')
authorize = extensions.os_compute_authorizer(ALIAS)
PCI_ADMIN_KEYS = ['id', 'address', 'vendor_id', 'product_id', 'status',
'compute_node_id']
PCI_DETAIL_KEYS = ['dev_type', 'label', 'instance_uuid', 'dev_id',
'extra_info']
class PciServerController(wsgi.Controller):
def _extend_server(self, server, instance):
dev_id = []
for dev in instance.pci_devices:
dev_id.append({'id': dev.id})
server['%s:pci_devices' % Pci.alias] = dev_id
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
server = resp_obj.obj['server']
instance = req.get_db_instance(server['id'])
self._extend_server(server, instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if soft_authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
instance = req.get_db_instance(server['id'])
self._extend_server(server, instance)
class PciHypervisorController(wsgi.Controller):
def _extend_hypervisor(self, hypervisor, compute_node):
if compute_node.pci_device_pools is not None:
pci_pools = [pci_pool.to_dict()
for pci_pool in compute_node.pci_device_pools]
else:
pci_pools = []
hypervisor['%s:pci_stats' % Pci.alias] = pci_pools
@wsgi.extends
def show(self, req, resp_obj, id):
hypervisor = resp_obj.obj['hypervisor']
compute_node = req.get_db_compute_node(hypervisor['id'])
self._extend_hypervisor(hypervisor, compute_node)
@wsgi.extends
def detail(self, req, resp_obj):
hypervisors = list(resp_obj.obj['hypervisors'])
for hypervisor in hypervisors:
compute_node = req.get_db_compute_node(hypervisor['id'])
self._extend_hypervisor(hypervisor, compute_node)
class PciController(wsgi.Controller):
def __init__(self):
self.host_api = compute.HostAPI()
def _view_pcidevice(self, device, detail=False):
dev_dict = {}
for key in PCI_ADMIN_KEYS:
dev_dict[key] = getattr(device, key)
if detail:
for field in PCI_DETAIL_KEYS:
if field == 'instance_uuid':
dev_dict['server_uuid'] = getattr(device, field)
else:
dev_dict[field] = getattr(device, field)
return dev_dict
def _get_all_nodes_pci_devices(self, req, detail, action):
context = req.environ['nova.context']
authorize(context, action=action)
compute_nodes = self.host_api.compute_node_get_all(context)
results = []
for node in compute_nodes:
pci_devs = objects.PciDeviceList.get_by_compute_node(
context, node['id'])
results.extend([self._view_pcidevice(dev, detail)
for dev in pci_devs])
return results
@extensions.expected_errors(())
def detail(self, req):
results = self._get_all_nodes_pci_devices(req, True, 'detail')
return dict(pci_devices=results)
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context, action='show')
try:
pci_dev = objects.PciDevice.get_by_dev_id(context, id)
except exception.PciDeviceNotFoundById as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
result = self._view_pcidevice(pci_dev, True)
return dict(pci_device=result)
@extensions.expected_errors(())
def index(self, req):
results = self._get_all_nodes_pci_devices(req, False, 'index')
return dict(pci_devices=results)
class Pci(extensions.V3APIExtensionBase):
"""Pci access support."""
name = "PciAccess"
alias = ALIAS
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension(ALIAS,
PciController(),
collection_actions={'detail': 'GET'})]
return resources
def get_controller_extensions(self):
server_extension = extensions.ControllerExtension(
self, 'servers', PciServerController())
compute_extension = extensions.ControllerExtension(
self, 'os-hypervisors', PciHypervisorController())
return [server_extension, compute_extension]
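# --- Editor's note: illustrative only, not part of the original extension ---
# With this plugin enabled, a "show server" response gains a key of the form
#   "os-pci:pci_devices": [{"id": 1}, {"id": 2}]
# and a "show hypervisor" response gains
#   "os-pci:pci_stats": [<pci_device_pool dicts>]
# The key prefix comes from Pci.alias, i.e. ALIAS = 'os-pci' above.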
|
LoHChina/nova
|
nova/api/openstack/compute/plugins/v3/pci.py
|
Python
|
apache-2.0
| 5,476
|
from rest_framework import serializers as ser
from rest_framework import exceptions
from framework.auth.oauth_scopes import public_scopes
from website.models import ApiOAuth2PersonalToken
from api.base.serializers import JSONAPISerializer, LinksField, IDField, TypeField
class ApiOAuth2PersonalTokenSerializer(JSONAPISerializer):
"""Serialize data about a registered personal access token"""
id = IDField(source='_id', read_only=True, help_text='The object ID for this token (automatically generated)')
type = TypeField()
name = ser.CharField(help_text='A short, descriptive name for this token',
required=True)
owner = ser.CharField(help_text='The user who owns this token',
read_only=True, # Don't let user register a token in someone else's name
source='owner._id')
scopes = ser.CharField(help_text='Governs permissions associated with this token',
required=True)
token_id = ser.CharField(read_only=True, allow_blank=True)
class Meta:
type_ = 'tokens'
links = LinksField({
'html': 'absolute_url'
})
def absolute_url(self, obj):
return obj.absolute_url
def to_representation(self, obj, envelope='data'):
data = super(ApiOAuth2PersonalTokenSerializer, self).to_representation(obj, envelope=envelope)
# Make sure users only see token_id on create
if not self.context['request'].method == 'POST':
if 'data' in data:
data['data']['attributes'].pop('token_id')
else:
data['attributes'].pop('token_id')
return data
def create(self, validated_data):
validate_requested_scopes(validated_data)
instance = ApiOAuth2PersonalToken(**validated_data)
instance.save()
return instance
def update(self, instance, validated_data):
validate_requested_scopes(validated_data)
assert isinstance(instance, ApiOAuth2PersonalToken), 'instance must be an ApiOAuth2PersonalToken'
        instance.deactivate(save=False)  # This will cause CAS to revoke the existing token but still allow it to be used in the future; new scopes will be updated properly at that time.
instance.reload()
for attr, value in validated_data.iteritems():
if attr == 'token_id': # Do not allow user to update token_id
continue
else:
setattr(instance, attr, value)
instance.save()
return instance
def validate_requested_scopes(validated_data):
scopes_set = set(validated_data['scopes'].split(' '))
for scope in scopes_set:
if scope not in public_scopes or not public_scopes[scope].is_public:
raise exceptions.ValidationError('User requested invalid scope')
|
ticklemepierce/osf.io
|
api/tokens/serializers.py
|
Python
|
apache-2.0
| 2,864
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import hashlib
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol, \
listenWS
class MessageBasedHashServerProtocol(WebSocketServerProtocol):
"""
Message-based WebSockets server that computes a SHA-256 for every
message it receives and sends back the computed digest.
"""
def onMessage(self, payload, isBinary):
sha256 = hashlib.sha256()
sha256.update(payload)
digest = sha256.hexdigest()
self.sendMessage(digest.encode('utf8'))
print("Sent digest for message: {}".format(digest))
if __name__ == '__main__':
factory = WebSocketServerFactory(u"ws://127.0.0.1:9000")
factory.protocol = MessageBasedHashServerProtocol
listenWS(factory)
reactor.run()
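# --- Editor's sketch: a minimal companion client, not part of the original example ---
# The class below shows one way to talk to the hash server above: send a single
# binary message on open and print the digest the server echoes back. It is
# defined purely for illustration and is never instantiated in this file.
from autobahn.twisted.websocket import WebSocketClientFactory, \
    WebSocketClientProtocol, \
    connectWS
class MessageBasedHashClientProtocol(WebSocketClientProtocol):
    def onOpen(self):
        # arbitrary payload; the server replies with its SHA-256 hex digest
        self.sendMessage(b"hello, hash server", isBinary=True)
    def onMessage(self, payload, isBinary):
        print("Digest received: {}".format(payload.decode('utf8')))
        self.sendClose()
# To try it against the server started above:
#   factory = WebSocketClientFactory(u"ws://127.0.0.1:9000")
#   factory.protocol = MessageBasedHashClientProtocol
#   connectWS(factory)
#   reactor.run()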
|
hzruandd/AutobahnPython
|
examples/twisted/websocket/streaming/message_based_server.py
|
Python
|
mit
| 2,143
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Errors used in the Python datastore API."""
class Error(Exception):
"""Base datastore error type.
"""
class BadValueError(Error):
"""Raised by Entity.__setitem__(), Query.__setitem__(), Get(), and others
when a property value or filter value is invalid.
"""
class BadPropertyError(Error):
"""Raised by Entity.__setitem__() when a property name isn't a string.
"""
class BadRequestError(Error):
"""Raised by datastore calls when the parameter(s) are invalid.
"""
class EntityNotFoundError(Error):
"""DEPRECATED: Raised by Get() when the requested entity is not found.
"""
class BadArgumentError(Error):
"""Raised by Query.Order(), Iterator.Next(), and others when they're
passed an invalid argument.
"""
class QueryNotFoundError(Error):
"""DEPRECATED: Raised by Iterator methods when the Iterator is invalid. This
should not happen during normal usage; it protects against malicious users
and system errors.
"""
class TransactionNotFoundError(Error):
"""DEPRECATED: Raised by RunInTransaction. This is an internal error; you
should not see this.
"""
class Rollback(Error):
"""May be raised by transaction functions when they want to roll back
instead of committing. Note that *any* exception raised by a transaction
function will cause a rollback. This is purely for convenience. See
datastore.RunInTransaction for details.
"""
class TransactionFailedError(Error):
"""Raised by RunInTransaction methods when the transaction could not be
committed, even after retrying. This is usually due to high contention.
"""
class BadFilterError(Error):
"""Raised by Query.__setitem__() and Query.Run() when a filter string is
invalid.
"""
def __init__(self, filter):
self.filter = filter
message = (u'invalid filter: %s.' % self.filter).encode('utf-8')
super(BadFilterError, self).__init__(message)
class BadQueryError(Error):
"""Raised by Query when a query or query string is invalid.
"""
class BadKeyError(Error):
"""Raised by Key.__str__ when the key is invalid.
"""
class InternalError(Error):
"""An internal datastore error. Please report this to Google.
"""
class NeedIndexError(Error):
"""No matching index was found for a query that requires an index. Check
the Indexes page in the Admin Console and your index.yaml file.
"""
def __init__(self, error, original_message=None, header=None, yaml_index=None,
xml_index=None):
super(NeedIndexError, self).__init__(error)
self._original_message = original_message
self._header = header
self._yaml_index = yaml_index
self._xml_index = xml_index
def OriginalMessage(self):
return self._original_message
def Header(self):
return self._header
def YamlIndex(self):
return self._yaml_index
def XmlIndex(self):
return self._xml_index
class ReferencePropertyResolveError(Error):
"""An error occurred while trying to resolve a ReferenceProperty."""
class Timeout(Error):
"""The datastore operation timed out, or the data was temporarily
unavailable. This can happen when you attempt to put, get, or delete too
many entities or an entity with too many properties, or if the datastore is
overloaded or having trouble.
"""
class CommittedButStillApplying(Timeout):
"""The write or transaction was committed, but some entities or index rows
may not have been fully updated. Those updates should automatically be
applied soon. You can roll them forward immediately by reading one of the
entities inside a transaction.
"""
|
GdZ/scriptfile
|
software/googleAppEngine/google/appengine/api/datastore_errors.py
|
Python
|
mit
| 4,172
|
import binascii
import itertools
import os
import random
import subprocess
from weaver.stack import WeaverNests
from weaver.util import Stash
def nstdir(path):
return os.path.join(CurrentNest().work_dir, path)
# Thoughts:
# - For shared files: fifo-0,push-async-1 is equivalent to fifo-0,pull-inf
TASKS = 25
SHARED = [
{
'count': 128,
'prefix': '1R-shared',
'size': lambda: random.randint(1, 64*2**10),
},
{
'count': 128,
'prefix': '1G-shared',
'size': lambda: 1*2**30,
},
{
'count': 64,
'prefix': '2G-shared',
'size': lambda: 2*2**30,
},
{
'count': 32,
'prefix': '4G-shared',
'size': lambda: 4*2**30,
},
{
'count': 16,
'prefix': '8G-shared',
'size': lambda: 8*2**30,
},
]
UNIQUE = [
# {
# 'count': 4,
# 'prefix': '2G',
# 'size': lambda: 2*2**30,
# },
# {
# 'count': 2,
# 'prefix': '4G',
# 'size': lambda: 4*2**30,
# },
]
consumer = ShellFunction('''
for f; do
test -e "$f" || exit 1
done
''', cmd_format = "{EXE} {ARG}")
producer = ShellFunction('''
touch "$1"
shift
while [ "$#" -ge 3 ]; do
openssl enc -aes-256-ctr -nosalt -pass pass:"$1" < /dev/zero 2> /dev/null | head -c "$2" > "$3"
shift
shift
shift
done
''', cmd_format = "{EXE} {ARG}")
gen = []
shared = []
for i in range(TASKS):
shared.append(nstdir('sync.%08d' % i))
for f in SHARED:
for i in range(f['count']):
path = nstdir((f['prefix'] + '.%08d') % i)
gen.append({'path': path, 'size': f['size']()})
shared.append(path)
for task in range(TASKS):
print("compiling task %d" % task)
inputs = []
inputs.extend(shared)
taskdir = nstdir('task.%08d' % task)
os.mkdir(taskdir)
for f in UNIQUE:
for i in range(f['count']):
path = os.path.join(taskdir, (f['prefix'] + '.%08d') % i)
inputs.append(path)
gen.append({'path': path, 'size': f['size']()})
consumer(arguments = inputs, inputs = inputs)
random.shuffle(gen)
def makerandoms(i, files):
sync = nstdir('sync.%08d' % i)
args = [sync]
outputs = [sync]
for f in files:
args.extend((binascii.hexlify(os.urandom(64)), f['size'], f['path']))
outputs.append(f['path'])
producer(arguments = args, outputs = outputs)
for i in range(TASKS):
makerandoms(i, gen[i::TASKS])
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
|
nkremerh/cctools
|
chirp/tools/workflows/pull-tests.py
|
Python
|
gpl-2.0
| 2,512
|
# -*- coding: utf-8 -*-
from ansible.compat.tests import unittest
from ansible.modules.packaging.os.yum import YumModule
yum_plugin_load_error = """
Plugin "product-id" can't be imported
Plugin "search-disabled-repos" can't be imported
Plugin "subscription-manager" can't be imported
Plugin "product-id" can't be imported
Plugin "search-disabled-repos" can't be imported
Plugin "subscription-manager" can't be imported
"""
# from https://github.com/ansible/ansible/issues/20608#issuecomment-276106505
wrapped_output_1 = """
Загружены модули: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirror.h1host.ru
* extras: mirror.h1host.ru
* updates: mirror.h1host.ru
vms-agent.x86_64 0.0-9 dev
"""
# from https://github.com/ansible/ansible/issues/20608#issuecomment-276971275
wrapped_output_2 = """
Загружены модули: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirror.corbina.net
* extras: mirror.corbina.net
* updates: mirror.corbina.net
empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty.x86_64
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1-0
addons
libtiff.x86_64 4.0.3-27.el7_3 updates
"""
# From https://github.com/ansible/ansible/issues/20608#issuecomment-276698431
wrapped_output_3 = """
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
ceph.x86_64 1:11.2.0-0.el7 ceph
ceph-base.x86_64 1:11.2.0-0.el7 ceph
ceph-common.x86_64 1:11.2.0-0.el7 ceph
ceph-mds.x86_64 1:11.2.0-0.el7 ceph
ceph-mon.x86_64 1:11.2.0-0.el7 ceph
ceph-osd.x86_64 1:11.2.0-0.el7 ceph
ceph-selinux.x86_64 1:11.2.0-0.el7 ceph
libcephfs1.x86_64 1:11.0.2-0.el7 ceph
librados2.x86_64 1:11.2.0-0.el7 ceph
libradosstriper1.x86_64 1:11.2.0-0.el7 ceph
librbd1.x86_64 1:11.2.0-0.el7 ceph
librgw2.x86_64 1:11.2.0-0.el7 ceph
python-cephfs.x86_64 1:11.2.0-0.el7 ceph
python-rados.x86_64 1:11.2.0-0.el7 ceph
python-rbd.x86_64 1:11.2.0-0.el7 ceph
"""
# from https://github.com/ansible/ansible-modules-core/issues/4318#issuecomment-251416661
wrapped_output_4 = """
ipxe-roms-qemu.noarch 20160127-1.git6366fa7a.el7
rhelosp-9.0-director-puddle
quota.x86_64 1:4.01-11.el7_2.1 rhelosp-rhel-7.2-z
quota-nls.noarch 1:4.01-11.el7_2.1 rhelosp-rhel-7.2-z
rdma.noarch 7.2_4.1_rc6-2.el7 rhelosp-rhel-7.2-z
screen.x86_64 4.1.0-0.23.20120314git3c2946.el7_2
rhelosp-rhel-7.2-z
sos.noarch 3.2-36.el7ost.2 rhelosp-9.0-puddle
sssd-client.x86_64 1.13.0-40.el7_2.12 rhelosp-rhel-7.2-z
"""
# A 'normal-ish' yum check-update output, without any wrapped lines
unwrapped_output_rhel7 = """
Loaded plugins: etckeeper, product-id, search-disabled-repos, subscription-
: manager
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
NetworkManager-openvpn.x86_64 1:1.2.6-1.el7 epel
NetworkManager-openvpn-gnome.x86_64 1:1.2.6-1.el7 epel
cabal-install.x86_64 1.16.1.0-2.el7 epel
cgit.x86_64 1.1-1.el7 epel
python34-libs.x86_64 3.4.5-3.el7 epel
python34-test.x86_64 3.4.5-3.el7 epel
python34-tkinter.x86_64 3.4.5-3.el7 epel
python34-tools.x86_64 3.4.5-3.el7 epel
qgit.x86_64 2.6-4.el7 epel
rdiff-backup.x86_64 1.2.8-12.el7 epel
stoken-libs.x86_64 0.91-1.el7 epel
xlockmore.x86_64 5.49-2.el7 epel
"""
# Some wrapped obsoletes for prepending to output for testing both
wrapped_output_rhel7_obsoletes_postfix = """
Obsoleting Packages
ddashboard.x86_64 0.2.0.1-1.el7_3 mhlavink-developerdashboard
developerdashboard.x86_64 0.1.12.2-1.el7_2 @mhlavink-developerdashboard
python-bugzilla.noarch 1.2.2-3.el7_2.1 mhlavink-developerdashboard
python-bugzilla-develdashboardfixes.noarch
1.2.2-3.el7 @mhlavink-developerdashboard
python2-futures.noarch 3.0.5-1.el7 epel
python-futures.noarch 3.0.3-1.el7 @epel
python2-pip.noarch 8.1.2-5.el7 epel
python-pip.noarch 7.1.0-1.el7 @epel
python2-pyxdg.noarch 0.25-6.el7 epel
pyxdg.noarch 0.25-5.el7 @epel
python2-simplejson.x86_64 3.10.0-1.el7 epel
python-simplejson.x86_64 3.3.3-1.el7 @epel
Security: kernel-3.10.0-327.28.2.el7.x86_64 is an installed security update
Security: kernel-3.10.0-327.22.2.el7.x86_64 is the currently running version
"""
longname = """
Loaded plugins: fastestmirror, priorities, rhnplugin
This system is receiving updates from RHN Classic or Red Hat Satellite.
Loading mirror speeds from cached hostfile
xxxxxxxxxxxxxxxxxxxxxxxxxx.noarch
1.16-1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
glibc.x86_64 2.17-157.el7_3.1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"""
unwrapped_output_rhel7_obsoletes = unwrapped_output_rhel7 + wrapped_output_rhel7_obsoletes_postfix
unwrapped_output_rhel7_expected_pkgs = ["NetworkManager-openvpn", "NetworkManager-openvpn-gnome", "cabal-install",
"cgit", "python34-libs", "python34-test", "python34-tkinter",
"python34-tools", "qgit", "rdiff-backup", "stoken-libs", "xlockmore"]
class TestYumUpdateCheckParse(unittest.TestCase):
def _assert_expected(self, expected_pkgs, result):
for expected_pkg in expected_pkgs:
self.assertIn(expected_pkg, result)
self.assertEqual(len(result), len(expected_pkgs))
self.assertIsInstance(result, dict)
def test_empty_output(self):
res = YumModule.parse_check_update("")
expected_pkgs = []
self._assert_expected(expected_pkgs, res)
def test_longname(self):
res = YumModule.parse_check_update(longname)
expected_pkgs = ['xxxxxxxxxxxxxxxxxxxxxxxxxx', 'glibc']
self._assert_expected(expected_pkgs, res)
def test_plugin_load_error(self):
res = YumModule.parse_check_update(yum_plugin_load_error)
expected_pkgs = []
self._assert_expected(expected_pkgs, res)
def test_wrapped_output_1(self):
res = YumModule.parse_check_update(wrapped_output_1)
expected_pkgs = ["vms-agent"]
self._assert_expected(expected_pkgs, res)
def test_wrapped_output_2(self):
res = YumModule.parse_check_update(wrapped_output_2)
expected_pkgs = ["empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty",
"libtiff"]
self._assert_expected(expected_pkgs, res)
def test_wrapped_output_3(self):
res = YumModule.parse_check_update(wrapped_output_3)
expected_pkgs = ["ceph", "ceph-base", "ceph-common", "ceph-mds",
"ceph-mon", "ceph-osd", "ceph-selinux", "libcephfs1",
"librados2", "libradosstriper1", "librbd1", "librgw2",
"python-cephfs", "python-rados", "python-rbd"]
self._assert_expected(expected_pkgs, res)
def test_wrapped_output_4(self):
res = YumModule.parse_check_update(wrapped_output_4)
expected_pkgs = ["ipxe-roms-qemu", "quota", "quota-nls", "rdma", "screen",
"sos", "sssd-client"]
self._assert_expected(expected_pkgs, res)
def test_wrapped_output_rhel7(self):
res = YumModule.parse_check_update(unwrapped_output_rhel7)
self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res)
def test_wrapped_output_rhel7_obsoletes(self):
res = YumModule.parse_check_update(unwrapped_output_rhel7_obsoletes)
self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res)
|
maartenq/ansible
|
test/units/modules/packaging/os/test_yum.py
|
Python
|
gpl-3.0
| 9,340
|
# Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
from __future__ import division, absolute_import, print_function
import os, signal
import warnings
import sys
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_gcc_variable_attribute,
check_inline,
check_restrict,
check_compiler_gcc4)
from numpy.distutils.compat import get_exception
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
old_config.user_options += [
('fcompiler=', None, "specify the Fortran compiler type"),
]
def initialize_options(self):
self.fcompiler = None
old_config.initialize_options(self)
def _check_compiler (self):
old_config._check_compiler(self)
from numpy.distutils.fcompiler import FCompiler, new_fcompiler
if sys.platform == 'win32' and (self.compiler.compiler_type in
('msvc', 'intelw', 'intelemw')):
# XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
# initialize call query_vcvarsall, which throws an IOError, and
# causes an error along the way without much information. We try to
            # catch it here, hoping it is early enough, and print a helpful
# message instead of Error: None.
if not self.compiler.initialized:
try:
self.compiler.initialize()
except IOError:
e = get_exception()
msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed? If you are trying to build with MinGW, please use "python setup.py
build -c mingw32" instead. If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
VS 2010 for >= 3.3).
Original exception was: %s, and the Compiler class was %s
============================================================================""" \
% (e, self.compiler.__class__.__name__)
print ("""\
============================================================================""")
raise distutils.errors.DistutilsPlatformError(msg)
# After MSVC is initialized, add an explicit /MANIFEST to linker
# flags. See issues gh-4245 and gh-4101 for details. Also
# relevant are issues 4431 and 16296 on the Python bug tracker.
from distutils import msvc9compiler
if msvc9compiler.get_build_version() >= 10:
for ldflags in [self.compiler.ldflags_shared,
self.compiler.ldflags_shared_debug]:
if '/MANIFEST' not in ldflags:
ldflags.append('/MANIFEST')
if not isinstance(self.fcompiler, FCompiler):
self.fcompiler = new_fcompiler(compiler=self.fcompiler,
dry_run=self.dry_run, force=1,
c_compiler=self.compiler)
if self.fcompiler is not None:
self.fcompiler.customize(self.distribution)
if self.fcompiler.get_version():
self.fcompiler.customize_cmd(self)
self.fcompiler.show_customization()
def _wrap_method(self, mth, lang, args):
from distutils.ccompiler import CompileError
from distutils.errors import DistutilsExecError
save_compiler = self.compiler
if lang in ['f77', 'f90']:
self.compiler = self.fcompiler
try:
ret = mth(*((self,)+args))
except (DistutilsExecError, CompileError):
msg = str(get_exception())
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
return ret
def _compile (self, body, headers, include_dirs, lang):
return self._wrap_method(old_config._compile, lang,
(body, headers, include_dirs, lang))
def _link (self, body,
headers, include_dirs,
libraries, library_dirs, lang):
if self.compiler.compiler_type=='msvc':
libraries = (libraries or [])[:]
library_dirs = (library_dirs or [])[:]
if lang in ['f77', 'f90']:
lang = 'c' # always use system linker when using MSVC compiler
if self.fcompiler:
for d in self.fcompiler.library_dirs or []:
# correct path when compiling in Cygwin but with
# normal Win Python
if d.startswith('/usr/lib'):
s, o = exec_command(['cygpath', '-w', d],
use_tee=False)
if not s: d = o
library_dirs.append(d)
for libname in self.fcompiler.libraries or []:
if libname not in libraries:
libraries.append(libname)
for libname in libraries:
if libname.startswith('msvc'): continue
fileexists = False
for libdir in library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
if fileexists: continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in library_dirs:
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
if os.path.isfile(libfile):
# copy libname.a file to name.lib so that MSVC linker
# can find it
libfile2 = os.path.join(libdir, '%s.lib' % (libname))
copy_file(libfile, libfile2)
self.temp_files.append(libfile2)
fileexists = True
break
if fileexists: continue
log.warn('could not find library %r in directories %s' \
% (libname, library_dirs))
elif self.compiler.compiler_type == 'mingw32':
generate_manifest(self)
return self._wrap_method(old_config._link, lang,
(body, headers, include_dirs,
libraries, library_dirs, lang))
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
self._check_compiler()
return self.try_compile(
"/* we need a dummy line to make distutils happy */",
[header], include_dirs)
def check_decl(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = """
int main(void)
{
#ifndef %s
(void) %s;
#endif
;
return 0;
}""" % (symbol, symbol)
return self.try_compile(body, headers, include_dirs)
def check_macro_true(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = """
int main(void)
{
#if %s
#else
#error false or undefined macro
#endif
;
return 0;
}""" % (symbol,)
return self.try_compile(body, headers, include_dirs)
def check_type(self, type_name, headers=None, include_dirs=None,
library_dirs=None):
"""Check type availability. Return True if the type can be compiled,
False otherwise"""
self._check_compiler()
# First check the type can be compiled
body = r"""
int main(void) {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""" % {'name': type_name}
st = False
try:
try:
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
st = True
except distutils.errors.CompileError:
st = False
finally:
self._clean()
return st
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
"""Check size of a given type."""
self._check_compiler()
# First check the type can be compiled
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
test_array [0] = 0
;
return 0;
}
"""
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
self._clean()
if expected:
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
test_array [0] = 0
;
return 0;
}
"""
for size in expected:
try:
self._compile(body % {'type': type_name, 'size': size},
headers, include_dirs, 'c')
self._clean()
return size
except CompileError:
pass
# this fails to *compile* if size > sizeof(type)
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
test_array [0] = 0
;
return 0;
}
"""
        # The principle is simple: we first find low and high bounds of size
        # for the type, where low/high are looked up on a log scale. Then, we
        # do a binary search to find the exact size between low and high.
        # (This strategy is restated as a small standalone sketch at the end
        # of this file.)
low = 0
mid = 0
while True:
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
break
except CompileError:
#log.info("failure to test for bound %d" % mid)
low = mid + 1
mid = 2 * mid + 1
high = mid
# Binary search:
while low != high:
mid = (high - low) // 2 + low
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
high = mid
except CompileError:
low = mid + 1
return low
def check_func(self, func,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
# clean up distutils's config a bit: add void to main(), and
# return a value.
self._check_compiler()
body = []
if decl:
if type(decl) == str:
body.append(decl)
else:
body.append("int %s (void);" % func)
# Handle MSVC intrinsics: force MS compiler to make a function call.
# Useful to test for some functions when built with optimization on, to
# avoid build error because the intrinsic and our 'fake' test
# declaration do not match.
body.append("#ifdef _MSC_VER")
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
if call_args is None:
call_args = ''
body.append(" %s(%s);" % (func, call_args))
else:
body.append(" %s;" % func)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_funcs_once(self, funcs,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
"""Check a list of functions at once.
This is useful to speed up things, since all the functions in the funcs
list will be put in one compilation unit.
Arguments
---------
funcs : seq
list of functions to test
include_dirs : seq
list of header paths
libraries : seq
list of libraries to link the code snippet to
        library_dirs : seq
list of library paths
decl : dict
for every (key, value), the declaration in the value will be
used for function in key. If a function is not in the
            dictionary, no declaration will be used.
call : dict
for every item (f, value), if the value is True, a call will be
done to the function f.
"""
self._check_compiler()
body = []
if decl:
for f, v in decl.items():
if v:
body.append("int %s (void);" % f)
# Handle MS intrinsics. See check_func for more info.
body.append("#ifdef _MSC_VER")
for func in funcs:
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
for f in funcs:
if f in call and call[f]:
if not (call_args and f in call_args and call_args[f]):
args = ''
else:
args = call_args[f]
body.append(" %s(%s);" % (f, args))
else:
body.append(" %s;" % f)
else:
for f in funcs:
body.append(" %s;" % f)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_inline(self):
"""Return the inline keyword recognized by the compiler, empty string
otherwise."""
return check_inline(self)
def check_restrict(self):
"""Return the restrict keyword recognized by the compiler, empty string
otherwise."""
return check_restrict(self)
def check_compiler_gcc4(self):
"""Return True if the C compiler is gcc >= 4."""
return check_compiler_gcc4(self)
def check_gcc_function_attribute(self, attribute, name):
return check_gcc_function_attribute(self, attribute, name)
def check_gcc_variable_attribute(self, attribute):
return check_gcc_variable_attribute(self, attribute)
def get_output(self, body, headers=None, include_dirs=None,
libraries=None, library_dirs=None,
lang="c", use_tee=None):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Returns the exit status code
of the program and its output.
"""
# 2008-11-16, RemoveMe
warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
"Usage of get_output is deprecated: please do not \n" \
"use it anymore, and avoid configuration checks \n" \
"involving running executable on the target machine.\n" \
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning)
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
exitcode, output = 255, ''
try:
grabber = GrabStdout()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
grabber.restore()
except:
output = grabber.data
grabber.restore()
raise
exe = os.path.join('.', exe)
exitstatus, output = exec_command(exe, execute_in='.',
use_tee=use_tee)
if hasattr(os, 'WEXITSTATUS'):
exitcode = os.WEXITSTATUS(exitstatus)
if os.WIFSIGNALED(exitstatus):
sig = os.WTERMSIG(exitstatus)
log.error('subprocess exited with signal %d' % (sig,))
if sig == signal.SIGINT:
# control-C
raise KeyboardInterrupt
else:
exitcode = exitstatus
log.info("success!")
except (CompileError, LinkError):
log.info("failure.")
self._clean()
return exitcode, output
class GrabStdout(object):
def __init__(self):
self.sys_stdout = sys.stdout
self.data = ''
sys.stdout = self
def write (self, data):
self.sys_stdout.write(data)
self.data += data
def flush (self):
self.sys_stdout.flush()
def restore(self):
sys.stdout = self.sys_stdout
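# --- Editor's illustration: not part of numpy.distutils ---
# The bound-growing plus binary-search strategy used by config.check_type_size
# above, restated against a hypothetical monotone predicate fits(n) (False for
# n below the true size, True at and above it). check_type_size uses "does the
# test snippet compile" as that predicate; here it is just a callable so the
# control flow is easy to follow. Nothing in this file calls this function.
def _illustrate_size_search(fits):
    # grow an upper bound on a log scale: 0, 1, 3, 7, 15, ...
    low = 0
    mid = 0
    while not fits(mid):
        low = mid + 1
        mid = 2 * mid + 1
    high = mid
    # then bisect between low and high for the smallest n with fits(n) True
    while low != high:
        mid = (high - low) // 2 + low
        if fits(mid):
            high = mid
        else:
            low = mid + 1
    return low
# e.g. _illustrate_size_search(lambda n: n >= 8) returns 8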
|
JFriel/honours_project
|
venv/lib/python2.7/site-packages/numpy/distutils/command/config.py
|
Python
|
gpl-3.0
| 17,986
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume (Camptocamp)
# Copyright 2010-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Add "To Send" and "To Validate" states in Invoices',
'version': '8.0.1.0.1',
'category': 'Generic Modules/Invoicing',
'description':
'''
This module adds 2 states between draft and open state in invoices:
- To Validate: For invoices which need a validation
- To Send: For all invoices that need to be sent
''',
'author': "Camptocamp,Odoo Community Association (OCA)",
'website': 'http://camptocamp.com',
'license': 'AGPL-3',
'depends': ['account'],
'data': [
'invoice_wkf.xml',
'invoice_view.xml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
'application': False
}
|
scigghia/account-invoicing
|
account_invoice_validation_workflow/__openerp__.py
|
Python
|
agpl-3.0
| 1,645
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
class URLMappings(object):
def __init__(self, src_root, build_dir):
self.mappings = {
'dart:mojo.internal': os.path.join(src_root, 'mojo/public/dart/sdk_ext/internal.dart'),
'dart:sky': os.path.join(build_dir, 'gen/sky/bindings/dart_sky.dart'),
'dart:sky.internals': os.path.join(src_root, 'sky/engine/bindings/sky_internals.dart'),
'dart:sky_builtin_natives': os.path.join(src_root, 'sky/engine/bindings/builtin_natives.dart'),
}
self.packages_root = os.path.join(build_dir, 'gen/dart-pkg/packages')
@property
def as_args(self):
return map(lambda item: '--url-mapping=%s,%s' % item, self.mappings.items())
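# --- Editor's note: illustrative only ---
# as_args renders each mapping as a "--url-mapping=<dart-url>,<file>" flag, so
# URLMappings('/src', '/out').as_args would contain (in unspecified dict order)
# entries such as:
#   --url-mapping=dart:sky,/out/gen/sky/bindings/dart_sky.dart
#   --url-mapping=dart:mojo.internal,/src/mojo/public/dart/sdk_ext/internal.dart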
|
xunmengfeng/engine
|
sky/tools/skypy/url_mappings.py
|
Python
|
bsd-3-clause
| 878
|
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links.
See <https://pythonhosted.org/Markdown/extensions/wikilinks.html>
for documentation.
Original code Copyright [Waylan Limberg](http://achinghead.com/).
All changes Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import Pattern
from ..util import etree
import re
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end)
class WikiLinkExtension(Extension):
def __init__ (self, *args, **kwargs):
self.config = {
            'base_url' : ['/', 'String to append to beginning of URL.'],
'end_url' : ['/', 'String to append to end of URL.'],
'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url' : [build_url, 'Callable formats URL from label.'],
}
super(WikiLinkExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
self.md = md
# append to end of inline patterns
WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
wikilinkPattern.md = md
md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
class WikiLinks(Pattern):
def __init__(self, pattern, config):
super(WikiLinks, self).__init__(pattern)
self.config = config
def handleMatch(self, m):
if m.group(2).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
url = self.config['build_url'](label, base_url, end_url)
a = etree.Element('a')
a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
else:
a = ''
return a
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url']
end_url = self.config['end_url']
html_class = self.config['html_class']
if hasattr(self.md, 'Meta'):
if 'wiki_base_url' in self.md.Meta:
base_url = self.md.Meta['wiki_base_url'][0]
if 'wiki_end_url' in self.md.Meta:
end_url = self.md.Meta['wiki_end_url'][0]
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(*args, **kwargs) :
return WikiLinkExtension(*args, **kwargs)
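# --- Editor's usage sketch: not part of the original extension ---
# One way to wire the extension into Python-Markdown; the base_url and input
# text are made-up values, and the function exists only as an example (it is
# never called in this module).
def _example_wikilink_usage():
    import markdown
    html = markdown.markdown(
        'Read the [[Main Page]] first.',
        extensions=[WikiLinkExtension(base_url='/wiki/', end_url='/')])
    # Expected shape of the output, given the config above:
    #   <p>Read the <a class="wikilink" href="/wiki/Main_Page/">Main Page</a> first.</p>
    return html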
|
andela-bojengwa/talk
|
venv/lib/python2.7/site-packages/markdown/extensions/wikilinks.py
|
Python
|
mit
| 2,901
|
#!/usr/bin/python
#
# Scaleway SSH keys management module
#
# Copyright (C) 2018 Online SAS.
# https://www.scaleway.com
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: scaleway_sshkey
short_description: Scaleway SSH keys management module
version_added: "2.6"
author: Remy Leone (@sieben)
description:
- This module manages SSH keys on Scaleway account
U(https://developer.scaleway.com)
options:
state:
description:
- Indicate desired state of the SSH key.
required: true
choices:
- present
- absent
ssh_pub_key:
description:
- The public SSH key as a string to add.
required: true
oauth_token:
description:
- Scaleway OAuth token.
required: true
timeout:
description:
- Timeout for API calls
default: 30
base_url:
description:
- Base URL for account API
default: "https://account.scaleway.com"
'''
EXAMPLES = '''
- name: "Add SSH key"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "Present"
- name: "Delete SSH key"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "absent"
- name: "Add SSH key with explicit token"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "Present"
oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
'''
RETURN = '''
data:
description: This is only present when C(state=present)
returned: when C(state=present)
type: dict
sample: {
"ssh_public_keys": [
{"key": "ssh-rsa AAAA...."}
]
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.scaleway import ScalewayAPI
def extract_present_sshkeys(raw_organization_dict):
ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
return ssh_key_lookup
def extract_user_id(raw_organization_dict):
return raw_organization_dict["organizations"][0]["users"][0]["id"]
def sshkey_user_patch(ssh_lookup):
ssh_list = {"ssh_public_keys": [{"key": key}
for key in ssh_lookup]}
return ssh_list
def core(module):
api_token = module.params['oauth_token']
ssh_pub_key = module.params['ssh_pub_key']
state = module.params["state"]
account_api = ScalewayAPI(module,
headers={'X-Auth-Token': api_token},
base_url=module.params["base_url"])
response = account_api.get('organizations')
status_code = response.status_code
organization_json = response.json
if not response.ok:
module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
status_code, response.json['message']))
user_id = extract_user_id(organization_json)
present_sshkeys = []
try:
present_sshkeys = extract_present_sshkeys(organization_json)
except (KeyError, IndexError) as e:
module.fail_json(changed=False, data="Error while extracting present SSH keys from API")
if state in ('present',):
if ssh_pub_key in present_sshkeys:
module.exit_json(changed=False)
# If key not found create it!
if module.check_mode:
module.exit_json(changed=True)
present_sshkeys.append(ssh_pub_key)
payload = sshkey_user_patch(present_sshkeys)
response = account_api.patch('/users/%s' % user_id, data=payload)
if response.ok:
module.exit_json(changed=True, data=response.json)
module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
response.status_code, response.json))
elif state in ('absent',):
if ssh_pub_key not in present_sshkeys:
module.exit_json(changed=False)
if module.check_mode:
module.exit_json(changed=True)
present_sshkeys.remove(ssh_pub_key)
payload = sshkey_user_patch(present_sshkeys)
response = account_api.patch('/users/%s' % user_id, data=payload)
if response.ok:
module.exit_json(changed=True, data=response.json)
module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
response.status_code, response.json))
def main():
module = AnsibleModule(
argument_spec=dict(
base_url=dict(default='https://account.scaleway.com'),
oauth_token=dict(
no_log=True,
# Support environment variable for Scaleway OAuth Token
fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN']),
required=True,
),
state=dict(choices=['present', 'absent'], required=True),
ssh_pub_key=dict(required=True),
timeout=dict(type='int', default=30),
),
supports_check_mode=True,
)
core(module)
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/modules/cloud/scaleway/scaleway_sshkey.py
|
Python
|
gpl-3.0
| 5,245
|
#!/usr/bin/env $PYTHON$
# Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
import os
import socket
import struct
import sys
import traceback
# See com.intellij.idea.SocketLock for the server side of this interface.
RUN_PATH = u'$RUN_PATH$'
CONFIG_PATH = u'$CONFIG_PATH$'
SYSTEM_PATH = u'$SYSTEM_PATH$'
def print_usage(cmd):
print(('Usage:\n' +
' {0} -h | -? | --help\n' +
' {0} [project_dir] [-w|--wait]\n' +
' {0} [-l|--line line] [project_dir|--temp-project] [-w|--wait] file[:line]\n' +
' {0} diff <left> <right>\n' +
' {0} merge <local> <remote> [base] <merged>').format(cmd))
def write_to_sock(sock, data):
if sys.version_info[0] >= 3:
data = data.encode('utf-8')
sock.send(struct.pack('>h', len(data)) + data)
def read_from_sock(sock):
length = struct.unpack('>h', sock.recv(2))[0]
return sock.recv(length).decode('utf-8')
def read_sequence_from_sock(sock):
result = []
while True:
try:
data = read_from_sock(sock)
if data == '---':
break
result.append(data)
except (socket.error, IOError) as e:
print("I/O error({0}): {1} ({2})".format(e.errno, e.strerror, e))
traceback.print_exception(*sys.exc_info())
break
return result
def process_args(argv):
args = []
skip_next = False
for i, arg in enumerate(argv[1:]):
if arg == '-h' or arg == '-?' or arg == '--help':
print_usage(argv[0])
exit(0)
elif i == 0 and (arg == 'diff' or arg == 'merge' or arg == '--temp-project'):
args.append(arg)
elif arg == '-l' or arg == '--line':
args.append(arg)
skip_next = True
elif arg == '-w' or arg == '--wait':
args.append('--wait')
elif arg == '-p' or arg == '--project':
args.append(arg)
elif arg == '-e' or arg == '--edit':
args.append(arg)
elif skip_next:
args.append(arg)
skip_next = False
else:
path = arg
if ':' in arg:
file_path, line_number = arg.rsplit(':', 1)
if line_number.isdigit():
args.append('-l')
args.append(line_number)
path = file_path
args.append(os.path.abspath(path))
return args
def try_activate_instance(args):
port_path = os.path.join(CONFIG_PATH, 'port')
token_path = os.path.join(SYSTEM_PATH, 'token')
if not (os.path.exists(port_path) and os.path.exists(token_path)):
return False
try:
with open(port_path) as pf:
port = int(pf.read())
with open(token_path) as tf:
token = tf.read()
except ValueError:
return False
s = socket.socket()
s.settimeout(1.0)
try:
s.connect(('127.0.0.1', port))
except (socket.error, IOError):
return False
paths = read_sequence_from_sock(s)
found = CONFIG_PATH in paths or os.path.realpath(CONFIG_PATH) in paths
if found:
write_to_sock(s, 'activate ' + token + '\0' + os.getcwd() + '\0' + '\0'.join(args))
s.settimeout(None)
response = read_sequence_from_sock(s)
if len(response) < 2 or response[0] != 'ok':
print('bad response: ' + str(response))
exit(1)
if len(response) > 2:
print(response[2])
exit(int(response[1]))
return False
def start_new_instance(args):
if sys.platform == 'darwin':
if len(args) > 0:
args.insert(0, '--args')
if '--wait' in args:
args.insert(0, '-W')
os.execv('/usr/bin/open', ['open', '-na', RUN_PATH] + args)
else:
bin_file = os.path.split(RUN_PATH)[1]
os.execv(RUN_PATH, [bin_file] + args)
ide_args = process_args(sys.argv)
if not try_activate_instance(ide_args):
start_new_instance(ide_args)
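# --- Editor's note: illustrative only ---
# Example of how process_args() rewrites a command line (the absolute path is
# whatever os.path.abspath returns for the argument on the current machine):
#   process_args(['idea', 'src/main.py:42', '--wait'])
#     -> ['-l', '42', '/abs/path/to/src/main.py', '--wait']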
|
siosio/intellij-community
|
platform/platform-resources/src/launcher.py
|
Python
|
apache-2.0
| 4,104
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from typing import Any, Dict, Optional, TYPE_CHECKING
from pyspark import since, keyword_only
from pyspark.ml.param.shared import (
HasPredictionCol,
HasBlockSize,
HasMaxIter,
HasRegParam,
HasCheckpointInterval,
HasSeed,
)
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.common import inherit_doc
from pyspark.ml.param import Params, TypeConverters, Param
from pyspark.ml.util import JavaMLWritable, JavaMLReadable
from pyspark.sql import DataFrame
if TYPE_CHECKING:
from py4j.java_gateway import JavaObject
__all__ = ["ALS", "ALSModel"]
@inherit_doc
class _ALSModelParams(HasPredictionCol, HasBlockSize):
"""
Params for :py:class:`ALS` and :py:class:`ALSModel`.
.. versionadded:: 3.0.0
"""
userCol: Param[str] = Param(
Params._dummy(),
"userCol",
"column name for user ids. Ids must be within " + "the integer value range.",
typeConverter=TypeConverters.toString,
)
itemCol: Param[str] = Param(
Params._dummy(),
"itemCol",
"column name for item ids. Ids must be within " + "the integer value range.",
typeConverter=TypeConverters.toString,
)
coldStartStrategy: Param[str] = Param(
Params._dummy(),
"coldStartStrategy",
"strategy for dealing with "
+ "unknown or new users/items at prediction time. This may be useful "
+ "in cross-validation or production scenarios, for handling "
+ "user/item ids the model has not seen in the training data. "
+ "Supported values: 'nan', 'drop'.",
typeConverter=TypeConverters.toString,
)
def __init__(self, *args: Any):
super(_ALSModelParams, self).__init__(*args)
self._setDefault(blockSize=4096)
@since("1.4.0")
def getUserCol(self) -> str:
"""
Gets the value of userCol or its default value.
"""
return self.getOrDefault(self.userCol)
@since("1.4.0")
def getItemCol(self) -> str:
"""
Gets the value of itemCol or its default value.
"""
return self.getOrDefault(self.itemCol)
@since("2.2.0")
def getColdStartStrategy(self) -> str:
"""
Gets the value of coldStartStrategy or its default value.
"""
return self.getOrDefault(self.coldStartStrategy)
@inherit_doc
class _ALSParams(_ALSModelParams, HasMaxIter, HasRegParam, HasCheckpointInterval, HasSeed):
"""
Params for :py:class:`ALS`.
.. versionadded:: 3.0.0
"""
rank: Param[int] = Param(
Params._dummy(), "rank", "rank of the factorization", typeConverter=TypeConverters.toInt
)
numUserBlocks: Param[int] = Param(
Params._dummy(),
"numUserBlocks",
"number of user blocks",
typeConverter=TypeConverters.toInt,
)
numItemBlocks: Param[int] = Param(
Params._dummy(),
"numItemBlocks",
"number of item blocks",
typeConverter=TypeConverters.toInt,
)
implicitPrefs: Param[bool] = Param(
Params._dummy(),
"implicitPrefs",
"whether to use implicit preference",
typeConverter=TypeConverters.toBoolean,
)
alpha: Param[float] = Param(
Params._dummy(),
"alpha",
"alpha for implicit preference",
typeConverter=TypeConverters.toFloat,
)
ratingCol: Param[str] = Param(
Params._dummy(),
"ratingCol",
"column name for ratings",
typeConverter=TypeConverters.toString,
)
nonnegative: Param[bool] = Param(
Params._dummy(),
"nonnegative",
"whether to use nonnegative constraint for least squares",
typeConverter=TypeConverters.toBoolean,
)
intermediateStorageLevel: Param[str] = Param(
Params._dummy(),
"intermediateStorageLevel",
"StorageLevel for intermediate datasets. Cannot be 'NONE'.",
typeConverter=TypeConverters.toString,
)
finalStorageLevel: Param[str] = Param(
Params._dummy(),
"finalStorageLevel",
"StorageLevel for ALS model factors.",
typeConverter=TypeConverters.toString,
)
def __init__(self, *args: Any):
super(_ALSParams, self).__init__(*args)
self._setDefault(
rank=10,
maxIter=10,
regParam=0.1,
numUserBlocks=10,
numItemBlocks=10,
implicitPrefs=False,
alpha=1.0,
userCol="user",
itemCol="item",
ratingCol="rating",
nonnegative=False,
checkpointInterval=10,
intermediateStorageLevel="MEMORY_AND_DISK",
finalStorageLevel="MEMORY_AND_DISK",
coldStartStrategy="nan",
)
@since("1.4.0")
def getRank(self) -> int:
"""
Gets the value of rank or its default value.
"""
return self.getOrDefault(self.rank)
@since("1.4.0")
def getNumUserBlocks(self) -> int:
"""
Gets the value of numUserBlocks or its default value.
"""
return self.getOrDefault(self.numUserBlocks)
@since("1.4.0")
def getNumItemBlocks(self) -> int:
"""
Gets the value of numItemBlocks or its default value.
"""
return self.getOrDefault(self.numItemBlocks)
@since("1.4.0")
def getImplicitPrefs(self) -> bool:
"""
Gets the value of implicitPrefs or its default value.
"""
return self.getOrDefault(self.implicitPrefs)
@since("1.4.0")
def getAlpha(self) -> float:
"""
Gets the value of alpha or its default value.
"""
return self.getOrDefault(self.alpha)
@since("1.4.0")
def getRatingCol(self) -> str:
"""
Gets the value of ratingCol or its default value.
"""
return self.getOrDefault(self.ratingCol)
@since("1.4.0")
def getNonnegative(self) -> bool:
"""
Gets the value of nonnegative or its default value.
"""
return self.getOrDefault(self.nonnegative)
@since("2.0.0")
def getIntermediateStorageLevel(self) -> str:
"""
Gets the value of intermediateStorageLevel or its default value.
"""
return self.getOrDefault(self.intermediateStorageLevel)
@since("2.0.0")
def getFinalStorageLevel(self) -> str:
"""
Gets the value of finalStorageLevel or its default value.
"""
return self.getOrDefault(self.finalStorageLevel)
@inherit_doc
class ALS(JavaEstimator["ALSModel"], _ALSParams, JavaMLWritable, JavaMLReadable["ALS"]):
"""
Alternating Least Squares (ALS) matrix factorization.
ALS attempts to estimate the ratings matrix `R` as the product of
two lower-rank matrices, `X` and `Y`, i.e. `X * Yt = R`. Typically
these approximations are called 'factor' matrices. The general
approach is iterative. During each iteration, one of the factor
matrices is held constant, while the other is solved for using least
squares. The newly-solved factor matrix is then held constant while
solving for the other factor matrix.
This is a blocked implementation of the ALS factorization algorithm
that groups the two sets of factors (referred to as "users" and
"products") into blocks and reduces communication by only sending
one copy of each user vector to each product block on each
iteration, and only for the product blocks that need that user's
feature vector. This is achieved by pre-computing some information
about the ratings matrix to determine the "out-links" of each user
(which blocks of products it will contribute to) and "in-link"
information for each product (which of the feature vectors it
receives from each user block it will depend on). This allows us to
send only an array of feature vectors between each user block and
product block, and have the product block find the users' ratings
and update the products based on these messages.
For implicit preference data, the algorithm used is based on
`"Collaborative Filtering for Implicit Feedback Datasets",
<https://doi.org/10.1109/ICDM.2008.22>`_, adapted for the blocked
approach used here.
Essentially instead of finding the low-rank approximations to the
rating matrix `R`, this finds the approximations for a preference
matrix `P` where the elements of `P` are 1 if r > 0 and 0 if r <= 0.
The ratings then act as 'confidence' values related to strength of
indicated user preferences rather than explicit ratings given to
items.
.. versionadded:: 1.4.0
Notes
-----
The input rating dataframe to the ALS implementation should be deterministic.
    Nondeterministic data can cause failure when fitting the ALS model.
For example, an order-sensitive operation like sampling after a repartition makes
dataframe output nondeterministic, like `df.repartition(2).sample(False, 0.5, 1618)`.
Checkpointing sampled dataframe or adding a sort before sampling can help make the
dataframe deterministic.
Examples
--------
>>> df = spark.createDataFrame(
... [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
... ["user", "item", "rating"])
>>> als = ALS(rank=10, seed=0)
>>> als.setMaxIter(5)
ALS...
>>> als.getMaxIter()
5
>>> als.setRegParam(0.1)
ALS...
>>> als.getRegParam()
0.1
>>> als.clear(als.regParam)
>>> model = als.fit(df)
>>> model.getBlockSize()
4096
>>> model.getUserCol()
'user'
>>> model.setUserCol("user")
ALSModel...
>>> model.getItemCol()
'item'
>>> model.setPredictionCol("newPrediction")
ALS...
>>> model.rank
10
>>> model.userFactors.orderBy("id").collect()
[Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]
>>> test = spark.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
>>> predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
>>> predictions[0]
Row(user=0, item=2, newPrediction=0.6929...)
>>> predictions[1]
Row(user=1, item=0, newPrediction=3.47356...)
>>> predictions[2]
Row(user=2, item=0, newPrediction=-0.899198...)
>>> user_recs = model.recommendForAllUsers(3)
>>> user_recs.where(user_recs.user == 0)\
.select("recommendations.item", "recommendations.rating").collect()
[Row(item=[0, 1, 2], rating=[3.910..., 1.997..., 0.692...])]
>>> item_recs = model.recommendForAllItems(3)
>>> item_recs.where(item_recs.item == 2)\
.select("recommendations.user", "recommendations.rating").collect()
[Row(user=[2, 1, 0], rating=[4.892..., 3.991..., 0.692...])]
>>> user_subset = df.where(df.user == 2)
>>> user_subset_recs = model.recommendForUserSubset(user_subset, 3)
>>> user_subset_recs.select("recommendations.item", "recommendations.rating").first()
Row(item=[2, 1, 0], rating=[4.892..., 1.076..., -0.899...])
>>> item_subset = df.where(df.item == 0)
>>> item_subset_recs = model.recommendForItemSubset(item_subset, 3)
>>> item_subset_recs.select("recommendations.user", "recommendations.rating").first()
Row(user=[0, 1, 2], rating=[3.910..., 3.473..., -0.899...])
>>> als_path = temp_path + "/als"
>>> als.save(als_path)
>>> als2 = ALS.load(als_path)
>>> als.getMaxIter()
5
>>> model_path = temp_path + "/als_model"
>>> model.save(model_path)
>>> model2 = ALSModel.load(model_path)
>>> model.rank == model2.rank
True
>>> sorted(model.userFactors.collect()) == sorted(model2.userFactors.collect())
True
>>> sorted(model.itemFactors.collect()) == sorted(model2.itemFactors.collect())
True
>>> model.transform(test).take(1) == model2.transform(test).take(1)
True
"""
_input_kwargs: Dict[str, Any]
@keyword_only
def __init__(
self,
*,
rank: int = 10,
maxIter: int = 10,
regParam: float = 0.1,
numUserBlocks: int = 10,
numItemBlocks: int = 10,
implicitPrefs: bool = False,
alpha: float = 1.0,
userCol: str = "user",
itemCol: str = "item",
seed: Optional[int] = None,
ratingCol: str = "rating",
nonnegative: bool = False,
checkpointInterval: int = 10,
intermediateStorageLevel: str = "MEMORY_AND_DISK",
finalStorageLevel: str = "MEMORY_AND_DISK",
coldStartStrategy: str = "nan",
blockSize: int = 4096,
):
"""
        __init__(self, \\*, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, \
numItemBlocks=10, implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", \
seed=None, ratingCol="rating", nonnegative=False, checkpointInterval=10, \
intermediateStorageLevel="MEMORY_AND_DISK", \
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan", blockSize=4096)
"""
super(ALS, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.recommendation.ALS", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(
self,
*,
rank: int = 10,
maxIter: int = 10,
regParam: float = 0.1,
numUserBlocks: int = 10,
numItemBlocks: int = 10,
implicitPrefs: bool = False,
alpha: float = 1.0,
userCol: str = "user",
itemCol: str = "item",
seed: Optional[int] = None,
ratingCol: str = "rating",
nonnegative: bool = False,
checkpointInterval: int = 10,
intermediateStorageLevel: str = "MEMORY_AND_DISK",
finalStorageLevel: str = "MEMORY_AND_DISK",
coldStartStrategy: str = "nan",
blockSize: int = 4096,
) -> "ALS":
"""
setParams(self, \\*, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, \
numItemBlocks=10, implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", \
seed=None, ratingCol="rating", nonnegative=False, checkpointInterval=10, \
intermediateStorageLevel="MEMORY_AND_DISK", \
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan", blockSize=4096)
Sets params for ALS.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model: "JavaObject") -> "ALSModel":
return ALSModel(java_model)
@since("1.4.0")
def setRank(self, value: int) -> "ALS":
"""
Sets the value of :py:attr:`rank`.
"""
return self._set(rank=value)
@since("1.4.0")
def setNumUserBlocks(self, value: int) -> "ALS":
"""
Sets the value of :py:attr:`numUserBlocks`.
"""
return self._set(numUserBlocks=value)
@since("1.4.0")
def setNumItemBlocks(self, value: int) -> "ALS":
"""
Sets the value of :py:attr:`numItemBlocks`.
"""
return self._set(numItemBlocks=value)
@since("1.4.0")
def setNumBlocks(self, value: int) -> "ALS":
"""
Sets both :py:attr:`numUserBlocks` and :py:attr:`numItemBlocks` to the specific value.
"""
self._set(numUserBlocks=value)
return self._set(numItemBlocks=value)
@since("1.4.0")
def setImplicitPrefs(self, value: bool) -> "ALS":
"""
Sets the value of :py:attr:`implicitPrefs`.
"""
return self._set(implicitPrefs=value)
@since("1.4.0")
def setAlpha(self, value: float) -> "ALS":
"""
Sets the value of :py:attr:`alpha`.
"""
return self._set(alpha=value)
@since("1.4.0")
def setUserCol(self, value: str) -> "ALS":
"""
Sets the value of :py:attr:`userCol`.
"""
return self._set(userCol=value)
@since("1.4.0")
def setItemCol(self, value: str) -> "ALS":
"""
Sets the value of :py:attr:`itemCol`.
"""
return self._set(itemCol=value)
@since("1.4.0")
def setRatingCol(self, value: str) -> "ALS":
"""
Sets the value of :py:attr:`ratingCol`.
"""
return self._set(ratingCol=value)
@since("1.4.0")
def setNonnegative(self, value: bool) -> "ALS":
"""
Sets the value of :py:attr:`nonnegative`.
"""
return self._set(nonnegative=value)
@since("2.0.0")
def setIntermediateStorageLevel(self, value: str) -> "ALS":
"""
Sets the value of :py:attr:`intermediateStorageLevel`.
"""
return self._set(intermediateStorageLevel=value)
@since("2.0.0")
def setFinalStorageLevel(self, value: str) -> "ALS":
"""
Sets the value of :py:attr:`finalStorageLevel`.
"""
return self._set(finalStorageLevel=value)
@since("2.2.0")
def setColdStartStrategy(self, value: str) -> "ALS":
"""
Sets the value of :py:attr:`coldStartStrategy`.
"""
return self._set(coldStartStrategy=value)
def setMaxIter(self, value: int) -> "ALS":
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setRegParam(self, value: float) -> "ALS":
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
def setPredictionCol(self, value: str) -> "ALS":
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def setCheckpointInterval(self, value: int) -> "ALS":
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
def setSeed(self, value: int) -> "ALS":
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setBlockSize(self, value: int) -> "ALS":
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
class ALSModel(JavaModel, _ALSModelParams, JavaMLWritable, JavaMLReadable["ALSModel"]):
"""
Model fitted by ALS.
.. versionadded:: 1.4.0
"""
@since("3.0.0")
def setUserCol(self, value: str) -> "ALSModel":
"""
Sets the value of :py:attr:`userCol`.
"""
return self._set(userCol=value)
@since("3.0.0")
def setItemCol(self, value: str) -> "ALSModel":
"""
Sets the value of :py:attr:`itemCol`.
"""
return self._set(itemCol=value)
@since("3.0.0")
def setColdStartStrategy(self, value: str) -> "ALSModel":
"""
Sets the value of :py:attr:`coldStartStrategy`.
"""
return self._set(coldStartStrategy=value)
@since("3.0.0")
def setPredictionCol(self, value: str) -> "ALSModel":
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("3.0.0")
def setBlockSize(self, value: int) -> "ALSModel":
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
@property # type: ignore[misc]
@since("1.4.0")
def rank(self) -> int:
"""rank of the matrix factorization model"""
return self._call_java("rank")
@property # type: ignore[misc]
@since("1.4.0")
def userFactors(self) -> DataFrame:
"""
a DataFrame that stores user factors in two columns: `id` and
`features`
"""
return self._call_java("userFactors")
@property # type: ignore[misc]
@since("1.4.0")
def itemFactors(self) -> DataFrame:
"""
a DataFrame that stores item factors in two columns: `id` and
`features`
"""
return self._call_java("itemFactors")
def recommendForAllUsers(self, numItems: int) -> DataFrame:
"""
Returns top `numItems` items recommended for each user, for all users.
.. versionadded:: 2.2.0
Parameters
----------
numItems : int
max number of recommendations for each user
Returns
-------
:py:class:`pyspark.sql.DataFrame`
a DataFrame of (userCol, recommendations), where recommendations are
stored as an array of (itemCol, rating) Rows.
"""
return self._call_java("recommendForAllUsers", numItems)
def recommendForAllItems(self, numUsers: int) -> DataFrame:
"""
Returns top `numUsers` users recommended for each item, for all items.
.. versionadded:: 2.2.0
Parameters
----------
numUsers : int
max number of recommendations for each item
Returns
-------
:py:class:`pyspark.sql.DataFrame`
a DataFrame of (itemCol, recommendations), where recommendations are
stored as an array of (userCol, rating) Rows.
"""
return self._call_java("recommendForAllItems", numUsers)
def recommendForUserSubset(self, dataset: DataFrame, numItems: int) -> DataFrame:
"""
Returns top `numItems` items recommended for each user id in the input data set. Note that
if there are duplicate ids in the input dataset, only one set of recommendations per unique
id will be returned.
.. versionadded:: 2.3.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
a DataFrame containing a column of user ids. The column name must match `userCol`.
numItems : int
max number of recommendations for each user
Returns
-------
:py:class:`pyspark.sql.DataFrame`
a DataFrame of (userCol, recommendations), where recommendations are
stored as an array of (itemCol, rating) Rows.
"""
return self._call_java("recommendForUserSubset", dataset, numItems)
def recommendForItemSubset(self, dataset: DataFrame, numUsers: int) -> DataFrame:
"""
Returns top `numUsers` users recommended for each item id in the input data set. Note that
if there are duplicate ids in the input dataset, only one set of recommendations per unique
id will be returned.
.. versionadded:: 2.3.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
a DataFrame containing a column of item ids. The column name must match `itemCol`.
numUsers : int
max number of recommendations for each item
Returns
-------
:py:class:`pyspark.sql.DataFrame`
a DataFrame of (itemCol, recommendations), where recommendations are
stored as an array of (userCol, rating) Rows.
"""
return self._call_java("recommendForItemSubset", dataset, numUsers)
if __name__ == "__main__":
import doctest
import pyspark.ml.recommendation
from pyspark.sql import SparkSession
globs = pyspark.ml.recommendation.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder.master("local[2]").appName("ml.recommendation tests").getOrCreate()
sc = spark.sparkContext
globs["sc"] = sc
globs["spark"] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs["temp_path"] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
mahak/spark
|
python/pyspark/ml/recommendation.py
|
Python
|
apache-2.0
| 24,774
|
s = rf"f<caret>oo{'bar'}"
|
siosio/intellij-community
|
python/testData/intentions/convertingRawFStringQuotes_after.py
|
Python
|
apache-2.0
| 26
|
"""Proides the constants needed for component."""
ATTR_APP_ID = "app_id"
ATTR_APP_NAME = "app_name"
ATTR_INPUT_SOURCE = "source"
ATTR_INPUT_SOURCE_LIST = "source_list"
ATTR_MEDIA_ALBUM_ARTIST = "media_album_artist"
ATTR_MEDIA_ALBUM_NAME = "media_album_name"
ATTR_MEDIA_ARTIST = "media_artist"
ATTR_MEDIA_CHANNEL = "media_channel"
ATTR_MEDIA_CONTENT_ID = "media_content_id"
ATTR_MEDIA_CONTENT_TYPE = "media_content_type"
ATTR_MEDIA_DURATION = "media_duration"
ATTR_MEDIA_ENQUEUE = "enqueue"
ATTR_MEDIA_EPISODE = "media_episode"
ATTR_MEDIA_PLAYLIST = "media_playlist"
ATTR_MEDIA_POSITION = "media_position"
ATTR_MEDIA_POSITION_UPDATED_AT = "media_position_updated_at"
ATTR_MEDIA_SEASON = "media_season"
ATTR_MEDIA_SEEK_POSITION = "seek_position"
ATTR_MEDIA_SERIES_TITLE = "media_series_title"
ATTR_MEDIA_SHUFFLE = "shuffle"
ATTR_MEDIA_TITLE = "media_title"
ATTR_MEDIA_TRACK = "media_track"
ATTR_MEDIA_VOLUME_LEVEL = "volume_level"
ATTR_MEDIA_VOLUME_MUTED = "is_volume_muted"
ATTR_SOUND_MODE = "sound_mode"
ATTR_SOUND_MODE_LIST = "sound_mode_list"
DOMAIN = "media_player"
MEDIA_TYPE_MUSIC = "music"
MEDIA_TYPE_TVSHOW = "tvshow"
MEDIA_TYPE_MOVIE = "movie"
MEDIA_TYPE_VIDEO = "video"
MEDIA_TYPE_EPISODE = "episode"
MEDIA_TYPE_CHANNEL = "channel"
MEDIA_TYPE_PLAYLIST = "playlist"
MEDIA_TYPE_IMAGE = "image"
MEDIA_TYPE_URL = "url"
MEDIA_TYPE_GAME = "game"
MEDIA_TYPE_APP = "app"
SERVICE_CLEAR_PLAYLIST = "clear_playlist"
SERVICE_PLAY_MEDIA = "play_media"
SERVICE_SELECT_SOUND_MODE = "select_sound_mode"
SERVICE_SELECT_SOURCE = "select_source"
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
SUPPORT_PLAY = 16384
SUPPORT_SHUFFLE_SET = 32768
SUPPORT_SELECT_SOUND_MODE = 65536
|
fbradyirl/home-assistant
|
homeassistant/components/media_player/const.py
|
Python
|
apache-2.0
| 1,935
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import IECore
class IgnoredExceptionsTest( unittest.TestCase ) :
def test( self ) :
def f( toRaise, toIgnore ) :
with IECore.IgnoredExceptions( toIgnore ) :
raise toRaise
self.assertRaises( RuntimeError, f, RuntimeError, KeyError )
self.assertRaises( RuntimeError, f, RuntimeError, ( KeyError, IndexError ) )
f( KeyError, KeyError )
f( KeyError, ( KeyError, IndexError ) )
f( IndexError, ( KeyError, IndexError ) )
c = IECore.CompoundObject()
with IECore.IgnoredExceptions( KeyError ) :
c["d"]
with IECore.IgnoredExceptions( Exception ) :
c["d"]
p = IECore.Parameterised( "" )
with IECore.IgnoredExceptions( Exception ) :
p["d"]
def testNoExceptions( self ) :
with IECore.IgnoredExceptions( Exception ) :
pass
if __name__ == "__main__":
unittest.main()
|
lento/cortex
|
test/IECore/IgnoredExceptionsTest.py
|
Python
|
bsd-3-clause
| 2,674
|
"""
Empty
"""
|
fallisd/validate
|
unittests/__init__.py
|
Python
|
gpl-2.0
| 14
|
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_servicebusqueue
version_added: "2.8"
short_description: Manage Azure Service Bus queue.
description:
- Create, update or delete an Azure Service Bus queue.
options:
resource_group:
description:
- name of resource group.
required: true
name:
description:
- name of the queue.
required: true
namespace:
description:
- Servicebus namespace name.
- A namespace is a scoping container for all messaging components.
- Multiple queues and topics can reside within a single namespace, and namespaces often serve as application containers.
required: true
state:
description:
- Assert the state of the queue. Use 'present' to create or update and
'absent' to delete.
default: present
choices:
- absent
- present
auto_delete_on_idle_in_seconds:
description:
            - Idle time interval after which a queue is automatically deleted.
- The minimum duration is 5 minutes.
type: int
dead_lettering_on_message_expiration:
description:
- A value that indicates whether a queue has dead letter support when a message expires.
type: bool
default_message_time_to_live_seconds:
description:
- Default message timespan to live value.
- This is the duration after which the message expires, starting from when the message is sent to Service Bus.
- This is the default value used when TimeToLive is not set on a message itself.
type: int
enable_batched_operations:
description:
- Value that indicates whether server-side batched operations are enabled.
type: bool
enable_express:
description:
- Value that indicates whether Express Entities are enabled.
- An express topic or queue holds a message in memory temporarily before writing it to persistent storage.
type: bool
enable_partitioning:
description:
- A value that indicates whether the topic or queue is to be partitioned across multiple message brokers.
type: bool
forward_dead_lettered_messages_to:
description:
- Queue or topic name to forward the Dead Letter message for a queue.
forward_to:
description:
- Queue or topic name to forward the messages for a queue.
lock_duration_in_seconds:
description:
- Timespan duration of a peek-lock.
- The amount of time that the message is locked for other receivers.
- The maximum value for LockDuration is 5 minutes.
type: int
max_delivery_count:
description:
            - The maximum delivery count.
- A message is automatically deadlettered after this number of deliveries.
type: int
max_size_in_mb:
description:
- The maximum size of the queue in megabytes, which is the size of memory allocated for the queue.
type: int
requires_duplicate_detection:
description:
- A value indicating if this queue or topic requires duplicate detection.
type: bool
duplicate_detection_time_in_seconds:
description:
- TimeSpan structure that defines the duration of the duplicate detection history.
type: int
requires_session:
description:
- A value that indicates whether the queue supports the concept of sessions.
type: bool
status:
description:
- Status of the entity.
choices:
- active
- disabled
- send_disabled
- receive_disabled
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Create a queue
azure_rm_servicebusqueue:
name: subqueue
resource_group: myResourceGroup
namespace: bar
duplicate_detection_time_in_seconds: 600
'''
RETURN = '''
id:
description: Current state of the queue.
returned: success
type: str
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
from ansible.module_utils._text import to_native
from datetime import datetime, timedelta
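# Maps Azure SDK duration (timedelta) attribute names to this module's *_seconds
# integer parameters, so values can be converted in both directions.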
duration_spec_map = dict(
default_message_time_to_live='default_message_time_to_live_seconds',
duplicate_detection_history_time_window='duplicate_detection_time_in_seconds',
auto_delete_on_idle='auto_delete_on_idle_in_seconds',
lock_duration='lock_duration_in_seconds'
)
sas_policy_spec = dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
name=dict(type='str', required=True),
regenerate_key=dict(type='bool'),
rights=dict(type='str', choices=['manage', 'listen', 'send', 'listen_send'])
)
class AzureRMServiceBusQueue(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
namespace=dict(type='str', required=True),
auto_delete_on_idle_in_seconds=dict(type='int'),
dead_lettering_on_message_expiration=dict(type='bool'),
default_message_time_to_live_seconds=dict(type='int'),
duplicate_detection_time_in_seconds=dict(type='int'),
enable_batched_operations=dict(type='bool'),
enable_express=dict(type='bool'),
enable_partitioning=dict(type='bool'),
forward_dead_lettered_messages_to=dict(type='str'),
forward_to=dict(type='str'),
lock_duration_in_seconds=dict(type='int'),
max_delivery_count=dict(type='int'),
max_size_in_mb=dict(type='int'),
requires_duplicate_detection=dict(type='bool'),
requires_session=dict(type='bool'),
status=dict(type='str',
choices=['active', 'disabled', 'send_disabled', 'receive_disabled'])
)
self.resource_group = None
self.name = None
self.state = None
self.namespace = None
self.location = None
self.type = None
self.subscription_topic_name = None
self.auto_delete_on_idle_in_seconds = None
self.dead_lettering_on_message_expiration = None
self.default_message_time_to_live_seconds = None
self.enable_batched_operations = None
self.enable_express = None
self.enable_partitioning = None
self.forward_dead_lettered_messages_to = None
self.forward_to = None
self.lock_duration_in_seconds = None
self.max_delivery_count = None
self.max_size_in_mb = None
self.requires_duplicate_detection = None
self.status = None
self.results = dict(
changed=False,
id=None
)
super(AzureRMServiceBusQueue, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
changed = False
original = self.get()
if self.state == 'present':
# Create the resource instance
params = dict(
dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration,
enable_batched_operations=self.enable_batched_operations,
enable_express=self.enable_express,
enable_partitioning=self.enable_partitioning,
forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,
forward_to=self.forward_to,
max_delivery_count=self.max_delivery_count,
max_size_in_megabytes=self.max_size_in_mb
)
if self.status:
params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status)))
for k, v in duration_spec_map.items():
seconds = getattr(self, v)
if seconds:
params[k] = timedelta(seconds=seconds)
instance = self.servicebus_models.SBQueue(**params)
result = original
if not original:
changed = True
result = instance
else:
result = original
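                # Compare only the writable attributes (read-only/validated ones
                # are excluded) to decide whether the existing queue needs an update.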
attribute_map = set(self.servicebus_models.SBQueue._attribute_map.keys()) - set(self.servicebus_models.SBQueue._validation.keys())
for attribute in attribute_map:
value = getattr(instance, attribute)
if value and value != getattr(original, attribute):
changed = True
if changed and not self.check_mode:
result = self.create_or_update(instance)
self.results = self.to_dict(result)
elif original:
changed = True
if not self.check_mode:
self.delete()
self.results['deleted'] = True
self.results['changed'] = changed
return self.results
def create_or_update(self, param):
try:
client = self._get_client()
return client.create_or_update(self.resource_group, self.namespace, self.name, param)
except Exception as exc:
self.fail('Error creating or updating queue {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc)))
def delete(self):
try:
client = self._get_client()
client.delete(self.resource_group, self.namespace, self.name)
return True
except Exception as exc:
self.fail("Error deleting queue {0} - {1}".format(self.name, str(exc)))
def _get_client(self):
return self.servicebus_client.queues
def get(self):
try:
client = self._get_client()
return client.get(self.resource_group, self.namespace, self.name)
except Exception:
return None
def to_dict(self, instance):
result = dict()
attribute_map = self.servicebus_models.SBQueue._attribute_map
for attribute in attribute_map.keys():
value = getattr(instance, attribute)
if not value:
continue
if attribute_map[attribute]['type'] == 'duration':
if is_valid_timedelta(value):
key = duration_spec_map.get(attribute) or attribute
result[key] = int(value.total_seconds())
elif attribute == 'status':
result['status'] = _camel_to_snake(value)
elif isinstance(value, self.servicebus_models.MessageCountDetails):
result[attribute] = value.as_dict()
elif isinstance(value, self.servicebus_models.SBSku):
result[attribute] = value.name.lower()
elif isinstance(value, datetime):
result[attribute] = str(value)
elif isinstance(value, str):
result[attribute] = to_native(value)
elif attribute == 'max_size_in_megabytes':
result['max_size_in_mb'] = value
else:
result[attribute] = value
return result
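# This sentinel appears to correspond to .NET's TimeSpan.MaxValue
# (10675199 days 02:48:05.4775807), which is returned for "unset" durations;
# treat it as "no value" so it is not echoed back as a huge seconds count.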
def is_valid_timedelta(value):
if value == timedelta(10675199, 10085, 477581):
return None
return value
def main():
AzureRMServiceBusQueue()
if __name__ == '__main__':
main()
|
alxgu/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py
|
Python
|
gpl-3.0
| 12,400
|
#!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import imp
import os
import threading
from ycmd.utils import ForceSemanticCompletion
from ycmd.completers.general.general_completer_store import (
GeneralCompleterStore )
from ycmd.completers.completer_utils import PathToFiletypeCompleterPluginLoader
class ServerState( object ):
def __init__( self, user_options ):
self._user_options = user_options
self._filetype_completers = {}
self._filetype_completers_lock = threading.Lock()
self._gencomp = GeneralCompleterStore( self._user_options )
@property
def user_options( self ):
return self._user_options
def Shutdown( self ):
with self._filetype_completers_lock:
for completer in self._filetype_completers.itervalues():
if completer:
completer.Shutdown()
self._gencomp.Shutdown()
def _GetFiletypeCompleterForFiletype( self, filetype ):
with self._filetype_completers_lock:
try:
return self._filetype_completers[ filetype ]
except KeyError:
pass
module_path = PathToFiletypeCompleterPluginLoader( filetype )
completer = None
supported_filetypes = [ filetype ]
if os.path.exists( module_path ):
module = imp.load_source( filetype, module_path )
completer = module.GetCompleter( self._user_options )
if completer:
supported_filetypes.extend( completer.SupportedFiletypes() )
for supported_filetype in supported_filetypes:
self._filetype_completers[ supported_filetype ] = completer
return completer
def GetFiletypeCompleter( self, current_filetypes ):
completers = [ self._GetFiletypeCompleterForFiletype( filetype )
for filetype in current_filetypes ]
for completer in completers:
if completer:
return completer
raise ValueError( 'No semantic completer exists for filetypes: {0}'.format(
current_filetypes ) )
def FiletypeCompletionAvailable( self, filetypes ):
try:
self.GetFiletypeCompleter( filetypes )
return True
except:
return False
def FiletypeCompletionUsable( self, filetypes ):
return ( self.CurrentFiletypeCompletionEnabled( filetypes ) and
self.FiletypeCompletionAvailable( filetypes ) )
def ShouldUseGeneralCompleter( self, request_data ):
return self._gencomp.ShouldUseNow( request_data )
def ShouldUseFiletypeCompleter( self, request_data ):
"""
Determines whether or not the semantic completer should be called, and
returns an indication of the reason why. Specifically, returns a tuple:
( should_use_completer_now, was_semantic_completion_forced ), where:
- should_use_completer_now: if True, the semantic engine should be used
- was_semantic_completion_forced: if True, the user requested "forced"
semantic completion
was_semantic_completion_forced is always False if should_use_completer_now
is False
"""
filetypes = request_data[ 'filetypes' ]
if self.FiletypeCompletionUsable( filetypes ):
if ForceSemanticCompletion( request_data ):
# use semantic, and it was forced
return ( True, True )
else:
# was not forced. check the conditions for triggering
return ( self.GetFiletypeCompleter( filetypes ).ShouldUseNow(
request_data ), False )
# don't use semantic, ignore whether or not the user requested forced
# completion
return ( False, False )
def GetGeneralCompleter( self ):
return self._gencomp
def CurrentFiletypeCompletionEnabled( self, current_filetypes ):
filetype_to_disable = self._user_options[
'filetype_specific_completion_to_disable' ]
if '*' in filetype_to_disable:
return False
else:
return not all([ x in filetype_to_disable for x in current_filetypes ])
|
WillianPaiva/ycmd
|
ycmd/server_state.py
|
Python
|
gpl-3.0
| 4,592
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CLI Backend for the Analyzer Part of the Debugger.
The analyzer performs post hoc analysis of dumped intermediate tensors and
graph structure information from debugged Session.run() calls.
The other part of the debugger is the stepper (c.f. stepper_cli.py).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import re
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import evaluator
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import source_utils
RL = debugger_cli_common.RichLine
# String constants for the depth-dependent hanging indent at the beginning
# of each line.
HANG_UNFINISHED = "| " # Used for unfinished recursion depths.
HANG_FINISHED = " "
HANG_SUFFIX = "|- "
# String constant for displaying depth and op type.
DEPTH_TEMPLATE = "(%d) "
OP_TYPE_TEMPLATE = "[%s] "
# String constants for control inputs/outputs, etc.
CTRL_LABEL = "(Ctrl) "
ELLIPSIS = "..."
SORT_TENSORS_BY_TIMESTAMP = "timestamp"
SORT_TENSORS_BY_DUMP_SIZE = "dump_size"
SORT_TENSORS_BY_OP_TYPE = "op_type"
SORT_TENSORS_BY_TENSOR_NAME = "tensor_name"
def _add_main_menu(output,
node_name=None,
enable_list_tensors=True,
enable_node_info=True,
enable_print_tensor=True,
enable_list_inputs=True,
enable_list_outputs=True):
"""Generate main menu for the screen output from a command.
Args:
output: (debugger_cli_common.RichTextLines) the output object to modify.
node_name: (str or None) name of the node involved (if any). If None,
the menu items node_info, list_inputs and list_outputs will be
automatically disabled, overriding the values of arguments
enable_node_info, enable_list_inputs and enable_list_outputs.
enable_list_tensors: (bool) whether the list_tensor menu item will be
enabled.
enable_node_info: (bool) whether the node_info item will be enabled.
enable_print_tensor: (bool) whether the print_tensor item will be enabled.
enable_list_inputs: (bool) whether the item list_inputs will be enabled.
enable_list_outputs: (bool) whether the item list_outputs will be enabled.
"""
menu = debugger_cli_common.Menu()
menu.append(
debugger_cli_common.MenuItem(
"list_tensors", "list_tensors", enabled=enable_list_tensors))
if node_name:
menu.append(
debugger_cli_common.MenuItem(
"node_info",
"node_info -a -d -t %s" % node_name,
enabled=enable_node_info))
menu.append(
debugger_cli_common.MenuItem(
"print_tensor",
"print_tensor %s" % node_name,
enabled=enable_print_tensor))
menu.append(
debugger_cli_common.MenuItem(
"list_inputs",
"list_inputs -c -r %s" % node_name,
enabled=enable_list_inputs))
menu.append(
debugger_cli_common.MenuItem(
"list_outputs",
"list_outputs -c -r %s" % node_name,
enabled=enable_list_outputs))
else:
menu.append(
debugger_cli_common.MenuItem(
"node_info", None, enabled=False))
menu.append(
debugger_cli_common.MenuItem("print_tensor", None, enabled=False))
menu.append(
debugger_cli_common.MenuItem("list_inputs", None, enabled=False))
menu.append(
debugger_cli_common.MenuItem("list_outputs", None, enabled=False))
menu.append(
debugger_cli_common.MenuItem("run_info", "run_info"))
menu.append(
debugger_cli_common.MenuItem("help", "help"))
output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
class DebugAnalyzer(object):
"""Analyzer for debug data from dump directories."""
_TIMESTAMP_COLUMN_HEAD = "t (ms)"
_DUMP_SIZE_COLUMN_HEAD = "Size (B)"
_OP_TYPE_COLUMN_HEAD = "Op type"
_TENSOR_NAME_COLUMN_HEAD = "Tensor name"
# Op types to be omitted when generating descriptions of graph structure.
_GRAPH_STRUCT_OP_TYPE_BLACKLIST = (
"_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
def __init__(self, debug_dump, config):
"""DebugAnalyzer constructor.
Args:
debug_dump: A DebugDumpDir object.
config: A `cli_config.CLIConfig` object that carries user-facing
configurations.
"""
self._debug_dump = debug_dump
self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump)
# Initialize tensor filters state.
self._tensor_filters = {}
self._build_argument_parsers(config)
config.set_callback("graph_recursion_depth",
self._build_argument_parsers)
# TODO(cais): Implement list_nodes.
def _build_argument_parsers(self, config):
"""Build argument parsers for DebugAnalayzer.
Args:
config: A `cli_config.CLIConfig` object.
Returns:
A dict mapping command handler name to `ArgumentParser` instance.
"""
# Argument parsers for command handlers.
self._arg_parsers = {}
# Parser for list_tensors.
ap = argparse.ArgumentParser(
description="List dumped intermediate tensors.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-f",
"--tensor_filter",
dest="tensor_filter",
type=str,
default="",
help="List only Tensors passing the filter of the specified name")
ap.add_argument(
"-n",
"--node_name_filter",
dest="node_name_filter",
type=str,
default="",
help="filter node name by regex.")
ap.add_argument(
"-t",
"--op_type_filter",
dest="op_type_filter",
type=str,
default="",
help="filter op type by regex.")
ap.add_argument(
"-s",
"--sort_by",
dest="sort_by",
type=str,
default=SORT_TENSORS_BY_TIMESTAMP,
help=("the field to sort the data by: (%s | %s | %s | %s)" %
(SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE,
SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME)))
ap.add_argument(
"-r",
"--reverse",
dest="reverse",
action="store_true",
help="sort the data in reverse (descending) order")
self._arg_parsers["list_tensors"] = ap
# Parser for node_info.
ap = argparse.ArgumentParser(
description="Show information about a node.", usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an associated tensor, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-a",
"--attributes",
dest="attributes",
action="store_true",
help="Also list attributes of the node.")
ap.add_argument(
"-d",
"--dumps",
dest="dumps",
action="store_true",
help="Also list dumps available from the node.")
ap.add_argument(
"-t",
"--traceback",
dest="traceback",
action="store_true",
help="Also include the traceback of the node's creation "
"(if available in Python).")
self._arg_parsers["node_info"] = ap
# Parser for list_inputs.
ap = argparse.ArgumentParser(
description="Show inputs to a node.", usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an output tensor from the node, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-c", "--control", action="store_true", help="Include control inputs.")
ap.add_argument(
"-d",
"--depth",
dest="depth",
type=int,
default=config.get("graph_recursion_depth"),
help="Maximum depth of recursion used when showing the input tree.")
ap.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="Show inputs to the node recursively, i.e., the input tree.")
ap.add_argument(
"-t",
"--op_type",
action="store_true",
help="Show op types of input nodes.")
self._arg_parsers["list_inputs"] = ap
# Parser for list_outputs.
ap = argparse.ArgumentParser(
description="Show the nodes that receive the outputs of given node.",
usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an output tensor from the node, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-c", "--control", action="store_true", help="Include control inputs.")
ap.add_argument(
"-d",
"--depth",
dest="depth",
type=int,
default=config.get("graph_recursion_depth"),
help="Maximum depth of recursion used when showing the output tree.")
ap.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="Show recipients of the node recursively, i.e., the output "
"tree.")
ap.add_argument(
"-t",
"--op_type",
action="store_true",
help="Show op types of recipient nodes.")
self._arg_parsers["list_outputs"] = ap
# Parser for print_tensor.
self._arg_parsers["print_tensor"] = (
command_parser.get_print_tensor_argparser(
"Print the value of a dumped tensor."))
# Parser for print_source.
ap = argparse.ArgumentParser(
description="Print a Python source file with overlaid debug "
"information, including the nodes (ops) or Tensors created at the "
"source lines.",
usage=argparse.SUPPRESS)
ap.add_argument(
"source_file_path",
type=str,
help="Path to the source file.")
ap.add_argument(
"-t",
"--tensors",
dest="tensors",
action="store_true",
help="Label lines with dumped Tensors, instead of ops.")
ap.add_argument(
"-m",
"--max_elements_per_line",
type=int,
default=10,
help="Maximum number of elements (ops or Tensors) to show per source "
"line.")
ap.add_argument(
"-b",
"--line_begin",
type=int,
default=1,
help="Print source beginning at line number (1-based.)")
self._arg_parsers["print_source"] = ap
# Parser for list_source.
ap = argparse.ArgumentParser(
description="List source files responsible for constructing nodes and "
"tensors present in the run().",
usage=argparse.SUPPRESS)
ap.add_argument(
"-p",
"--path_filter",
type=str,
default="",
help="Regular expression filter for file path.")
ap.add_argument(
"-n",
"--node_name_filter",
type=str,
default="",
help="Regular expression filter for node name.")
self._arg_parsers["list_source"] = ap
# Parser for eval.
ap = argparse.ArgumentParser(
description="""Evaluate an arbitrary expression. Can use tensor values
from the current debug dump. The debug tensor names should be enclosed
in pairs of backticks. Expressions with spaces should be enclosed in
a pair of double quotes or a pair of single quotes. By default, numpy
is imported as np and can be used in the expressions. E.g.,
1) eval np.argmax(`Softmax:0`),
2) eval 'np.sum(`Softmax:0`, axis=1)',
3) eval "np.matmul((`output/Identity:0`/`Softmax:0`).T, `Softmax:0`)".
""",
usage=argparse.SUPPRESS)
ap.add_argument(
"expression",
type=str,
help="""Expression to be evaluated.
1) in the simplest case, use <node_name>:<output_slot>, e.g.,
hidden_0/MatMul:0.
2) if the default debug op "DebugIdentity" is to be overridden, use
<node_name>:<output_slot>:<debug_op>, e.g.,
hidden_0/MatMul:0:DebugNumericSummary.
3) if the tensor of the same name exists on more than one device, use
<device_name>:<node_name>:<output_slot>[:<debug_op>], e.g.,
/job:worker/replica:0/task:0/gpu:0:hidden_0/MatMul:0
/job:worker/replica:0/task:2/cpu:0:hidden_0/MatMul:0:DebugNanCount.
4) if the tensor is executed multiple times in a given `Session.run`
        call, specify the execution index with a 0-based integer enclosed in a
pair of brackets at the end, e.g.,
RNN/tanh:0[0]
/job:worker/replica:0/task:0/gpu:0:RNN/tanh:0[0].""")
ap.add_argument(
"-a",
"--all",
dest="print_all",
action="store_true",
help="Print the tensor in its entirety, i.e., do not use ellipses "
"(may be slow for large results).")
ap.add_argument(
"-w",
"--write_path",
default="",
help="Path of the numpy file to write the evaluation result to, "
"using numpy.save()")
self._arg_parsers["eval"] = ap
def add_tensor_filter(self, filter_name, filter_callable):
"""Add a tensor filter.
A tensor filter is a named callable of the signature:
filter_callable(dump_datum, tensor),
wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying
metadata about the dumped tensor, including tensor name, timestamps, etc.
    tensor is the value of the dumped tensor as a numpy.ndarray object.
The return value of the function is a bool.
This is the same signature as the input argument to
debug_data.DebugDumpDir.find().
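    For illustration only, a minimal filter that flags tensors containing any
    negative value could be registered as in the sketch below, where the names
    `has_negative` and `analyzer` (a DebugAnalyzer instance) are hypothetical:

      def has_negative(dump_datum, tensor):
        # Only the tensor value is inspected; the dump_datum metadata is unused.
        return bool((tensor < 0).any())

      analyzer.add_tensor_filter("has_negative", has_negative)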
Args:
filter_name: (str) name of the filter. Cannot be empty.
filter_callable: (callable) a filter function of the signature described
        above.
Raises:
ValueError: If filter_name is an empty str.
TypeError: If filter_name is not a str.
Or if filter_callable is not callable.
"""
if not isinstance(filter_name, str):
raise TypeError("Input argument filter_name is expected to be str, "
"but is not.")
# Check that filter_name is not an empty str.
if not filter_name:
raise ValueError("Input argument filter_name cannot be empty.")
# Check that filter_callable is callable.
if not callable(filter_callable):
raise TypeError(
"Input argument filter_callable is expected to be callable, "
"but is not.")
self._tensor_filters[filter_name] = filter_callable
def get_tensor_filter(self, filter_name):
"""Retrieve filter function by name.
Args:
filter_name: Name of the filter set during add_tensor_filter() call.
Returns:
The callable associated with the filter name.
Raises:
ValueError: If there is no tensor filter of the specified filter name.
"""
if filter_name not in self._tensor_filters:
raise ValueError("There is no tensor filter named \"%s\"" % filter_name)
return self._tensor_filters[filter_name]
def get_help(self, handler_name):
return self._arg_parsers[handler_name].format_help()
def list_tensors(self, args, screen_info=None):
"""Command handler for list_tensors.
List tensors dumped during debugged Session.run() call.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# TODO(cais): Add annotations of substrings for dumped tensor names, to
# facilitate on-screen highlighting/selection of node names.
_ = screen_info
parsed = self._arg_parsers["list_tensors"].parse_args(args)
output = []
filter_strs = []
if parsed.op_type_filter:
op_type_regex = re.compile(parsed.op_type_filter)
filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter)
else:
op_type_regex = None
if parsed.node_name_filter:
node_name_regex = re.compile(parsed.node_name_filter)
filter_strs.append("Node name regex filter: \"%s\"" %
parsed.node_name_filter)
else:
node_name_regex = None
output = debugger_cli_common.RichTextLines(filter_strs)
output.append("")
if parsed.tensor_filter:
try:
filter_callable = self.get_tensor_filter(parsed.tensor_filter)
except ValueError:
output = cli_shared.error("There is no tensor filter named \"%s\"." %
parsed.tensor_filter)
_add_main_menu(output, node_name=None, enable_list_tensors=False)
return output
data_to_show = self._debug_dump.find(filter_callable)
else:
data_to_show = self._debug_dump.dumped_tensor_data
# TODO(cais): Implement filter by lambda on tensor value.
max_timestamp_width, max_dump_size_width, max_op_type_width = (
self._measure_tensor_list_column_widths(data_to_show))
# Sort the data.
data_to_show = self._sort_dump_data_by(
data_to_show, parsed.sort_by, parsed.reverse)
output.extend(
self._tensor_list_column_heads(parsed, max_timestamp_width,
max_dump_size_width, max_op_type_width))
dump_count = 0
for dump in data_to_show:
if node_name_regex and not node_name_regex.match(dump.node_name):
continue
if op_type_regex:
op_type = self._debug_dump.node_op_type(dump.node_name)
if not op_type_regex.match(op_type):
continue
rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot)
op_type = self._debug_dump.node_op_type(dump.node_name)
line = "[%.3f]" % rel_time
line += " " * (max_timestamp_width - len(line))
line += dump_size_str
line += " " * (max_timestamp_width + max_dump_size_width - len(line))
line += op_type
line += " " * (max_timestamp_width + max_dump_size_width +
max_op_type_width - len(line))
line += dumped_tensor_name
output.append(
line,
font_attr_segs=[(
len(line) - len(dumped_tensor_name), len(line),
debugger_cli_common.MenuItem("", "pt %s" % dumped_tensor_name))])
dump_count += 1
if parsed.tensor_filter:
output.prepend([
"%d dumped tensor(s) passing filter \"%s\":" %
(dump_count, parsed.tensor_filter)
])
else:
output.prepend(["%d dumped tensor(s):" % dump_count])
_add_main_menu(output, node_name=None, enable_list_tensors=False)
return output
def _measure_tensor_list_column_widths(self, data):
"""Determine the maximum widths of the timestamp and op-type column.
This method assumes that data is sorted in the default order, i.e.,
by ascending timestamps.
Args:
      data: (list of DebugTensorDatum) the data based on which the maximum
column widths will be determined.
Returns:
(int) maximum width of the timestamp column. 0 if data is empty.
(int) maximum width of the dump size column. 0 if data is empty.
(int) maximum width of the op type column. 0 if data is empty.
"""
max_timestamp_width = 0
if data:
max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0
max_timestamp_width = len("[%.3f] " % max_rel_time_ms) + 1
max_timestamp_width = max(max_timestamp_width,
len(self._TIMESTAMP_COLUMN_HEAD) + 1)
max_dump_size_width = 0
for dump in data:
dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
if len(dump_size_str) + 1 > max_dump_size_width:
max_dump_size_width = len(dump_size_str) + 1
max_dump_size_width = max(max_dump_size_width,
len(self._DUMP_SIZE_COLUMN_HEAD) + 1)
max_op_type_width = 0
for dump in data:
op_type = self._debug_dump.node_op_type(dump.node_name)
if len(op_type) + 1 > max_op_type_width:
max_op_type_width = len(op_type) + 1
max_op_type_width = max(max_op_type_width,
len(self._OP_TYPE_COLUMN_HEAD) + 1)
return max_timestamp_width, max_dump_size_width, max_op_type_width
def _sort_dump_data_by(self, data, sort_by, reverse):
"""Sort a list of DebugTensorDatum in specified order.
Args:
data: (list of DebugTensorDatum) the data to be sorted.
sort_by: The field to sort data by.
reverse: (bool) Whether to use reversed (descending) order.
Returns:
(list of DebugTensorDatum) in sorted order.
Raises:
ValueError: given an invalid value of sort_by.
"""
if sort_by == SORT_TENSORS_BY_TIMESTAMP:
return sorted(
data,
reverse=reverse,
key=lambda x: x.timestamp)
elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:
return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)
elif sort_by == SORT_TENSORS_BY_OP_TYPE:
return sorted(
data,
reverse=reverse,
key=lambda x: self._debug_dump.node_op_type(x.node_name))
elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:
return sorted(
data,
reverse=reverse,
key=lambda x: "%s:%d" % (x.node_name, x.output_slot))
else:
raise ValueError("Unsupported key to sort tensors by: %s" % sort_by)
def _tensor_list_column_heads(self, parsed, max_timestamp_width,
max_dump_size_width, max_op_type_width):
"""Generate a line containing the column heads of the tensor list.
Args:
parsed: Parsed arguments (by argparse) of the list_tensors command.
max_timestamp_width: (int) maximum width of the timestamp column.
max_dump_size_width: (int) maximum width of the dump size column.
max_op_type_width: (int) maximum width of the op type column.
Returns:
A RichTextLines object.
"""
base_command = "list_tensors"
if parsed.tensor_filter:
base_command += " -f %s" % parsed.tensor_filter
if parsed.op_type_filter:
base_command += " -t %s" % parsed.op_type_filter
if parsed.node_name_filter:
base_command += " -n %s" % parsed.node_name_filter
attr_segs = {0: []}
row = self._TIMESTAMP_COLUMN_HEAD
command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TIMESTAMP)
if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and not parsed.reverse:
command += " -r"
attr_segs[0].append(
(0, len(row), [debugger_cli_common.MenuItem(None, command), "bold"]))
row += " " * (max_timestamp_width - len(row))
prev_len = len(row)
row += self._DUMP_SIZE_COLUMN_HEAD
command = "%s -s %s" % (base_command, SORT_TENSORS_BY_DUMP_SIZE)
if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and not parsed.reverse:
command += " -r"
attr_segs[0].append((prev_len, len(row),
[debugger_cli_common.MenuItem(None, command), "bold"]))
row += " " * (max_dump_size_width + max_timestamp_width - len(row))
prev_len = len(row)
row += self._OP_TYPE_COLUMN_HEAD
command = "%s -s %s" % (base_command, SORT_TENSORS_BY_OP_TYPE)
if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and not parsed.reverse:
command += " -r"
attr_segs[0].append((prev_len, len(row),
[debugger_cli_common.MenuItem(None, command), "bold"]))
row += " " * (
max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)
)
prev_len = len(row)
row += self._TENSOR_NAME_COLUMN_HEAD
command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TENSOR_NAME)
if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and not parsed.reverse:
command += " -r"
attr_segs[0].append((prev_len, len(row),
[debugger_cli_common.MenuItem("", command), "bold"]))
row += " " * (
max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)
)
return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs)
def node_info(self, args, screen_info=None):
"""Command handler for node_info.
Query information about a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# TODO(cais): Add annotation of substrings for node names, to facilitate
# on-screen highlighting/selection of node names.
_ = screen_info
parsed = self._arg_parsers["node_info"].parse_args(args)
# Get a node name, regardless of whether the input is a node name (without
# output slot attached) or a tensor name (with output slot attached).
node_name, unused_slot = debug_graphs.parse_node_or_tensor_name(
parsed.node_name)
if not self._debug_dump.node_exists(node_name):
output = cli_shared.error(
"There is no node named \"%s\" in the partition graphs" % node_name)
_add_main_menu(
output,
node_name=None,
enable_list_tensors=True,
enable_node_info=False,
enable_list_inputs=False,
enable_list_outputs=False)
return output
# TODO(cais): Provide UI glossary feature to explain to users what the
# term "partition graph" means and how it is related to TF graph objects
# in Python. The information can be along the line of:
# "A tensorflow graph defined in Python is stripped of unused ops
# according to the feeds and fetches and divided into a number of
# partition graphs that may be distributed among multiple devices and
# hosts. The partition graphs are what's actually executed by the C++
# runtime during a run() call."
lines = ["Node %s" % node_name]
font_attr_segs = {
0: [(len(lines[-1]) - len(node_name), len(lines[-1]), "bold")]
}
lines.append("")
lines.append(" Op: %s" % self._debug_dump.node_op_type(node_name))
lines.append(" Device: %s" % self._debug_dump.node_device(node_name))
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
# List node inputs (non-control and control).
inputs = self._exclude_blacklisted_ops(
self._debug_dump.node_inputs(node_name))
ctrl_inputs = self._exclude_blacklisted_ops(
self._debug_dump.node_inputs(node_name, is_control=True))
output.extend(self._format_neighbors("input", inputs, ctrl_inputs))
# List node output recipients (non-control and control).
recs = self._exclude_blacklisted_ops(
self._debug_dump.node_recipients(node_name))
ctrl_recs = self._exclude_blacklisted_ops(
self._debug_dump.node_recipients(node_name, is_control=True))
output.extend(self._format_neighbors("recipient", recs, ctrl_recs))
# Optional: List attributes of the node.
if parsed.attributes:
output.extend(self._list_node_attributes(node_name))
# Optional: List dumps available from the node.
if parsed.dumps:
output.extend(self._list_node_dumps(node_name))
if parsed.traceback:
output.extend(self._render_node_traceback(node_name))
_add_main_menu(output, node_name=node_name, enable_node_info=False)
return output
def _exclude_blacklisted_ops(self, node_names):
"""Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_BLACKLIST.
Args:
node_names: An iterable of node or graph element names.
Returns:
A list of node names that are not blacklisted.
"""
return [node_name for node_name in node_names
if self._debug_dump.node_op_type(
debug_graphs.get_node_name(node_name)) not in
self._GRAPH_STRUCT_OP_TYPE_BLACKLIST]
def _render_node_traceback(self, node_name):
"""Render traceback of a node's creation in Python, if available.
Args:
node_name: (str) name of the node.
Returns:
A RichTextLines object containing the stack trace of the node's
construction.
"""
lines = [RL(""), RL(""), RL("Traceback of node construction:", "bold")]
try:
node_stack = self._debug_dump.node_traceback(node_name)
for depth, (file_path, line, function_name, text) in enumerate(
node_stack):
lines.append("%d: %s" % (depth, file_path))
attribute = debugger_cli_common.MenuItem(
"", "ps %s -b %d" % (file_path, line)) if text else None
line_number_line = RL(" ")
line_number_line += RL("Line: %d" % line, attribute)
lines.append(line_number_line)
lines.append(" Function: %s" % function_name)
lines.append(" Text: " + (("\"%s\"" % text) if text else "None"))
lines.append("")
except KeyError:
lines.append("(Node unavailable in the loaded Python graph)")
except LookupError:
lines.append("(Unavailable because no Python graph has been loaded)")
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def list_inputs(self, args, screen_info=None):
"""Command handler for inputs.
Show inputs to a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# Screen info not currently used by this handler. Include this line to
# mute pylint.
_ = screen_info
# TODO(cais): Use screen info to format the output lines more prettily,
# e.g., hanging indent of long node names.
parsed = self._arg_parsers["list_inputs"].parse_args(args)
output = self._list_inputs_or_outputs(
parsed.recursive,
parsed.node_name,
parsed.depth,
parsed.control,
parsed.op_type,
do_outputs=False)
node_name = debug_graphs.get_node_name(parsed.node_name)
_add_main_menu(output, node_name=node_name, enable_list_inputs=False)
return output
def print_tensor(self, args, screen_info=None):
"""Command handler for print_tensor.
Print value of a given dumped tensor.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
parsed = self._arg_parsers["print_tensor"].parse_args(args)
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
screen_info)
# Determine if any range-highlighting is required.
highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)
tensor_name, tensor_slicing = (
command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))
node_name, output_slot = debug_graphs.parse_node_or_tensor_name(tensor_name)
if (self._debug_dump.loaded_partition_graphs() and
not self._debug_dump.node_exists(node_name)):
output = cli_shared.error(
"Node \"%s\" does not exist in partition graphs" % node_name)
_add_main_menu(
output,
node_name=None,
enable_list_tensors=True,
enable_print_tensor=False)
return output
watch_keys = self._debug_dump.debug_watch_keys(node_name)
if output_slot is None:
output_slots = set()
for watch_key in watch_keys:
output_slots.add(int(watch_key.split(":")[1]))
if len(output_slots) == 1:
# There is only one dumped tensor from this node, so there is no
# ambiguity. Proceed to show the only dumped tensor.
output_slot = list(output_slots)[0]
else:
# There is more than one dumped tensor from this node. Indicate as
# such.
# TODO(cais): Provide an output screen with command links for
# convenience.
lines = [
"Node \"%s\" generated debug dumps from %s output slots:" %
(node_name, len(output_slots)),
"Please specify the output slot: %s:x." % node_name
]
output = debugger_cli_common.RichTextLines(lines)
_add_main_menu(
output,
node_name=node_name,
enable_list_tensors=True,
enable_print_tensor=False)
return output
# Find debug dump data that match the tensor name (node name + output
# slot).
matching_data = []
for watch_key in watch_keys:
debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
for datum in debug_tensor_data:
if datum.output_slot == output_slot:
matching_data.append(datum)
if not matching_data:
# No dump for this tensor.
output = cli_shared.error("Tensor \"%s\" did not generate any dumps." %
parsed.tensor_name)
elif len(matching_data) == 1:
# There is only one dump for this tensor.
if parsed.number <= 0:
output = cli_shared.format_tensor(
matching_data[0].get_tensor(),
matching_data[0].watch_key,
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=highlight_options,
include_numeric_summary=parsed.numeric_summary,
write_path=parsed.write_path)
else:
output = cli_shared.error(
"Invalid number (%d) for tensor %s, which generated one dump." %
(parsed.number, parsed.tensor_name))
_add_main_menu(output, node_name=node_name, enable_print_tensor=False)
else:
# There is more than one dump for this tensor.
if parsed.number < 0:
lines = [
"Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
len(matching_data))
]
font_attr_segs = {}
for i, datum in enumerate(matching_data):
rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))
command = "print_tensor %s -n %d" % (parsed.tensor_name, i)
font_attr_segs[len(lines) - 1] = [(
len(lines[-1]) - len(datum.watch_key), len(lines[-1]),
debugger_cli_common.MenuItem(None, command))]
lines.append("")
lines.append(
"You can use the -n (--number) flag to specify which dump to "
"print.")
lines.append("For example:")
lines.append(" print_tensor %s -n 0" % parsed.tensor_name)
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
elif parsed.number >= len(matching_data):
output = cli_shared.error(
"Specified number (%d) exceeds the number of available dumps "
"(%d) for tensor %s" %
(parsed.number, len(matching_data), parsed.tensor_name))
else:
output = cli_shared.format_tensor(
matching_data[parsed.number].get_tensor(),
matching_data[parsed.number].watch_key + " (dump #%d)" %
parsed.number,
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=highlight_options,
write_path=parsed.write_path)
_add_main_menu(output, node_name=node_name, enable_print_tensor=False)
return output
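# Illustrative sketch (not part of the original module): print_tensor groups
# debug dumps by output slot using watch keys of the form
# "<node_name>:<output_slot>:<debug_op>". The helper below re-implements just
# that grouping step for plain strings; it does not use the real debug_data or
# command_parser APIs, and its name is invented for the example.
def _group_watch_keys_by_slot_sketch(watch_keys):
    """Group keys such as 'hidden/MatMul:0:DebugIdentity' by output slot."""
    slots = {}
    for key in watch_keys:
        node_name, slot, debug_op = key.split(":", 2)
        slots.setdefault(int(slot), []).append((node_name, debug_op))
    return slots
# Example: _group_watch_keys_by_slot_sketch(
#     ["hidden/MatMul:0:DebugIdentity", "hidden/MatMul:1:DebugIdentity"])
# -> {0: [('hidden/MatMul', 'DebugIdentity')], 1: [('hidden/MatMul', 'DebugIdentity')]}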
def list_outputs(self, args, screen_info=None):
"""Command handler for inputs.
Show inputs to a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# Screen info not currently used by this handler. Include this line to
# mute pylint.
_ = screen_info
# TODO(cais): Use screen info to format the output lines more prettily,
# e.g., hanging indent of long node names.
parsed = self._arg_parsers["list_outputs"].parse_args(args)
output = self._list_inputs_or_outputs(
parsed.recursive,
parsed.node_name,
parsed.depth,
parsed.control,
parsed.op_type,
do_outputs=True)
node_name = debug_graphs.get_node_name(parsed.node_name)
_add_main_menu(output, node_name=node_name, enable_list_outputs=False)
return output
def evaluate_expression(self, args, screen_info=None):
parsed = self._arg_parsers["eval"].parse_args(args)
eval_res = self._evaluator.evaluate(parsed.expression)
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
screen_info)
return cli_shared.format_tensor(
eval_res,
"from eval of expression '%s'" % parsed.expression,
np_printoptions,
print_all=parsed.print_all,
include_numeric_summary=True,
write_path=parsed.write_path)
def _reconstruct_print_source_command(self,
parsed,
line_begin,
max_elements_per_line_increase=0):
return "ps %s %s -b %d -m %d" % (
parsed.source_file_path, "-t" if parsed.tensors else "", line_begin,
parsed.max_elements_per_line + max_elements_per_line_increase)
def print_source(self, args, screen_info=None):
"""Print the content of a source file."""
del screen_info # Unused.
parsed = self._arg_parsers["print_source"].parse_args(args)
source_annotation = source_utils.annotate_source(
self._debug_dump,
parsed.source_file_path,
do_dumped_tensors=parsed.tensors)
source_lines, line_num_width = source_utils.load_source(
parsed.source_file_path)
labeled_source_lines = []
actual_initial_scroll_target = 0
for i, line in enumerate(source_lines):
annotated_line = RL("L%d" % (i + 1), cli_shared.COLOR_YELLOW)
annotated_line += " " * (line_num_width - len(annotated_line))
annotated_line += line
labeled_source_lines.append(annotated_line)
if i + 1 == parsed.line_begin:
actual_initial_scroll_target = len(labeled_source_lines) - 1
if i + 1 in source_annotation:
sorted_elements = sorted(source_annotation[i + 1])
for k, element in enumerate(sorted_elements):
if k >= parsed.max_elements_per_line:
omitted_info_line = RL(" (... Omitted %d of %d %s ...) " % (
len(sorted_elements) - parsed.max_elements_per_line,
len(sorted_elements),
"tensor(s)" if parsed.tensors else "op(s)"))
omitted_info_line += RL(
"+5",
debugger_cli_common.MenuItem(
None,
self._reconstruct_print_source_command(
parsed, i + 1, max_elements_per_line_increase=5)))
labeled_source_lines.append(omitted_info_line)
break
label = RL(" " * 4)
if self._debug_dump.debug_watch_keys(
debug_graphs.get_node_name(element)):
attribute = debugger_cli_common.MenuItem("", "pt %s" % element)
else:
attribute = cli_shared.COLOR_BLUE
label += RL(element, attribute)
labeled_source_lines.append(label)
output = debugger_cli_common.rich_text_lines_from_rich_line_list(
labeled_source_lines,
annotations={debugger_cli_common.INIT_SCROLL_POS_KEY:
actual_initial_scroll_target})
_add_main_menu(output, node_name=None)
return output
def _make_source_table(self, source_list, is_tf_py_library):
"""Make a table summarizing the source files that create nodes and tensors.
Args:
source_list: List of source files and related information as a list of
tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps,
first_line).
is_tf_py_library: (`bool`) whether this table is for files that belong
to the TensorFlow Python library.
Returns:
The table as a `debugger_cli_common.RichTextLines` object.
"""
path_head = "Source file path"
num_nodes_head = "#(nodes)"
num_tensors_head = "#(tensors)"
num_dumps_head = "#(tensor dumps)"
if is_tf_py_library:
# Use color to mark files that are guessed to belong to TensorFlow Python
# library.
color = cli_shared.COLOR_GRAY
lines = [RL("TensorFlow Python library file(s):", color)]
else:
color = cli_shared.COLOR_WHITE
lines = [RL("File(s) outside TensorFlow Python library:", color)]
if not source_list:
lines.append(RL("[No files.]"))
lines.append(RL())
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
path_column_width = max(
max([len(item[0]) for item in source_list]), len(path_head)) + 1
num_nodes_column_width = max(
max([len(str(item[2])) for item in source_list]),
len(num_nodes_head)) + 1
num_tensors_column_width = max(
max([len(str(item[3])) for item in source_list]),
len(num_tensors_head)) + 1
head = RL(path_head + " " * (path_column_width - len(path_head)), color)
head += RL(num_nodes_head + " " * (
num_nodes_column_width - len(num_nodes_head)), color)
head += RL(num_tensors_head + " " * (
num_tensors_column_width - len(num_tensors_head)), color)
head += RL(num_dumps_head, color)
lines.append(head)
for (file_path, _, num_nodes, num_tensors, num_dumps,
first_line_num) in source_list:
path_attributes = [color]
if source_utils.is_extension_uncompiled_python_source(file_path):
path_attributes.append(
debugger_cli_common.MenuItem(None, "ps %s -b %d" %
(file_path, first_line_num)))
line = RL(file_path, path_attributes)
line += " " * (path_column_width - len(line))
line += RL(
str(num_nodes) + " " * (num_nodes_column_width - len(str(num_nodes))),
color)
line += RL(
str(num_tensors) + " " *
(num_tensors_column_width - len(str(num_tensors))), color)
line += RL(str(num_dumps), color)
lines.append(line)
lines.append(RL())
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
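# Illustrative sketch (not part of the original module): the table above pads
# every column to max(longest value, header) + 1 characters. The helper below
# reproduces that width computation for plain strings, independent of
# RichLine/RichTextLines; the function name and sample rows are invented.
def _pad_columns_sketch(headers, rows):
    widths = [max([len(h)] + [len(str(r[i])) for r in rows]) + 1
              for i, h in enumerate(headers)]
    fmt = "".join("%%-%ds" % w for w in widths)
    return [fmt % tuple(headers)] + [fmt % tuple(map(str, r)) for r in rows]
# _pad_columns_sketch(["Source file path", "#(nodes)"],
#                     [["train.py", 12], ["model.py", 240]])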
def list_source(self, args, screen_info=None):
"""List Python source files that constructed nodes and tensors."""
del screen_info # Unused.
parsed = self._arg_parsers["list_source"].parse_args(args)
source_list = source_utils.list_source_files_against_dump(
self._debug_dump,
path_regex_whitelist=parsed.path_filter,
node_name_regex_whitelist=parsed.node_name_filter)
top_lines = [
RL("List of source files that created nodes in this run", "bold")]
if parsed.path_filter:
top_lines.append(
RL("File path regex filter: \"%s\"" % parsed.path_filter))
if parsed.node_name_filter:
top_lines.append(
RL("Node name regex filter: \"%s\"" % parsed.node_name_filter))
top_lines.append(RL())
output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines)
if not source_list:
output.append("[No source file information.]")
return output
output.extend(self._make_source_table(
[item for item in source_list if not item[1]], False))
output.extend(self._make_source_table(
[item for item in source_list if item[1]], True))
_add_main_menu(output, node_name=None)
return output
def _list_inputs_or_outputs(self,
recursive,
node_name,
depth,
control,
op_type,
do_outputs=False):
"""Helper function used by list_inputs and list_outputs.
Format a list of lines to display the inputs or output recipients of a
given node.
Args:
recursive: Whether the listing is to be done recursively, as a boolean.
node_name: The name of the node in question, as a str.
depth: Maximum recursion depth, applies only if recursive == True, as an
int.
control: Whether control inputs or control recipients are included, as a
boolean.
op_type: Whether the op types of the nodes are to be included, as a
boolean.
do_outputs: Whether recipients, instead of input nodes, are to be
listed, as a boolean.
Returns:
Input or recipient tree formatted as a RichTextLines object.
"""
if do_outputs:
tracker = self._debug_dump.node_recipients
type_str = "Recipients of"
short_type_str = "recipients"
else:
tracker = self._debug_dump.node_inputs
type_str = "Inputs to"
short_type_str = "inputs"
lines = []
font_attr_segs = {}
# Check if this is a tensor name, instead of a node name.
node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name)
# Check if node exists.
if not self._debug_dump.node_exists(node_name):
return cli_shared.error(
"There is no node named \"%s\" in the partition graphs" % node_name)
if recursive:
max_depth = depth
else:
max_depth = 1
if control:
include_ctrls_str = ", control %s included" % short_type_str
else:
include_ctrls_str = ""
line = "%s node \"%s\"" % (type_str, node_name)
font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, "bold")
]
lines.append(line + " (Depth limit = %d%s):" % (max_depth, include_ctrls_str
))
command_template = "lo -c -r %s" if do_outputs else "li -c -r %s"
self._dfs_from_node(
lines,
font_attr_segs,
node_name,
tracker,
max_depth,
1, [],
control,
op_type,
command_template=command_template)
# Include legend.
lines.append("")
lines.append("Legend:")
lines.append(" (d): recursion depth = d.")
if control:
lines.append(" (Ctrl): Control input.")
if op_type:
lines.append(" [Op]: Input node has op type Op.")
# TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.
return debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
def _dfs_from_node(self,
lines,
attr_segs,
node_name,
tracker,
max_depth,
depth,
unfinished,
include_control=False,
show_op_type=False,
command_template=None):
"""Perform depth-first search (DFS) traversal of a node's input tree.
It recursively tracks the inputs (or output recipients) of the node called
node_name, and appends these inputs (or output recipients) to a list of text
lines (lines) with proper indentation that reflects the recursion depth,
together with some formatting attributes (to attr_segs). The formatting
attributes can include command shortcuts, for example.
Args:
lines: Text lines to append to, as a list of str.
attr_segs: (dict) Attribute segments dictionary to append to.
node_name: Name of the node, as a str. This arg is updated during the
recursion.
tracker: A callable that takes one str as the node name input and
returns a list of str as the inputs/outputs.
This makes this function general enough to be used with both
node-input and node-output tracking.
max_depth: Maximum recursion depth, as an int.
depth: Current recursion depth. This arg is updated during the
recursion.
unfinished: A stack of unfinished recursion depths, as a list of int.
include_control: Whether control dependencies are to be included as
inputs (and marked as such).
show_op_type: Whether the op types of the input nodes are to be displayed
alongside the nodes' names.
command_template: (str) Template for command shortcut of the node names.
"""
# Make a shallow copy of the list because it may be extended later.
all_inputs = self._exclude_blacklisted_ops(
copy.copy(tracker(node_name, is_control=False)))
is_ctrl = [False] * len(all_inputs)
if include_control:
# Sort control inputs or recipients in alphabetical order of the node
# names.
ctrl_inputs = self._exclude_blacklisted_ops(
sorted(tracker(node_name, is_control=True)))
all_inputs.extend(ctrl_inputs)
is_ctrl.extend([True] * len(ctrl_inputs))
if not all_inputs:
if depth == 1:
lines.append(" [None]")
return
unfinished.append(depth)
# Create depth-dependent hanging indent for the line.
hang = ""
for k in xrange(depth):
if k < depth - 1:
if k + 1 in unfinished:
hang += HANG_UNFINISHED
else:
hang += HANG_FINISHED
else:
hang += HANG_SUFFIX
if all_inputs and depth > max_depth:
lines.append(hang + ELLIPSIS)
unfinished.pop()
return
hang += DEPTH_TEMPLATE % depth
for i in xrange(len(all_inputs)):
inp = all_inputs[i]
op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp))
if op_type in self._GRAPH_STRUCT_OP_TYPE_BLACKLIST:
continue
if is_ctrl[i]:
ctrl_str = CTRL_LABEL
else:
ctrl_str = ""
op_type_str = ""
if show_op_type:
op_type_str = OP_TYPE_TEMPLATE % op_type
if i == len(all_inputs) - 1:
unfinished.pop()
line = hang + ctrl_str + op_type_str + inp
lines.append(line)
if command_template:
attr_segs[len(lines) - 1] = [(
len(line) - len(inp), len(line),
debugger_cli_common.MenuItem(None, command_template % inp))]
# Recursive call.
# The input's/output's name can be a tensor name, in the case of node
# with >1 output slots.
inp_node_name, _ = debug_graphs.parse_node_or_tensor_name(inp)
self._dfs_from_node(
lines,
attr_segs,
inp_node_name,
tracker,
max_depth,
depth + 1,
unfinished,
include_control=include_control,
show_op_type=show_op_type,
command_template=command_template)
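# Illustrative sketch (not part of the original module): a self-contained
# version of the traversal above, using a plain adjacency dict instead of a
# DebugDumpDir tracker and a fixed "|  " marker instead of the HANG_* templates.
def _dfs_sketch(graph, node, max_depth, depth=1, lines=None):
    """Append one indented line per visited input, down to max_depth levels."""
    if lines is None:
        lines = []
    if depth > max_depth:
        return lines
    for inp in graph.get(node, []):
        lines.append("|  " * (depth - 1) + "|- (%d) %s" % (depth, inp))
        _dfs_sketch(graph, inp, max_depth, depth + 1, lines)
    return lines
# _dfs_sketch({"loss": ["matmul", "bias"], "matmul": ["x", "w"]}, "loss", 2)
# -> ['|- (1) matmul', '|  |- (2) x', '|  |- (2) w', '|- (1) bias']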
def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):
"""List neighbors (inputs or recipients) of a node.
Args:
neighbor_type: ("input" | "recipient")
non_ctrls: Non-control neighbor node names, as a list of str.
ctrls: Control neighbor node names, as a list of str.
Returns:
A RichTextLines object.
"""
# TODO(cais): Return RichTextLines instead, to allow annotation of node
# names.
lines = []
font_attr_segs = {}
lines.append("")
lines.append(" %d %s(s) + %d control %s(s):" %
(len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))
lines.append(" %d %s(s):" % (len(non_ctrls), neighbor_type))
for non_ctrl in non_ctrls:
line = " [%s] %s" % (self._debug_dump.node_op_type(non_ctrl),
non_ctrl)
lines.append(line)
font_attr_segs[len(lines) - 1] = [(
len(line) - len(non_ctrl), len(line),
debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % non_ctrl))]
if ctrls:
lines.append("")
lines.append(" %d control %s(s):" % (len(ctrls), neighbor_type))
for ctrl in ctrls:
line = " [%s] %s" % (self._debug_dump.node_op_type(ctrl), ctrl)
lines.append(line)
font_attr_segs[len(lines) - 1] = [(
len(line) - len(ctrl), len(line),
debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % ctrl))]
return debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
def _list_node_attributes(self, node_name):
"""List neighbors (inputs or recipients) of a node.
Args:
node_name: Name of the node of which the attributes are to be listed.
Returns:
A RichTextLines object.
"""
lines = []
lines.append("")
lines.append("Node attributes:")
attrs = self._debug_dump.node_attributes(node_name)
for attr_key in attrs:
lines.append(" %s:" % attr_key)
attr_val_str = repr(attrs[attr_key]).strip().replace("\n", " ")
lines.append(" %s" % attr_val_str)
lines.append("")
return debugger_cli_common.RichTextLines(lines)
def _list_node_dumps(self, node_name):
"""List dumped tensor data from a node.
Args:
node_name: Name of the node whose dumped tensors are to be listed.
Returns:
A RichTextLines object.
"""
lines = []
font_attr_segs = {}
watch_keys = self._debug_dump.debug_watch_keys(node_name)
dump_count = 0
for watch_key in watch_keys:
debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
for datum in debug_tensor_data:
line = " Slot %d @ %s @ %.3f ms" % (
datum.output_slot, datum.debug_op,
(datum.timestamp - self._debug_dump.t0) / 1000.0)
lines.append(line)
command = "pt %s:%d -n %d" % (node_name, datum.output_slot, dump_count)
font_attr_segs[len(lines) - 1] = [(
2, len(line), debugger_cli_common.MenuItem(None, command))]
dump_count += 1
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
output_with_header = debugger_cli_common.RichTextLines(
["%d dumped tensor(s):" % dump_count, ""])
output_with_header.extend(output)
return output_with_header
def create_analyzer_ui(debug_dump,
tensor_filters=None,
ui_type="curses",
on_ui_exit=None,
config=None):
"""Create an instance of CursesUI based on a DebugDumpDir object.
Args:
debug_dump: (debug_data.DebugDumpDir) The debug dump to use.
tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor
filter (Callable).
ui_type: (str) requested UI type, e.g., "curses", "readline".
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: A `cli_config.CLIConfig` object.
Returns:
(base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer
commands and tab-completions registered.
"""
if config is None:
config = cli_config.CLIConfig()
analyzer = DebugAnalyzer(debug_dump, config=config)
if tensor_filters:
for tensor_filter_name in tensor_filters:
analyzer.add_tensor_filter(
tensor_filter_name, tensor_filters[tensor_filter_name])
cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit, config=config)
cli.register_command_handler(
"list_tensors",
analyzer.list_tensors,
analyzer.get_help("list_tensors"),
prefix_aliases=["lt"])
cli.register_command_handler(
"node_info",
analyzer.node_info,
analyzer.get_help("node_info"),
prefix_aliases=["ni"])
cli.register_command_handler(
"list_inputs",
analyzer.list_inputs,
analyzer.get_help("list_inputs"),
prefix_aliases=["li"])
cli.register_command_handler(
"list_outputs",
analyzer.list_outputs,
analyzer.get_help("list_outputs"),
prefix_aliases=["lo"])
cli.register_command_handler(
"print_tensor",
analyzer.print_tensor,
analyzer.get_help("print_tensor"),
prefix_aliases=["pt"])
cli.register_command_handler(
"print_source",
analyzer.print_source,
analyzer.get_help("print_source"),
prefix_aliases=["ps"])
cli.register_command_handler(
"list_source",
analyzer.list_source,
analyzer.get_help("list_source"),
prefix_aliases=["ls"])
cli.register_command_handler(
"eval",
analyzer.evaluate_expression,
analyzer.get_help("eval"),
prefix_aliases=["ev"])
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" % (datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names)
return cli
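# Illustrative sketch (not part of the original module): create_analyzer_ui
# binds each handler to a command name plus short prefix aliases (e.g. "lt"
# for "list_tensors"). The toy registry below shows that dispatch pattern in
# isolation; it is not the real base_ui/CursesUI API, and the class name is
# invented for the example.
class _CommandRegistrySketch(object):
    def __init__(self):
        self._handlers = {}
    def register(self, name, handler, prefix_aliases=()):
        # The full name and every alias map to the same handler callable.
        for key in (name,) + tuple(prefix_aliases):
            self._handlers[key] = handler
    def dispatch(self, command_line):
        tokens = command_line.split()
        return self._handlers[tokens[0]](tokens[1:])
# registry = _CommandRegistrySketch()
# registry.register("list_tensors", lambda args: "listing %s" % args,
#                   prefix_aliases=("lt",))
# registry.dispatch("lt -f has_inf_or_nan")  # -> "listing ['-f', 'has_inf_or_nan']"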
|
eadgarchen/tensorflow
|
tensorflow/python/debug/cli/analyzer_cli.py
|
Python
|
apache-2.0
| 58,062
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from setup.installer import VMInstaller
from setup import Command
class Linux32(VMInstaller):
description = 'Build 32bit linux binary installer'
INSTALLER_EXT = 'txz'
VM_NAME = 'linux32-build'
FREEZE_COMMAND = 'linux_freeze'
FREEZE_TEMPLATE = 'python -OO setup.py {freeze_command}'
class Linux64(Linux32):
description = 'Build 64bit linux binary installer'
VM_NAME = 'linux64-build'
IS_64_BIT = True
class Linux(Command):
description = 'Build linux binary installers'
sub_commands = ['linux64', 'linux32']
|
ashang/calibre
|
setup/installer/linux/__init__.py
|
Python
|
gpl-3.0
| 792
|
import requests
import time
import dblayer
from sklearn.cluster import DBSCAN
import plotly
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import random
import testfile
# Create random colors in list
color_list = []
def generate_color(ncluster):
for i in range(ncluster):
# Each color needs exactly three random RGB components, regardless of ncluster.
color = '#{:02x}{:02x}{:02x}'.format(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
color_list.append(color)
def showLatLongInCluster(data):
# Run the DBSCAN from sklearn
dbscan = DBSCAN(eps=2, min_samples=5, metric='euclidean', algorithm='auto').fit(data)
cluster_labels = dbscan.labels_
n_clusters = len(set(cluster_labels)) - (1 if -1 in cluster_labels else 0)
generate_color(n_clusters)
plot_data = []
# get the cluster
for i in range(n_clusters):
ds = data[np.where(cluster_labels == i)]
clustername = "Cluster " + str(i + 1)
trace = go.Scattergeo(lon=ds[:,0], lat=ds[:,1],mode='markers',marker=dict(color=color_list[i], size=5),
name=clustername)
plot_data.append(trace)
layout = go.Layout(showlegend=False, title='Earthquakes In North and South America',
titlefont=dict(family='Courier New, monospace',size=20,color='#7f7f7f'),
geo=dict(scope=('north america', 'south america'),
projection=dict(type='orthographic',rotation=dict(lon=-60)),
showland=True, landcolor='#191919',
showcountries=True,
showocean=True, oceancolor='rgb(217,217,255)',
showframe=False,
),
xaxis=dict(showgrid=False, zeroline=False),
yaxis=dict(showgrid=False, zeroline=False))
fig = go.Figure(data=plot_data, layout=layout)
div = plotly.offline.plot(fig, include_plotlyjs=True, output_type='div')
return div
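# Illustrative sketch (not part of the original module): a minimal,
# self-contained version of the clustering step above. It runs DBSCAN on a
# small synthetic (longitude, latitude) array, using the numpy and DBSCAN
# imports at the top of this module, and counts clusters the same way
# showLatLongInCluster does; the eps/min_samples values and blob centers are
# arbitrary choices for the example.
def dbscan_cluster_count_sketch():
    rng = np.random.RandomState(0)
    blob_a = rng.normal(loc=(-70.0, -10.0), scale=0.5, size=(30, 2))
    blob_b = rng.normal(loc=(-100.0, 40.0), scale=0.5, size=(30, 2))
    outliers = rng.uniform(low=(-180.0, -60.0), high=(-30.0, 60.0), size=(5, 2))
    data = np.vstack([blob_a, blob_b, outliers])
    labels = DBSCAN(eps=2, min_samples=5, metric='euclidean').fit(data).labels_
    # Label -1 marks noise points, so it is excluded from the cluster count.
    return len(set(labels)) - (1 if -1 in labels else 0)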
def mkLatLong():
#### TME: Get start time
start_time = time.time()
####
sess = requests.Session()
dbobj=dblayer.classDBLayer()
projection = [{"$project": {"_id": 0, "mag": "$properties.mag",
"depth": {"$arrayElemAt": ["$geometry.coordinates", 2]},
"longitude": {"$arrayElemAt": ["$geometry.coordinates", 0]},
"latitude": {"$arrayElemAt": ["$geometry.coordinates", 1]}}}]
df = pd.DataFrame(list(dbobj.doaggregate(projection)))
df = df[['longitude', 'latitude']].copy()
#### TME: Elapsed time taken to read data from MongoDB
fileobj = testfile.classFileWrite()
elapsed = time.time() - start_time
fileobj.writeline()
str1 = str(elapsed) + " secs required to read " + str(df['latitude'].count()) + " records from database."
fileobj.writelog("Reading Longitude and Latitude")
fileobj.writelog(str1)
####
#### TME: Get start time
start_time = time.time()
####
div = showLatLongInCluster(df.values)
response = """<html><title></title><head><meta charset=\"utf8\"> </head> <body>""" + div + """</body> </html>"""
dbobj.closedb()
#### TME: Elapsed time taken to cluster and plot data
elapsed = time.time() - start_time
fileobj.writeline()
str1 = "Time taken: " + str(elapsed)
fileobj.writelog("Applying DBSCAN clustering and plotting its output")
fileobj.writelog(str1)
fileobj.writeline()
fileobj.closefile()
####
return response
|
abhishek8gupta/sp17-i524
|
project/S17-IO-3017/code/projectearth/dbscanplot.py
|
Python
|
apache-2.0
| 3,601
|
"""Support for MySensors covers."""
from homeassistant.components import mysensors
from homeassistant.components.cover import ATTR_POSITION, DOMAIN, CoverEntity
from homeassistant.const import STATE_OFF, STATE_ON
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the mysensors platform for covers."""
mysensors.setup_mysensors_platform(
hass,
DOMAIN,
discovery_info,
MySensorsCover,
async_add_entities=async_add_entities,
)
class MySensorsCover(mysensors.device.MySensorsEntity, CoverEntity):
"""Representation of the value of a MySensors Cover child node."""
@property
def assumed_state(self):
"""Return True if unable to access real state of entity."""
return self.gateway.optimistic
@property
def is_closed(self):
"""Return True if cover is closed."""
set_req = self.gateway.const.SetReq
if set_req.V_DIMMER in self._values:
return self._values.get(set_req.V_DIMMER) == 0
return self._values.get(set_req.V_LIGHT) == STATE_OFF
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
set_req = self.gateway.const.SetReq
return self._values.get(set_req.V_DIMMER)
async def async_open_cover(self, **kwargs):
"""Move the cover up."""
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_UP, 1, ack=1
)
if self.gateway.optimistic:
# Optimistically assume that cover has changed state.
if set_req.V_DIMMER in self._values:
self._values[set_req.V_DIMMER] = 100
else:
self._values[set_req.V_LIGHT] = STATE_ON
self.async_write_ha_state()
async def async_close_cover(self, **kwargs):
"""Move the cover down."""
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_DOWN, 1, ack=1
)
if self.gateway.optimistic:
# Optimistically assume that cover has changed state.
if set_req.V_DIMMER in self._values:
self._values[set_req.V_DIMMER] = 0
else:
self._values[set_req.V_LIGHT] = STATE_OFF
self.async_write_ha_state()
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
position = kwargs.get(ATTR_POSITION)
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_DIMMER, position, ack=1
)
if self.gateway.optimistic:
# Optimistically assume that cover has changed state.
self._values[set_req.V_DIMMER] = position
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs):
"""Stop the device."""
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_STOP, 1, ack=1
)
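# Illustrative sketch (not part of the original module): the handlers above
# update self._values immediately whenever the gateway is "optimistic", i.e.
# when no acknowledgement is expected back from the device. The toy class
# below shows the same pattern without any Home Assistant or MySensors
# machinery; its name and attributes are invented for the example.
class _OptimisticCoverSketch:
    def __init__(self, optimistic):
        self.optimistic = optimistic
        self.position = None  # Unknown until the device reports a value.
    def send_set_position(self, position):
        # A real implementation would send the command to the gateway here.
        if self.optimistic:
            # Assume the command succeeded instead of waiting for a report.
            self.position = position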
|
tchellomello/home-assistant
|
homeassistant/components/mysensors/cover.py
|
Python
|
apache-2.0
| 3,250
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Load plugin assets from disk."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
_PLUGINS_DIR = "plugins"
def _IsDirectory(parent, item):
"""Helper that returns if parent/item is a directory."""
return tf.gfile.IsDirectory(os.path.join(parent, item))
def PluginDirectory(logdir, plugin_name):
"""Returns the plugin directory for plugin_name."""
return os.path.join(logdir, _PLUGINS_DIR, plugin_name)
def ListPlugins(logdir):
"""List all the plugins that have registered assets in logdir.
If the plugins_dir does not exist, it returns an empty list. This maintains
compatibility with old directories that have no plugins written.
Args:
logdir: A directory that was created by a TensorFlow events writer.
Returns:
a list of plugin names, as strings
"""
plugins_dir = os.path.join(logdir, _PLUGINS_DIR)
if not tf.gfile.IsDirectory(plugins_dir):
return []
entries = tf.gfile.ListDirectory(plugins_dir)
return [x for x in entries if _IsDirectory(plugins_dir, x)]
def ListAssets(logdir, plugin_name):
"""List all the assets that are available for given plugin in a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: A string name of a plugin to list assets for.
Returns:
A string list of available plugin assets. If the plugin subdirectory does
not exist (either because the logdir doesn't exist, or because the plugin
didn't register) an empty list is returned.
"""
plugin_dir = PluginDirectory(logdir, plugin_name)
if not tf.gfile.IsDirectory(plugin_dir):
return []
entries = tf.gfile.ListDirectory(plugin_dir)
return [x for x in entries if not _IsDirectory(plugin_dir, x)]
def RetrieveAsset(logdir, plugin_name, asset_name):
"""Retrieve a particular plugin asset from a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: The plugin we want an asset from.
asset_name: The name of the requested asset.
Returns:
string contents of the plugin asset.
Raises:
KeyError: if the asset does not exist.
"""
asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
try:
with tf.gfile.Open(asset_path, "r") as f:
return f.read()
except tf.errors.NotFoundError:
raise KeyError("Asset path %s not found" % asset_path)
except tf.errors.OpError as e:
raise KeyError("Couldn't read asset path: %s, OpError %s" % (asset_path, e))
|
sjperkins/tensorflow
|
tensorflow/tensorboard/backend/event_processing/plugin_asset_util.py
|
Python
|
apache-2.0
| 3,278
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with basic entity definitions for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import with_statement # An extra future import for testing.
def simple_function(x):
"""Docstring."""
return x # comment
def nested_functions(x):
"""Docstring."""
def inner_fn(y):
return y
return inner_fn(x)
def function_with_print():
print('foo')
simple_lambda = lambda: None
class SimpleClass(object):
def simple_method(self):
return self
def method_with_print(self):
print('foo')
def function_with_multiline_call(x):
"""Docstring."""
return range(
x,
x + 1,
)
def basic_decorator(f):
return f
@basic_decorator
@basic_decorator
def decorated_function(x):
if x > 0:
return 1
return 2
|
annarev/tensorflow
|
tensorflow/python/autograph/pyct/testing/basic_definitions.py
|
Python
|
apache-2.0
| 1,533
|
#!/usr/bin/env python
""" hg-to-git.py - A Mercurial to GIT converter
Copyright (C)2007 Stelian Pop <stelian@popies.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import os, os.path, sys
import tempfile, pickle, getopt
import re
if sys.hexversion < 0x02030000:
# The behavior of the pickle module changed significantly in 2.3
sys.stderr.write("hg-to-git.py: requires Python 2.3 or later.\n")
sys.exit(1)
# Maps hg version -> git version
hgvers = {}
# List of children for each hg revision
hgchildren = {}
# List of parents for each hg revision
hgparents = {}
# Current branch for each hg revision
hgbranch = {}
# Number of new changesets converted from hg
hgnewcsets = 0
#------------------------------------------------------------------------------
def usage():
print """\
%s: [OPTIONS] <hgprj>
options:
-s, --gitstate=FILE: name of the state to be saved/read
for incrementals
-n, --nrepack=INT: number of changesets that will trigger
a repack (default=0, -1 to deactivate)
-v, --verbose: be verbose
required:
hgprj: name of the HG project to import (directory)
""" % sys.argv[0]
#------------------------------------------------------------------------------
def getgitenv(user, date):
env = ''
elems = re.compile('(.*?)\s+<(.*)>').match(user)
if elems:
env += 'export GIT_AUTHOR_NAME="%s" ;' % elems.group(1)
env += 'export GIT_COMMITTER_NAME="%s" ;' % elems.group(1)
env += 'export GIT_AUTHOR_EMAIL="%s" ;' % elems.group(2)
env += 'export GIT_COMMITTER_EMAIL="%s" ;' % elems.group(2)
else:
env += 'export GIT_AUTHOR_NAME="%s" ;' % user
env += 'export GIT_COMMITTER_NAME="%s" ;' % user
env += 'export GIT_AUTHOR_EMAIL= ;'
env += 'export GIT_COMMITTER_EMAIL= ;'
env += 'export GIT_AUTHOR_DATE="%s" ;' % date
env += 'export GIT_COMMITTER_DATE="%s" ;' % date
return env
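#------------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the regex above splits
# an hg author string such as 'Jane Doe <jane@example.org>' into a name and an
# email, falling back to the raw string when no email is present. This
# standalone helper shows just that parsing step; its name is invented here.
def split_author_sketch(user):
    elems = re.compile(r'(.*?)\s+<(.*)>').match(user)
    if elems:
        return elems.group(1), elems.group(2)
    return user, ''
# split_author_sketch('Jane Doe <jane@example.org>') -> ('Jane Doe', 'jane@example.org')
# split_author_sketch('anonymous')                   -> ('anonymous', '')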
#------------------------------------------------------------------------------
state = ''
opt_nrepack = 0
verbose = False
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:n:v', ['gitstate=', 'tempdir=', 'nrepack=', 'verbose'])
for o, a in opts:
if o in ('-s', '--gitstate'):
state = a
state = os.path.abspath(state)
if o in ('-n', '--nrepack'):
opt_nrepack = int(a)
if o in ('-v', '--verbose'):
verbose = True
if len(args) != 1:
raise Exception('params')
except:
usage()
sys.exit(1)
hgprj = args[0]
os.chdir(hgprj)
if state:
if os.path.exists(state):
if verbose:
print 'State does exist, reading'
f = open(state, 'r')
hgvers = pickle.load(f)
else:
print 'State does not exist, first run'
sock = os.popen('hg tip --template "{rev}"')
tip = sock.read()
if sock.close():
sys.exit(1)
if verbose:
print 'tip is', tip
# Calculate the branches
if verbose:
print 'analysing the branches...'
hgchildren["0"] = ()
hgparents["0"] = (None, None)
hgbranch["0"] = "master"
for cset in range(1, int(tip) + 1):
hgchildren[str(cset)] = ()
prnts = os.popen('hg log -r %d --template "{parents}"' % cset).read().strip().split(' ')
prnts = map(lambda x: x[:x.find(':')], prnts)
if prnts[0] != '':
parent = prnts[0].strip()
else:
parent = str(cset - 1)
hgchildren[parent] += ( str(cset), )
if len(prnts) > 1:
mparent = prnts[1].strip()
hgchildren[mparent] += ( str(cset), )
else:
mparent = None
hgparents[str(cset)] = (parent, mparent)
if mparent:
# For merge changesets, take either one, preferably the 'master' branch
if hgbranch[mparent] == 'master':
hgbranch[str(cset)] = 'master'
else:
hgbranch[str(cset)] = hgbranch[parent]
else:
# Normal changesets
# For first children, take the parent branch, for the others create a new branch
if hgchildren[parent][0] == str(cset):
hgbranch[str(cset)] = hgbranch[parent]
else:
hgbranch[str(cset)] = "branch-" + str(cset)
if not hgvers.has_key("0"):
print 'creating repository'
os.system('git init')
# loop through every hg changeset
for cset in range(int(tip) + 1):
# incremental, already seen
if hgvers.has_key(str(cset)):
continue
hgnewcsets += 1
# get info
log_data = os.popen('hg log -r %d --template "{tags}\n{date|date}\n{author}\n"' % cset).readlines()
tag = log_data[0].strip()
date = log_data[1].strip()
user = log_data[2].strip()
parent = hgparents[str(cset)][0]
mparent = hgparents[str(cset)][1]
#get comment
(fdcomment, filecomment) = tempfile.mkstemp()
csetcomment = os.popen('hg log -r %d --template "{desc}"' % cset).read().strip()
os.write(fdcomment, csetcomment)
os.close(fdcomment)
print '-----------------------------------------'
print 'cset:', cset
print 'branch:', hgbranch[str(cset)]
print 'user:', user
print 'date:', date
print 'comment:', csetcomment
if parent:
print 'parent:', parent
if mparent:
print 'mparent:', mparent
if tag:
print 'tag:', tag
print '-----------------------------------------'
# checkout the parent if necessary
if cset != 0:
if hgbranch[str(cset)] == "branch-" + str(cset):
print 'creating new branch', hgbranch[str(cset)]
os.system('git checkout -b %s %s' % (hgbranch[str(cset)], hgvers[parent]))
else:
print 'checking out branch', hgbranch[str(cset)]
os.system('git checkout %s' % hgbranch[str(cset)])
# merge
if mparent:
if hgbranch[parent] == hgbranch[str(cset)]:
otherbranch = hgbranch[mparent]
else:
otherbranch = hgbranch[parent]
print 'merging', otherbranch, 'into', hgbranch[str(cset)]
os.system(getgitenv(user, date) + 'git merge --no-commit -s ours "" %s %s' % (hgbranch[str(cset)], otherbranch))
# remove everything except .git and .hg directories
os.system('find . \( -path "./.hg" -o -path "./.git" \) -prune -o ! -name "." -print | xargs rm -rf')
# repopulate with checkouted files
os.system('hg update -C %d' % cset)
# add new files
os.system('git ls-files -x .hg --others | git update-index --add --stdin')
# delete removed files
os.system('git ls-files -x .hg --deleted | git update-index --remove --stdin')
# commit
os.system(getgitenv(user, date) + 'git commit --allow-empty -a -F %s' % filecomment)
os.unlink(filecomment)
# tag
if tag and tag != 'tip':
os.system(getgitenv(user, date) + 'git tag %s' % tag)
# delete branch if not used anymore...
if mparent and len(hgchildren[str(cset)]):
print "Deleting unused branch:", otherbranch
os.system('git branch -d %s' % otherbranch)
# retrieve and record the version
vvv = os.popen('git show --quiet --pretty=format:%H').read()
print 'record', cset, '->', vvv
hgvers[str(cset)] = vvv
if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
os.system('git repack -a -d')
# write the state for incrementals
if state:
if verbose:
print 'Writing state'
f = open(state, 'w')
pickle.dump(hgvers, f)
# vim: et ts=8 sw=4 sts=4
|
pniebla/test-repo-console
|
svn/git-1.8.3.3.tar/git-1.8.3.3/git-1.8.3.3/contrib/hg-to-git/hg-to-git.py
|
Python
|
mit
| 8,052
|
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright (C) 2020 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import subprocess
from .base import Installer
__author__ = "Martine S. Lenders"
__copyright__ = "Copyright (C) 2020 Freie Universität Berlin"
__credits__ = ["Martine S. Lenders"]
__license__ = "LGPLv2.1"
__maintainer__ = "Martine S. Lenders"
__email__ = "m.lenders@fu-berlin.de"
class Apt(Installer):
def _install(self, package):
subprocess.run(["apt-get", "-y", "install",
package[self.os]["name"]])
|
kYc0o/RIOT
|
dist/tools/dhcpv6-pd_ia/pkg/apt.py
|
Python
|
lgpl-2.1
| 703
|
# Copyright (c) 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
from cinder import context
from cinder import exception
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.dell import dell_storagecenter_common
from cinder.volume.drivers.dell import dell_storagecenter_iscsi
from cinder.volume import volume_types
# We patch these here as they are used by every test to keep
# from trying to contact a Dell Storage Center.
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
class DellSCSanISCSIDriverTestCase(test.TestCase):
VOLUME = {u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
MAPPINGS = [{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-46-250',
u'description': u'Cinder Clone Replay',
u'parent': {u'instanceId': u'64702.46.249',
u'instanceName': u'64702-46-249',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.46.250',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'12/09/2014 03:52:08 PM',
u'createVolume': {u'instanceId': u'64702.46',
u'instanceName':
u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b',
u'objectType': u'ScVolume'},
u'expireTime': u'12/09/2014 04:52:08 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7910,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'12/09/2014 03:52:08 PM',
u'size': u'0.0 Bytes'
}
SCRPLAYPROFILE = {u'ruleCount': 0,
u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
u'volumeCount': 0,
u'scName': u'Storage Center 64702',
u'notes': u'Created by Dell Cinder Driver',
u'scSerialNumber': 64702,
u'userCreated': True,
u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
u'instanceId': u'64702.11',
u'enforceReplayCreationTimeout': False,
u'replayCreationTimeout': 20,
u'objectType': u'ScReplayProfile',
u'type': u'Consistent',
u'expireIncompleteReplaySets': True}
IQN = 'iqn.2002-03.com.compellent:5000D31000000001'
ISCSI_PROPERTIES = {'access_mode': 'rw',
'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe44'],
'target_lun': 1,
'target_luns': [1, 1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260',
u'192.168.0.22:3260']}
def setUp(self):
super(DellSCSanISCSIDriverTestCase, self).setUp()
# self.configuration is a mock, so it starts out as a blank slate. Mocks
# created in setUp are not preconfigured, so the driver configuration
# attributes we rely on are set explicitly here.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "mmm"
self.configuration.dell_sc_ssn = 12345
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
self.configuration.dell_sc_api_port = 3033
self.configuration.iscsi_ip_address = '192.168.1.1'
self.configuration.iscsi_port = 3260
self._context = context.get_admin_context()
self.driver = dell_storagecenter_iscsi.DellStorageCenterISCSIDriver(
configuration=self.configuration)
self.driver.do_setup(None)
self.driver._stats = {'QoS_support': False,
'volume_backend_name': 'dell-1',
'free_capacity_gb': 12123,
'driver_version': '1.0.1',
'total_capacity_gb': 12388,
'reserved_percentage': 0,
'vendor_name': 'Dell',
'storage_protocol': 'iSCSI'}
self.volid = str(uuid.uuid4())
self.volume_name = "volume" + self.volid
self.connector = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
'host': 'fakehost'}
self.connector_multipath = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
'host': 'fakehost',
'multipath': True}
self.access_record_output = [
"ID Initiator Ipaddress AuthMethod UserName Apply-To",
"--- --------------- ------------- ---------- ---------- --------",
"1 iqn.1993-08.org.debian:01:222 *.*.*.* none both",
" 7dab76162"]
self.fake_iqn = 'iqn.2002-03.com.compellent:5000D31000000001'
self.properties = {
'target_discovered': True,
'target_portal': '%s:3260'
% self.driver.configuration.dell_sc_iscsi_ip,
'target_iqn': self.fake_iqn,
'volume_id': 1}
self._model_update = {
'provider_location': "%s:3260,1 %s 0"
% (self.driver.configuration.dell_sc_iscsi_ip,
self.fake_iqn)
# ,
# 'provider_auth': 'CHAP %s %s' % (
# self.configuration.eqlx_chap_login,
# self.configuration.eqlx_chap_password)
}
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
def test_create_volume(self,
mock_find_sc,
mock_create_volume,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name, 'size': 1}
self.driver.create_volume(volume)
mock_create_volume.assert_called_once_with(self.volume_name,
1,
None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value='fake')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'update_cg_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
def test_create_volume_consistency_group(self,
mock_find_sc,
mock_create_volume,
mock_update_cg_volumes,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name, 'size': 1,
'consistencygroup_id': 'guid'}
self.driver.create_volume(volume)
mock_create_volume.assert_called_once_with(self.volume_name,
1,
None)
self.assertTrue(mock_find_replay_profile.called)
self.assertTrue(mock_update_cg_volumes.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype:storageprofile': 'HighPriority'})
def test_create_volume_storage_profile(self,
mock_extra,
mock_find_sc,
mock_create_volume,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name, 'size': 1, 'volume_type_id': 'abc'}
self.driver.create_volume(volume)
mock_create_volume.assert_called_once_with(self.volume_name,
1,
"HighPriority")
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
def test_create_volume_failure(self,
mock_find_sc,
mock_create_volume,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name, 'size': 1}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, volume)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
def test_delete_volume(self,
mock_find_sc,
mock_delete_volume,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name, 'size': 1}
self.driver.delete_volume(volume)
mock_delete_volume.assert_called_once_with(self.volume_name)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_volume',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
def test_delete_volume_failure(self,
mock_find_sc,
mock_delete_volume,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name, 'size': 1}
self.assertRaises(exception.VolumeIsBusy,
self.driver.delete_volume,
volume)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS[0])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_iscsi_properties',
return_value=ISCSI_PROPERTIES)
def test_initialize_connection(self,
mock_find_iscsi_props,
mock_map_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name}
connector = self.connector
data = self.driver.initialize_connection(volume, connector)
self.assertEqual('iscsi', data['driver_volume_type'])
# Verify that find_volume has been called and that it has been called twice.
mock_find_volume.assert_any_call(self.volume_name)
assert mock_find_volume.call_count == 2
expected = {'data': self.ISCSI_PROPERTIES,
'driver_volume_type': 'iscsi'}
self.assertEqual(expected, data, 'Unexpected return value')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS[0])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_iscsi_properties',
return_value=ISCSI_PROPERTIES)
def test_initialize_connection_multi_path(self,
mock_find_iscsi_props,
mock_map_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where connection is multipath
volume = {'id': self.volume_name}
connector = self.connector_multipath
data = self.driver.initialize_connection(volume, connector)
self.assertEqual('iscsi', data['driver_volume_type'])
# Verify that find_volume has been called and that it has been called twice.
mock_find_volume.assert_any_call(self.volume_name)
assert mock_find_volume.call_count == 2
props = self.ISCSI_PROPERTIES
expected = {'data': props,
'driver_volume_type': 'iscsi'}
self.assertEqual(expected, data, 'Unexpected return value')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_iscsi_properties',
return_value=None)
def test_initialize_connection_no_iqn(self,
mock_find_iscsi_properties,
mock_map_volume,
mock_find_volume,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name}
connector = {}
mock_find_iscsi_properties.side_effect = Exception('abc')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_iscsi_properties',
return_value=None)
def test_initialize_connection_no_server(self,
mock_find_iscsi_properties,
mock_map_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name}
connector = {}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_iscsi_properties',
return_value=None)
def test_initialize_connection_vol_not_found(self,
mock_find_iscsi_properties,
mock_map_volume,
mock_find_volume,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'name': self.volume_name}
connector = {}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_iscsi_properties',
return_value=ISCSI_PROPERTIES)
def test_initialize_connection_map_vol_fail(self,
mock_find_iscsi_props,
mock_map_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where map_volume returns None (no mappings)
volume = {'id': self.volume_name}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
def test_terminate_connection(self,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name}
connector = self.connector
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
def test_terminate_connection_no_server(self,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'name': self.volume_name}
connector = {'initiator': ''}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
def test_terminate_connection_no_volume(self,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'name': self.volume_name}
connector = {'initiator': ''}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=False)
def test_terminate_connection_failure(self,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'name': self.volume_name}
connector = {'initiator': ''}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value='fake')
def test_create_snapshot(self,
mock_create_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
snapshot = {'volume_id': self.volume_name,
'id': self.volume_name}
self.driver.create_snapshot(snapshot)
self.assertEqual('available', snapshot['status'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=None)
def test_create_snapshot_no_volume(self,
mock_create_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
snapshot = {'volume_id': self.volume_name,
'id': self.volume_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
snapshot)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=None)
def test_create_snapshot_failure(self,
mock_create_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
snapshot = {'volume_id': self.volume_name,
'id': self.volume_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
snapshot)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value='fake')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=VOLUME)
def test_create_volume_from_snapshot(self,
mock_create_view_volume,
mock_find_replay,
mock_find_volume,
mock_find_sc,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'fake'}
snapshot = {'id': 'fake', 'volume_id': 'fake'}
self.driver.create_volume_from_snapshot(volume, snapshot)
mock_create_view_volume.assert_called_once_with('fake',
'fake')
self.assertTrue(mock_find_replay.called)
self.assertTrue(mock_find_volume.called)
self.assertFalse(mock_find_replay_profile.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value='fake')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'update_cg_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value='fake')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=VOLUME)
def test_create_volume_from_snapshot_cg(self,
mock_create_view_volume,
mock_find_replay,
mock_find_volume,
mock_find_sc,
mock_update_cg_volumes,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'fake', 'consistencygroup_id': 'guid'}
snapshot = {'id': 'fake', 'volume_id': 'fake'}
self.driver.create_volume_from_snapshot(volume, snapshot)
mock_create_view_volume.assert_called_once_with('fake',
'fake')
self.assertTrue(mock_find_replay.called)
self.assertTrue(mock_find_volume.called)
self.assertTrue(mock_find_replay_profile.called)
self.assertTrue(mock_update_cg_volumes.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value='fake')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=None)
def test_create_volume_from_snapshot_failed(self,
mock_create_view_volume,
mock_find_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'fake'}
snapshot = {'id': 'fake', 'volume_id': 'fake'}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
volume, snapshot)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=VOLUME)
def test_create_volume_from_snapshot_no_replay(self,
mock_create_view_volume,
mock_find_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'fake'}
snapshot = {'id': 'fake', 'volume_id': 'fake'}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
volume, snapshot)
self.assertTrue(mock_find_volume.called)
self.assertTrue(mock_find_replay.called)
self.assertFalse(mock_create_view_volume.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_cloned_volume',
return_value=VOLUME)
def test_create_cloned_volume(self,
mock_create_cloned_volume,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name + '_clone'}
src_vref = {'id': self.volume_name}
self.driver.create_cloned_volume(volume, src_vref)
mock_create_cloned_volume.assert_called_once_with(
self.volume_name + '_clone',
self.VOLUME)
self.assertTrue(mock_find_volume.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value='fake')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'update_cg_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_cloned_volume',
return_value=VOLUME)
def test_create_cloned_volume_consistency_group(self,
mock_create_cloned_volume,
mock_find_volume,
mock_find_sc,
mock_update_cg_volumes,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name + '_clone',
'consistencygroup_id': 'guid'}
src_vref = {'id': self.volume_name}
self.driver.create_cloned_volume(volume, src_vref)
mock_create_cloned_volume.assert_called_once_with(
self.volume_name + '_clone',
self.VOLUME)
self.assertTrue(mock_find_volume.called)
self.assertTrue(mock_find_replay_profile.called)
self.assertTrue(mock_update_cg_volumes.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_cloned_volume',
return_value=VOLUME)
def test_create_cloned_volume_no_volume(self,
mock_create_cloned_volume,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name + '_clone'}
src_vref = {'id': self.volume_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
volume, src_vref)
self.assertTrue(mock_find_volume.called)
self.assertFalse(mock_create_cloned_volume.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_replay',
return_value=True)
def test_delete_snapshot(self,
mock_delete_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
snapshot = {'volume_id': self.volume_name,
'id': self.volume_name}
self.driver.delete_snapshot(snapshot)
mock_delete_replay.assert_called_once_with(
self.VOLUME, self.volume_name)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_replay',
return_value=True)
def test_delete_snapshot_no_volume(self,
mock_delete_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
snapshot = {'volume_id': self.volume_name,
'id': self.volume_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_snapshot,
snapshot)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
def test_ensure_export(self,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
volume = {'id': self.VOLUME.get(u'name')}
self.driver.ensure_export(context, volume)
mock_find_volume.assert_called_once_with(
self.VOLUME.get(u'name'))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
def test_ensure_export_failed(self,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
volume = {'id': self.VOLUME.get(u'name')}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.ensure_export,
context, volume)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
def test_ensure_export_no_volume(self,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
volume = {'id': self.VOLUME.get(u'name')}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.ensure_export,
context,
volume)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'expand_volume',
return_value=VOLUME)
def test_extend_volume(self,
mock_expand_volume,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'name': self.volume_name, 'size': 1}
new_size = 2
self.driver.extend_volume(volume, new_size)
mock_expand_volume.assert_called_once_with(self.VOLUME, new_size)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'expand_volume',
return_value=None)
def test_extend_volume_no_volume(self,
mock_expand_volume,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'name': self.volume_name, 'size': 1}
new_size = 2
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
volume, new_size)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=64702)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_storage_usage',
return_value={'availableSpace': 100, 'freeSpace': 50})
def test_update_volume_stats_with_refresh(self,
mock_get_storage_usage,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
stats = self.driver.get_volume_stats(True)
self.assertEqual('iSCSI', stats['storage_protocol'])
        mock_get_storage_usage.assert_called_once_with(64702)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=64702)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_storage_usage',
return_value={'availableSpace': 100, 'freeSpace': 50})
def test_get_volume_stats_no_refresh(self,
mock_get_storage_usage,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
stats = self.driver.get_volume_stats(False)
self.assertEqual('iSCSI', stats['storage_protocol'])
        self.assertFalse(mock_get_storage_usage.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'rename_volume',
return_value=True)
def test_update_migrated_volume(self,
mock_rename_volume,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 111}
backend_volume = {'id': 112}
model_update = {'_name_id': None}
rt = self.driver.update_migrated_volume(None, volume, backend_volume,
'available')
mock_rename_volume.assert_called_once_with(self.VOLUME,
volume['id'])
self.assertEqual(model_update, rt)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'rename_volume',
return_value=False)
def test_update_migrated_volume_rename_fail(self,
mock_rename_volume,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 111}
backend_volume = {'id': 112, '_name_id': 113}
rt = self.driver.update_migrated_volume(None, volume, backend_volume,
'available')
mock_rename_volume.assert_called_once_with(self.VOLUME,
volume['id'])
self.assertEqual({'_name_id': 113}, rt)
def test_update_migrated_volume_no_volume_id(self,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': None}
backend_volume = {'id': 112, '_name_id': 113}
rt = self.driver.update_migrated_volume(None, volume, backend_volume,
'available')
self.assertEqual({'_name_id': 113}, rt)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
def test_update_migrated_volume_no_backend_id(self,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 111}
backend_volume = {'id': None, '_name_id': None}
rt = self.driver.update_migrated_volume(None, volume, backend_volume,
'available')
mock_find_sc.assert_called_once_with()
mock_find_volume.assert_called_once_with(None)
self.assertEqual({'_name_id': None}, rt)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay_profile',
return_value=SCRPLAYPROFILE)
def test_create_consistencygroup(self,
mock_create_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
self.driver.create_consistencygroup(context, group)
mock_create_replay_profile.assert_called_once_with(group['id'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay_profile',
return_value=None)
def test_create_consistencygroup_fail(self,
mock_create_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_consistencygroup, context, group)
mock_create_replay_profile.assert_called_once_with(group['id'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_replay_profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=SCRPLAYPROFILE)
@mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
'delete_volume')
def test_delete_consistencygroup(self,
mock_delete_volume,
mock_find_replay_profile,
mock_delete_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
self.driver.db = mock.Mock()
mock_volume = mock.MagicMock()
expected_volumes = [mock_volume]
self.driver.db.volume_get_all_by_group.return_value = expected_volumes
context = {}
group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
'status': 'deleted'}
model_update, volumes = self.driver.delete_consistencygroup(context,
group)
mock_find_replay_profile.assert_called_once_with(group['id'])
mock_delete_replay_profile.assert_called_once_with(self.SCRPLAYPROFILE)
mock_delete_volume.assert_called_once_with(mock_volume)
self.assertEqual(group['status'], model_update['status'])
self.assertEqual(expected_volumes, volumes)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_replay_profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=None)
@mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
'delete_volume')
def test_delete_consistencygroup_not_found(self,
mock_delete_volume,
mock_find_replay_profile,
mock_delete_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
self.driver.db = mock.Mock()
mock_volume = mock.MagicMock()
expected_volumes = [mock_volume]
self.driver.db.volume_get_all_by_group.return_value = expected_volumes
context = {}
group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
'status': 'deleted'}
model_update, volumes = self.driver.delete_consistencygroup(context,
group)
mock_find_replay_profile.assert_called_once_with(group['id'])
self.assertFalse(mock_delete_replay_profile.called)
mock_delete_volume.assert_called_once_with(mock_volume)
self.assertEqual(group['status'], model_update['status'])
self.assertEqual(expected_volumes, volumes)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'update_cg_volumes',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=SCRPLAYPROFILE)
def test_update_consistencygroup(self,
mock_find_replay_profile,
mock_update_cg_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
add_volumes = [{'id': '101'}]
remove_volumes = [{'id': '102'}]
rt1, rt2, rt3 = self.driver.update_consistencygroup(context,
group,
add_volumes,
remove_volumes)
mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE,
add_volumes,
remove_volumes)
mock_find_replay_profile.assert_called_once_with(group['id'])
self.assertIsNone(rt1)
self.assertIsNone(rt2)
self.assertIsNone(rt3)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=None)
def test_update_consistencygroup_not_found(self,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
add_volumes = [{'id': '101'}]
remove_volumes = [{'id': '102'}]
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.update_consistencygroup,
context,
group,
add_volumes,
remove_volumes)
mock_find_replay_profile.assert_called_once_with(group['id'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'update_cg_volumes',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=SCRPLAYPROFILE)
def test_update_consistencygroup_error(self,
mock_find_replay_profile,
mock_update_cg_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
add_volumes = [{'id': '101'}]
remove_volumes = [{'id': '102'}]
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.update_consistencygroup,
context,
group,
add_volumes,
remove_volumes)
mock_find_replay_profile.assert_called_once_with(group['id'])
mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE,
add_volumes,
remove_volumes)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'snap_cg_replay',
return_value={'instanceId': '100'})
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=SCRPLAYPROFILE)
@mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
def test_create_cgsnapshot(self,
mock_get_all_for_cgsnapshot,
mock_find_replay_profile,
mock_snap_cg_replay,
mock_close_connection,
mock_open_connection,
mock_init):
mock_snapshot = mock.MagicMock()
expected_snapshots = [mock_snapshot]
mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
context = {}
cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
'id': '100'}
model_update, snapshots = self.driver.create_cgsnapshot(context, cggrp)
mock_find_replay_profile.assert_called_once_with(
cggrp['consistencygroup_id'])
mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
cggrp['id'],
0)
self.assertEqual('available', model_update['status'])
self.assertEqual(expected_snapshots, snapshots)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=None)
def test_create_cgsnapshot_profile_not_found(self,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
'id': '100'}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cgsnapshot,
context,
cggrp)
mock_find_replay_profile.assert_called_once_with(
cggrp['consistencygroup_id'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'snap_cg_replay',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=SCRPLAYPROFILE)
def test_create_cgsnapshot_fail(self,
mock_find_replay_profile,
mock_snap_cg_replay,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
'id': '100'}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cgsnapshot,
context,
cggrp)
mock_find_replay_profile.assert_called_once_with(
cggrp['consistencygroup_id'])
mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
cggrp['id'],
0)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_cg_replay',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=SCRPLAYPROFILE)
@mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
def test_delete_cgsnapshot(self,
mock_get_all_for_cgsnapshot,
mock_find_replay_profile,
mock_delete_cg_replay,
mock_close_connection,
mock_open_connection,
mock_init):
mock_snapshot = mock.MagicMock()
expected_snapshots = [mock_snapshot]
mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
context = {}
cgsnap = {'consistencygroup_id':
'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
'id': '100',
'status': 'deleted'}
model_update, snapshots = self.driver.delete_cgsnapshot(context,
cgsnap)
mock_find_replay_profile.assert_called_once_with(
cgsnap['consistencygroup_id'])
mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
cgsnap['id'])
self.assertEqual({'status': cgsnap['status']}, model_update)
self.assertEqual(expected_snapshots, snapshots)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_cg_replay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=None)
@mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
def test_delete_cgsnapshot_profile_not_found(self,
mock_get_all_for_cgsnapshot,
mock_find_replay_profile,
mock_delete_cg_replay,
mock_close_connection,
mock_open_connection,
mock_init):
mock_snapshot = mock.MagicMock()
expected_snapshots = [mock_snapshot]
mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
context = {}
cgsnap = {'consistencygroup_id':
'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
'id': '100',
'status': 'deleted'}
model_update, snapshots = self.driver.delete_cgsnapshot(context,
cgsnap)
mock_find_replay_profile.assert_called_once_with(
cgsnap['consistencygroup_id'])
self.assertFalse(mock_delete_cg_replay.called)
self.assertEqual({'status': cgsnap['status']}, model_update)
self.assertEqual(expected_snapshots, snapshots)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_cg_replay',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=SCRPLAYPROFILE)
def test_delete_cgsnapshot_profile_failed_delete(self,
mock_find_replay_profile,
mock_delete_cg_replay,
mock_close_connection,
mock_open_connection,
mock_init):
context = {}
cgsnap = {'consistencygroup_id':
'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
'id': '100',
'status': 'available'}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_cgsnapshot,
context,
cgsnap)
mock_find_replay_profile.assert_called_once_with(
cgsnap['consistencygroup_id'])
mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
cgsnap['id'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'manage_existing')
def test_manage_existing(self,
mock_manage_existing,
mock_close_connection,
mock_open_connection,
mock_init):
# Very little to do in this one. The call is sent
# straight down.
volume = {'id': 'guid'}
existing_ref = {'source-name': 'imavolumename'}
self.driver.manage_existing(volume, existing_ref)
mock_manage_existing.assert_called_once_with(volume['id'],
existing_ref)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'manage_existing')
def test_manage_existing_id(self,
mock_manage_existing,
mock_close_connection,
mock_open_connection,
mock_init):
# Very little to do in this one. The call is sent
# straight down.
volume = {'id': 'guid'}
existing_ref = {'source-id': 'imadeviceid'}
self.driver.manage_existing(volume, existing_ref)
mock_manage_existing.assert_called_once_with(volume['id'],
existing_ref)
def test_manage_existing_bad_ref(self,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'guid'}
existing_ref = {'banana-name': 'imavolumename'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
volume,
existing_ref)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_unmanaged_volume_size',
return_value=4)
def test_manage_existing_get_size(self,
mock_get_unmanaged_volume_size,
mock_close_connection,
mock_open_connection,
mock_init):
# Almost nothing to test here. Just that we call our function.
volume = {'id': 'guid'}
existing_ref = {'source-name': 'imavolumename'}
res = self.driver.manage_existing_get_size(volume, existing_ref)
mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref)
# The above is 4GB and change.
self.assertEqual(4, res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_unmanaged_volume_size',
return_value=4)
def test_manage_existing_get_size_id(self,
mock_get_unmanaged_volume_size,
mock_close_connection,
mock_open_connection,
mock_init):
# Almost nothing to test here. Just that we call our function.
volume = {'id': 'guid'}
existing_ref = {'source-id': 'imadeviceid'}
res = self.driver.manage_existing_get_size(volume, existing_ref)
mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref)
# The above is 4GB and change.
self.assertEqual(4, res)
def test_manage_existing_get_size_bad_ref(self,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'guid'}
existing_ref = {'banana-name': 'imavolumename'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
volume,
existing_ref)
def test_retype_not_extra_specs(self,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
None, None, None, {'extra_specs': None}, None)
self.assertFalse(res)
def test_retype_not_storage_profile(self,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
None, None, None, {'extra_specs': {'something': 'else'}}, None)
self.assertFalse(res)
def test_retype_same(self,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
None, None, None,
{'extra_specs': {'storagetype:storageprofile': ['A', 'A']}},
None)
self.assertTrue(res)
def test_retype_malformed(self,
mock_close_connection,
mock_open_connection,
mock_init):
LOG = self.mock_object(dell_storagecenter_common, "LOG")
res = self.driver.retype(
None, None, None,
{'extra_specs': {
'storagetype:storageprofile': ['something',
'not',
'right']}},
None)
self.assertFalse(res)
self.assertEqual(1, LOG.warning.call_count)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmanage')
def test_unmanage(self,
mock_unmanage,
mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'guid'}
self.driver.unmanage(volume)
mock_find_volume.assert_called_once_with(volume['id'])
mock_unmanage.assert_called_once_with(self.VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmanage')
def test_unmanage_volume_not_found(self,
mock_unmanage,
mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'guid'}
self.driver.unmanage(volume)
mock_find_volume.assert_called_once_with(volume['id'])
self.assertFalse(mock_unmanage.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'update_storage_profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
def test_retype(self,
mock_find_sc,
mock_find_volume,
mock_update_storage_profile,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
None, {'id': 'volid'}, None,
{'extra_specs': {'storagetype:storageprofile': ['A', 'B']}},
None)
        mock_update_storage_profile.assert_called_once_with(
self.VOLUME, 'B')
self.assertTrue(res)
|
nikesh-mahalka/cinder
|
cinder/tests/unit/test_dellsc.py
|
Python
|
apache-2.0
| 85,579
|
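The Dell SC tests above lean on stacked mock.patch.object decorators, and the mock arguments are passed in reverse order of the decorators: the decorator closest to the def supplies the first argument. The following minimal, self-contained sketch illustrates that ordering; FakeApi and DecoratorOrderTest are illustrative names only and are not part of the cinder test suite.

import unittest
from unittest import mock


class FakeApi(object):
    """Stand-in for an API class whose methods get patched."""

    def find_sc(self):
        return 0

    def find_volume(self, name):
        return None


class DecoratorOrderTest(unittest.TestCase):
    # Decorators are applied bottom-up, so mock_find_volume (bottom) is the
    # first mock argument and mock_find_sc (top) is the second.
    @mock.patch.object(FakeApi, 'find_sc', return_value=12345)
    @mock.patch.object(FakeApi, 'find_volume', return_value={'name': 'fake'})
    def test_order(self, mock_find_volume, mock_find_sc):
        api = FakeApi()
        self.assertEqual({'name': 'fake'}, api.find_volume('fake'))
        self.assertEqual(12345, api.find_sc())
        mock_find_volume.assert_called_once_with('fake')
        mock_find_sc.assert_called_once_with()


if __name__ == '__main__':
    unittest.main()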
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: default
type: stdout
short_description: default Ansible screen output
version_added: historical
description:
- This is the default output callback for ansible-playbook.
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
from ansible import constants as C
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def __init__(self):
self._play = None
self._last_task_banner = None
super(CallbackModule, self).__init__()
def v2_runner_on_failed(self, result, ignore_errors=False):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)), color=C.COLOR_ERROR)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
if delegated_vars:
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: [%s]" % result._host.get_name()
color = C.COLOR_CHANGED
else:
if delegated_vars:
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: [%s]" % result._host.get_name()
color = C.COLOR_OK
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_skipped(self, result):
if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
msg = "skipping: [%s]" % result._host.get_name()
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
def v2_playbook_on_task_start(self, task, is_conditional):
if self._play.strategy != 'free':
self._print_task_banner(task)
def _print_task_banner(self, task):
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
        # machine and we haven't run it there yet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
self._display.banner(u"TASK [%s%s]" % (task.get_name().strip(), args))
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)
self._last_task_banner = task._uuid
def v2_playbook_on_cleanup_task_start(self, task):
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
def v2_playbook_on_handler_task_start(self, task):
self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = u"PLAY"
else:
msg = u"PLAY [%s]" % name
self._play = play
self._display.banner(msg)
def v2_on_file_diff(self, result):
if result._task.loop and 'results' in result._result:
for res in result._result['results']:
if 'diff' in res and res['diff'] and res.get('changed', False):
diff = self._get_diff(res['diff'])
if diff:
self._display.display(diff)
elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
diff = self._get_diff(result._result['diff'])
if diff:
self._display.display(diff)
def v2_runner_item_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
msg = 'changed'
color = C.COLOR_CHANGED
else:
msg = 'ok'
color = C.COLOR_OK
if delegated_vars:
msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += ": [%s]" % result._host.get_name()
msg += " => (item=%s)" % (self._get_item(result._result),)
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=color)
def v2_runner_item_on_failed(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
self._handle_exception(result._result)
msg = "failed: "
if delegated_vars:
msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += "[%s]" % (result._host.get_name())
self._handle_warnings(result._result)
self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
def v2_runner_item_on_skipped(self, result):
if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result))
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_include(self, included_file):
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
screen_only=True
)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t, False),
colorize(u'ok', t['ok'], None),
colorize(u'changed', t['changed'], None),
colorize(u'unreachable', t['unreachable'], None),
colorize(u'failed', t['failures'], None)),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats
if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom: # fallback on constants for inherited plugins missing docs
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
def v2_playbook_on_start(self, playbook):
if self._display.verbosity > 1:
from os.path import basename
self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
if self._display.verbosity > 3:
# show CLI options
if self._options is not None:
for option in dir(self._options):
if option.startswith('_') or option in ['read_file', 'ensure_value', 'read_module']:
continue
val = getattr(self._options, option)
if val:
self._display.vvvv('%s: %s' % (option, val))
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name, result._result['retries'] - result._result['attempts'])
if (self._display.verbosity > 2 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += "Result was: %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_DEBUG)
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/callback/default.py
|
Python
|
bsd-3-clause
| 13,463
|
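The plugin above is Ansible's stock stdout callback. As a rough sketch of the same hook API, a stripped-down custom stdout callback could look like the following; the 'compact' name and its terse output are assumptions for illustration, not anything shipped with Ansible. Such a file would normally live in a callback_plugins directory and be selected via stdout_callback in ansible.cfg.

from ansible import constants as C
from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    """Hypothetical stdout callback that prints one short line per result."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'compact'

    def v2_runner_on_ok(self, result):
        # Reuse the same display helper the default plugin uses above.
        self._display.display("ok: [%s]" % result._host.get_name(),
                              color=C.COLOR_OK)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self._display.display("failed: [%s] => %s"
                              % (result._host.get_name(),
                                 self._dump_results(result._result)),
                              color=C.COLOR_ERROR)

    def v2_runner_on_skipped(self, result):
        self._display.display("skipping: [%s]" % result._host.get_name(),
                              color=C.COLOR_SKIP)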
# coding: utf-8
"""
Compatibility functions for unified behavior between Python 2.x and 3.x.
:author: Alex Grönholm
"""
from __future__ import unicode_literals, absolute_import
import inspect
import sys
from threading import Thread
if sys.version_info[0] < 3:
def items(d):
return d.items()
def iteritems(d):
return d.iteritems()
def next(x):
return x.next()
range = xrange # noqa
long = long # noqa
basestring = basestring # noqa
unicode = unicode # noqa
bytearray2 = bytearray
unichr = unichr # noqa
bytestr = str
tobytestr = str
def isbytestr(s):
return isinstance(s, str)
def ispython3bytestr(s):
return False
def isbytearray(s):
return isinstance(s, bytearray)
def bytetoint(b):
return ord(b)
def bytetostr(b):
return b
def strtobyte(b):
return b
import Queue
Queue = Queue.Queue
else:
def items(d):
return list(d.items())
def iteritems(d):
return d.items()
next = next
range = range
long = int
basestring = str
unicode = str
bytearray2 = bytes
unichr = chr
bytestr = bytes
def tobytestr(s):
return bytes(s, "ascii")
def isbytestr(s):
return isinstance(s, bytes)
def ispython3bytestr(s):
return isinstance(s, bytes)
def isbytearray(s):
return isinstance(s, bytearray)
def bytetoint(b):
return b
def bytetostr(b):
return str(b, encoding="ascii")
def strtobyte(s):
return bytes(s, encoding="ascii")
import queue
Queue = queue.Queue
if hasattr(inspect, "getattr_static"):
def hasattr2(obj, attr):
return bool(inspect.getattr_static(obj, attr, False))
else:
hasattr2 = hasattr
class CompatThread(Thread):
"""Compatibility Thread class.
Allows Python 2 Thread class to accept daemon kwarg in init.
"""
def __init__(self, *args, **kwargs):
daemon = None
try:
daemon = kwargs.pop("daemon")
except KeyError:
pass
super(CompatThread, self).__init__(*args, **kwargs)
if daemon:
self.daemon = daemon
|
fouzelddin/py4j
|
py4j-python/src/py4j/compat.py
|
Python
|
bsd-3-clause
| 2,249
|
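To make the intent of the compat module above concrete, here is a short usage sketch. The demo function is hypothetical, but every helper it imports is defined in the file above, and the snippet behaves the same under Python 2 and 3.

from py4j.compat import CompatThread, Queue, bytetoint, tobytestr


def demo():
    payload = tobytestr("abc")      # bytes on Python 3, str on Python 2
    first = bytetoint(payload[0])   # 97 on both interpreters
    q = Queue()
    q.put(first)

    # CompatThread accepts the daemon keyword even on Python 2.
    worker = CompatThread(target=q.get, daemon=True)
    worker.start()
    worker.join()


if __name__ == "__main__":
    demo()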
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ImportedFile'
db.create_table('projects_importedfile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='imported_files', to=orm['projects.Project'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('path', self.gf('django.db.models.fields.CharField')(max_length=255)),
('content', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('projects', ['ImportedFile'])
def backwards(self, orm):
# Deleting model 'ImportedFile'
db.delete_table('projects_importedfile')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.file': {
'Meta': {'ordering': "('denormalized_path',)", 'object_name': 'File'},
'content': ('django.db.models.fields.TextField', [], {}),
'denormalized_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'heading': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['projects.File']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
},
'projects.filerevision': {
'Meta': {'ordering': "('-revision_number',)", 'object_name': 'FileRevision'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diff': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['projects.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'projects.project': {
'Meta': {'ordering': "('-modified_date', 'name')", 'object_name': 'Project'},
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'docs_directory': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'extensions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'whitelisted': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['projects']
|
KamranMackey/readthedocs.org
|
readthedocs/projects/migrations/0006_add_imported_file.py
|
Python
|
mit
| 8,964
|
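Auto-generated South migrations are verbose; for readability, the table created by the forwards() method above corresponds to the plain Django model sketched below. The field names and sizes mirror the db.create_table() call; the model body itself is an illustration, not code from the readthedocs repository, and it assumes a Django 1.x-era project where ForeignKey does not require on_delete.

from django.db import models


class ImportedFile(models.Model):
    # Mirrors the columns added by 0006_add_imported_file above.
    project = models.ForeignKey('projects.Project',
                                related_name='imported_files')
    name = models.CharField(max_length=255)
    slug = models.SlugField(max_length=50)
    path = models.CharField(max_length=255)
    content = models.TextField()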
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import webob
from nova.api.openstack.compute import image_metadata
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
class ImageMetaDataTest(test.TestCase):
def setUp(self):
super(ImageMetaDataTest, self).setUp()
fakes.stub_out_glance(self.stubs)
self.controller = image_metadata.Controller()
def test_index(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
res_dict = self.controller.index(req, '123')
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
def test_show(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
res_dict = self.controller.show(req, '123', 'key1')
self.assertIn('meta', res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
def test_show_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '123', 'key9')
def test_show_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '100', 'key9')
def test_create(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, '123', body)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, res)
def test_create_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, '100', body)
def test_update_all(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.update_all(req, '123', body)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, res)
def test_update_all_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
def test_update_item_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1', body)
def test_update_item_bad_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = ''
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1', body)
def test_update_item_too_many_keys(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
overload = {}
for num in range(CONF.quota_metadata_items + 1):
overload['key%s' % num] = 'value%s' % num
body = {'meta': overload}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1', body)
def test_update_item_body_uri_mismatch(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'bad', body)
def test_delete(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
self.assertIsNone(res)
def test_delete_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
def test_delete_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
def test_too_many_metadata_items_on_create(self):
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
req.body = jsonutils.dumps(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, '123', data)
def test_too_many_metadata_items_on_put(self):
self.flags(quota_metadata_items=1)
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update, req, '123', 'blah', body)
def test_image_not_authorized_update(self):
image_id = 131
# see nova.tests.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update, req, image_id, 'key1', body)
def test_image_not_authorized_update_all(self):
image_id = 131
# see nova.tests.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update_all, req, image_id, body)
def test_image_not_authorized_create(self):
image_id = 131
# see nova.tests.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'POST'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, image_id, body)
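# Summary (added for clarity, derived from the assertions above): missing
# images or keys map to 404; malformed bodies, key/URI mismatches and
# multi-key item bodies map to 400; exceeding the metadata quota maps to
# 413; and modifying another tenant's image maps to 403.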
|
eharney/nova
|
nova/tests/api/openstack/compute/test_image_metadata.py
|
Python
|
apache-2.0
| 9,569
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=not-callable
# pylint: disable=redefined-builtin
"""Layers can merge several input tensors into a single output tensor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.engine.topology import Layer
from tensorflow.python.framework import tensor_shape
class _Merge(Layer):
"""Generic merge layer for elementwise merge functions.
  Used to implement `Add`, `Average`, etc.
Arguments:
**kwargs: standard layer keyword arguments.
"""
def __init__(self, **kwargs):
super(_Merge, self).__init__(**kwargs)
self.supports_masking = True
def _merge_function(self, inputs):
raise NotImplementedError
def _compute_elemwise_op_output_shape(self, shape1, shape2):
"""Computes the shape of the resultant of an elementwise operation.
Arguments:
shape1: tuple or None. Shape of the first tensor
shape2: tuple or None. Shape of the second tensor
Returns:
expected output shape when an element-wise operation is
carried out on 2 tensors with shapes shape1 and shape2.
tuple or None.
Raises:
ValueError: if shape1 and shape2 are not compatible for
element-wise operations.
"""
if None in [shape1, shape2]:
return None
elif len(shape1) < len(shape2):
return self._compute_elemwise_op_output_shape(shape2, shape1)
elif not shape2:
return shape1
output_shape = list(shape1[:-len(shape2)])
for i, j in zip(shape1[-len(shape2):], shape2):
if i is None or j is None:
output_shape.append(None)
elif i == 1:
output_shape.append(j)
elif j == 1:
output_shape.append(i)
else:
if i != j:
raise ValueError('Operands could not be broadcast '
'together with shapes ' + str(shape1) + ' ' +
str(shape2))
output_shape.append(i)
return tuple(output_shape)
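  # Illustrative examples (added; not in the original source): under this
  # broadcasting rule, (None, 3, 4) and (4,) combine to (None, 3, 4), and
  # (None, 1, 4) and (None, 5, 4) combine to (None, 5, 4); incompatible
  # trailing dimensions such as (None, 3) vs. (None, 4) raise ValueError.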
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list):
      raise ValueError('A merge layer should be called on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A merge layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
input_shape = [tensor_shape.TensorShape(s).as_list() for s in input_shape]
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) > 1:
raise ValueError('Can not merge tensors with different '
'batch sizes. Got tensors with shapes : ' +
str(input_shape))
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
# If the inputs have different ranks, we have to reshape them
# to make them broadcastable.
if None not in input_shape and len(set(map(len, input_shape))) == 1:
self._reshape_required = False
else:
self._reshape_required = True
self.built = True
def call(self, inputs):
if self._reshape_required:
reshaped_inputs = []
input_ndims = list(map(K.ndim, inputs))
if None not in input_ndims:
# If ranks of all inputs are available,
# we simply expand each of them at axis=1
# until all of them have the same rank.
max_ndim = max(input_ndims)
for x in inputs:
x_ndim = K.ndim(x)
for _ in range(max_ndim - x_ndim):
x = K.expand_dims(x, 1)
reshaped_inputs.append(x)
return self._merge_function(reshaped_inputs)
else:
# Transpose all inputs so that batch size is the last dimension.
# (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
transposed = False
for x in inputs:
x_ndim = K.ndim(x)
if x_ndim is None:
x_shape = K.shape(x)
batch_size = x_shape[0]
new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
x_transposed = K.reshape(x,
K.stack([batch_size, K.prod(x_shape[1:])]))
x_transposed = K.permute_dimensions(x_transposed, (1, 0))
x_transposed = K.reshape(x_transposed, new_shape)
reshaped_inputs.append(x_transposed)
transposed = True
elif x_ndim > 1:
dims = list(range(1, x_ndim)) + [0]
reshaped_inputs.append(K.permute_dimensions(x, dims))
transposed = True
else:
# We don't transpose inputs if they are 1D vectors or scalars.
reshaped_inputs.append(x)
y = self._merge_function(reshaped_inputs)
y_ndim = K.ndim(y)
if transposed:
# If inputs have been transposed, we have to transpose the output too.
if y_ndim is None:
y_shape = K.shape(y)
y_ndim = K.shape(y_shape)[0]
batch_size = y_shape[y_ndim - 1]
new_shape = K.concatenate(
[K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
y = K.reshape(y, (-1, batch_size))
y = K.permute_dimensions(y, (1, 0))
y = K.reshape(y, new_shape)
elif y_ndim > 1:
dims = [y_ndim - 1] + list(range(y_ndim - 1))
y = K.permute_dimensions(y, dims)
return y
else:
return self._merge_function(inputs)
def compute_output_shape(self, input_shape):
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) == 1:
output_shape = (list(batch_sizes)[0],) + output_shape
else:
output_shape = (None,) + output_shape
return output_shape
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all([m is None for m in mask]):
return None
masks = [K.expand_dims(m, 0) for m in mask if m is not None]
return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
class Add(_Merge):
"""Layer that adds a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output += inputs[i]
return output
class Multiply(_Merge):
"""Layer that multiplies (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output *= inputs[i]
return output
class Average(_Merge):
"""Layer that averages a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output += inputs[i]
return output / len(inputs)
class Maximum(_Merge):
"""Layer that computes the maximum (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = K.maximum(output, inputs[i])
return output
class Concatenate(_Merge):
"""Layer that concatenates a list of inputs.
It takes as input a list of tensors,
  all of the same shape except for the concatenation axis,
and returns a single tensor, the concatenation of all inputs.
Arguments:
axis: Axis along which to concatenate.
**kwargs: standard layer keyword arguments.
"""
def __init__(self, axis=-1, **kwargs):
super(Concatenate, self).__init__(**kwargs)
self.axis = axis
self.supports_masking = True
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list):
raise ValueError('`Concatenate` layer should be called '
'on a list of inputs')
if all([shape is None for shape in input_shape]):
return
reduced_inputs_shapes = [
tensor_shape.TensorShape(shape).as_list() for shape in input_shape
]
shape_set = set()
for i in range(len(reduced_inputs_shapes)):
del reduced_inputs_shapes[i][self.axis]
shape_set.add(tuple(reduced_inputs_shapes[i]))
if len(shape_set) > 1:
raise ValueError('`Concatenate` layer requires '
'inputs with matching shapes '
'except for the concat axis. '
'Got inputs shapes: %s' % (input_shape))
self.built = True
def call(self, inputs):
if not isinstance(inputs, list):
raise ValueError('A `Concatenate` layer should be called '
'on a list of inputs.')
return K.concatenate(inputs, axis=self.axis)
def _compute_output_shape(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('A `Concatenate` layer should be called '
'on a list of inputs.')
input_shapes = input_shape
output_shape = tensor_shape.TensorShape(input_shapes[0]).as_list()
for shape in input_shapes[1:]:
shape = tensor_shape.TensorShape(shape).as_list()
if output_shape[self.axis] is None or shape[self.axis] is None:
output_shape[self.axis] = None
break
output_shape[self.axis] += shape[self.axis]
return tensor_shape.TensorShape(output_shape)
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all([m is None for m in mask]):
return None
# Make a list of masks while making sure
# the dimensionality of each mask
# is the same as the corresponding input.
masks = []
for input_i, mask_i in zip(inputs, mask):
if mask_i is None:
# Input is unmasked. Append all 1s to masks,
# but cast it to bool first
masks.append(K.cast(K.ones_like(input_i), 'bool'))
elif K.ndim(mask_i) < K.ndim(input_i):
# Mask is smaller than the input, expand it
masks.append(K.expand_dims(mask_i))
else:
masks.append(mask_i)
concatenated = K.concatenate(masks, axis=self.axis)
return K.all(concatenated, axis=-1, keepdims=False)
def get_config(self):
config = {
'axis': self.axis,
}
base_config = super(Concatenate, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Dot(_Merge):
"""Layer that computes a dot product between samples in two tensors.
E.g. if applied to two tensors `a` and `b` of shape `(batch_size, n)`,
the output will be a tensor of shape `(batch_size, 1)`
where each entry `i` will be the dot product between
`a[i]` and `b[i]`.
Arguments:
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
"""
def __init__(self, axes, normalize=False, **kwargs):
super(Dot, self).__init__(**kwargs)
if not isinstance(axes, int):
if not isinstance(axes, (list, tuple)):
raise TypeError('Invalid type for `axes` - '
'should be a list or an int.')
if len(axes) != 2:
raise ValueError('Invalid format for `axes` - '
'should contain two elements.')
if not isinstance(axes[0], int) or not isinstance(axes[1], int):
raise ValueError('Invalid format for `axes` - '
'list elements should be "int".')
self.axes = axes
self.normalize = normalize
self.supports_masking = True
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `Dot` layer should be called '
'on a list of 2 inputs.')
shape1 = tensor_shape.TensorShape(input_shape[0]).as_list()
shape2 = tensor_shape.TensorShape(input_shape[1]).as_list()
if shape1 is None or shape2 is None:
return
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
if shape1[axes[0]] != shape2[axes[1]]:
raise ValueError('Dimension incompatibility '
'%s != %s. ' % (shape1[axes[0]], shape2[axes[1]]) +
'Layer shapes: %s, %s' % (shape1, shape2))
self.built = True
def call(self, inputs):
x1 = inputs[0]
x2 = inputs[1]
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
else:
axes = [self.axes] * 2
else:
axes = []
for i in range(len(self.axes)):
if self.axes[i] < 0:
axes.append(self.axes[i] % K.ndim(inputs[i]))
else:
axes.append(self.axes[i])
if self.normalize:
x1 = K.l2_normalize(x1, axis=axes[0])
x2 = K.l2_normalize(x2, axis=axes[1])
output = K.batch_dot(x1, x2, axes)
return output
def _compute_output_shape(self, input_shape):
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `Dot` layer should be called '
'on a list of 2 inputs.')
shape1 = tensor_shape.TensorShape(input_shape[0]).as_list()
shape2 = tensor_shape.TensorShape(input_shape[1]).as_list()
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
shape1.pop(axes[0])
shape2.pop(axes[1])
shape2.pop(0)
output_shape = shape1 + shape2
if len(output_shape) == 1:
output_shape += [1]
return tensor_shape.TensorShape(output_shape)
def compute_mask(self, inputs, mask=None):
return None
def get_config(self):
config = {
'axes': self.axes,
'normalize': self.normalize,
}
base_config = super(Dot, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def add(inputs, **kwargs):
"""Functional interface to the `Add` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the sum of the inputs.
"""
return Add(**kwargs)(inputs)
def multiply(inputs, **kwargs):
"""Functional interface to the `Multiply` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise product of the inputs.
"""
return Multiply(**kwargs)(inputs)
def average(inputs, **kwargs):
"""Functional interface to the `Average` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the average of the inputs.
"""
return Average(**kwargs)(inputs)
def maximum(inputs, **kwargs):
"""Functional interface to the `Maximum` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise maximum of the inputs.
"""
return Maximum(**kwargs)(inputs)
def concatenate(inputs, axis=-1, **kwargs):
"""Functional interface to the `Concatenate` layer.
Arguments:
inputs: A list of input tensors (at least 2).
axis: Concatenation axis.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the concatenation of the inputs alongside axis `axis`.
"""
return Concatenate(axis=axis, **kwargs)(inputs)
def dot(inputs, axes, normalize=False, **kwargs):
"""Functional interface to the `Dot` layer.
Arguments:
inputs: A list of input tensors (at least 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
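# --- Illustrative sketch (added; not part of the original module) ----------
# A standalone, pure-Python mirror of the broadcasting rule implemented by
# `_Merge._compute_elemwise_op_output_shape` above. It does not touch
# TensorFlow and only runs when this file is executed directly.
def _broadcast_shape_sketch(shape1, shape2):
  """Return the element-wise broadcast shape of two shape tuples."""
  if len(shape1) < len(shape2):
    return _broadcast_shape_sketch(shape2, shape1)
  output = list(shape1[:len(shape1) - len(shape2)])
  for i, j in zip(shape1[len(shape1) - len(shape2):], shape2):
    if i is None or j is None:
      output.append(None)
    elif i == 1:
      output.append(j)
    elif j == 1:
      output.append(i)
    elif i == j:
      output.append(i)
    else:
      raise ValueError('Incompatible shapes: %s %s' % (shape1, shape2))
  return tuple(output)
if __name__ == '__main__':
  assert _broadcast_shape_sketch((None, 3, 4), (4,)) == (None, 3, 4)
  assert _broadcast_shape_sketch((None, 1, 4), (None, 5, 4)) == (None, 5, 4)
  print('broadcast shape sketch OK')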
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/contrib/keras/python/keras/layers/merge.py
|
Python
|
bsd-2-clause
| 18,999
|
"""AXScript Client Framework
This module provides a core framework for an ActiveX Scripting client.
Derived classes actually implement the AX Client itself, including the
scoping rules, etc.
There are classes defined for the engine itself, and for ScriptItems
"""
import sys
from win32com.axscript import axscript
import win32com.server.util
import win32com.client.connect # Need simple connection point support
import win32api, winerror
import pythoncom
import types
import re
def RemoveCR(text):
# No longer just "RemoveCR" - should be renamed to
  # FixNewlines, or something. Idea is to fix arbitrary newlines into
# something Python can compile...
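  # For example (illustrative): RemoveCR("a\r\nb\rc") returns "a\nb\nc".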
return re.sub('(\r\n)|\r|(\n\r)','\n',text)
SCRIPTTEXT_FORCEEXECUTION = -2147483648 # 0x80000000
SCRIPTTEXT_ISEXPRESSION = 0x00000020
SCRIPTTEXT_ISPERSISTENT = 0x00000040
from win32com.server.exception import Exception, IsCOMServerException
import error # ax.client.error
state_map = {
axscript.SCRIPTSTATE_UNINITIALIZED: "SCRIPTSTATE_UNINITIALIZED",
axscript.SCRIPTSTATE_INITIALIZED: "SCRIPTSTATE_INITIALIZED",
axscript.SCRIPTSTATE_STARTED: "SCRIPTSTATE_STARTED",
axscript.SCRIPTSTATE_CONNECTED: "SCRIPTSTATE_CONNECTED",
axscript.SCRIPTSTATE_DISCONNECTED: "SCRIPTSTATE_DISCONNECTED",
axscript.SCRIPTSTATE_CLOSED: "SCRIPTSTATE_CLOSED",
}
def profile(fn, *args):
import profile
prof = profile.Profile()
try:
# roll on 1.6 :-)
# return prof.runcall(fn, *args)
return apply(prof.runcall, (fn,) + args)
finally:
import pstats
# Damn - really want to send this to Excel!
# width, list = pstats.Stats(prof).strip_dirs().get_print_list([])
pstats.Stats(prof).strip_dirs().sort_stats("time").print_stats()
class SafeOutput:
softspace=1
def __init__(self, redir=None):
if redir is None: redir = sys.stdout
self.redir=redir
def write(self,message):
try:
self.redir.write(message)
except:
win32api.OutputDebugString(message.encode('mbcs'))
def flush(self):
pass
def close(self):
pass
# Make sure we have a valid sys.stdout/stderr, otherwise out
# print and trace statements may raise an exception
def MakeValidSysOuts():
if not isinstance(sys.stdout, SafeOutput):
sys.stdout = sys.stderr = SafeOutput()
# and for the sake of working around something I can't understand...
# prevent keyboard interrupts from killing IIS
import signal
def noOp(a,b):
# it would be nice to get to the bottom of this, so a warning to
# the debug console can't hurt.
print "WARNING: Ignoring keyboard interrupt from ActiveScripting engine"
# If someone else has already redirected, then assume they know what they are doing!
if signal.getsignal(signal.SIGINT) == signal.default_int_handler:
try:
signal.signal(signal.SIGINT, noOp)
except ValueError:
# Not the main thread - can't do much.
pass
def trace(*args):
"""A function used instead of "print" for debugging output.
"""
for arg in args:
print arg,
print
def RaiseAssert(scode, desc):
"""A debugging function that raises an exception considered an "Assertion".
"""
print "**************** ASSERTION FAILED *******************"
print desc
raise Exception(scode, desc)
class AXScriptCodeBlock:
"""An object which represents a chunk of code in an AX Script
"""
def __init__(self, name, codeText, sourceContextCookie, startLineNumber, flags):
self.name = name
self.codeText = codeText
self.codeObject = None
self.sourceContextCookie = sourceContextCookie
self.startLineNumber = startLineNumber
self.flags = flags
self.beenExecuted = 0
def GetFileName(self):
# Gets the "file name" for Python - uses <...> so Python doesnt think
# it is a real file.
return "<%s>" % self.name
def GetDisplayName(self):
return self.name
def GetLineNo(self, no):
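    # Note (added for clarity): despite the name, this returns the stripped
    # *text* of line `no` (1-based) within this code block, not a number.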
pos = -1
for i in range(no-1):
pos = self.codeText.find('\n', pos+1)
if pos==-1: pos=len(self.codeText)
epos = self.codeText.find('\n', pos+1)
if epos==-1:
epos=len(self.codeText)
return self.codeText[pos+1:epos].strip()
class Event:
"""A single event for a ActiveX named object.
"""
def __init__(self):
self.name = "<None>"
def __repr__(self):
return "<%s at %d: %s>" % (self.__class__.__name__, id(self), self.name)
def Reset(self):
pass
def Close(self):
pass
def Build(self, typeinfo, funcdesc):
self.dispid = funcdesc[0]
self.name = typeinfo.GetNames(self.dispid)[0]
# print "Event.Build() - Event Name is ", self.name
class EventSink:
"""A set of events against an item. Note this is a COM client for connection points.
"""
_public_methods_ = []
def __init__(self, myItem, coDispatch):
self.events = {}
self.connection = None
self.coDispatch = coDispatch
self.myScriptItem = myItem
self.myInvokeMethod = myItem.GetEngine().ProcessScriptItemEvent
self.iid = None
def Reset(self):
self.Disconnect()
def Close(self):
self.iid = None
self.myScriptItem = None
self.myInvokeMethod = None
self.coDispatch = None
for event in self.events.values():
event.Reset()
self.events = {}
self.Disconnect()
# COM Connection point methods.
def _query_interface_(self, iid):
if iid==self.iid:
return win32com.server.util.wrap(self)
def _invoke_(self, dispid, lcid, wFlags, args):
try:
event = self.events[dispid]
except:
raise Exception(scode=winerror.DISP_E_MEMBERNOTFOUND)
#print "Invoke for ", event, "on", self.myScriptItem, " - calling", self.myInvokeMethod
return self.myInvokeMethod(self.myScriptItem, event, lcid, wFlags, args)
def GetSourceTypeInfo(self, typeinfo):
"""Gets the typeinfo for the Source Events for the passed typeinfo"""
attr = typeinfo.GetTypeAttr()
cFuncs = attr[6]
typeKind = attr[5]
if typeKind not in [pythoncom.TKIND_COCLASS, pythoncom.TKIND_INTERFACE]:
RaiseAssert(winerror.E_UNEXPECTED, "The typeKind of the object is unexpected")
cImplType = attr[8]
for i in xrange(cImplType):
# Look for the [source, default] interface on the coclass
# that isn't marked as restricted.
flags = typeinfo.GetImplTypeFlags(i)
flagsNeeded = pythoncom.IMPLTYPEFLAG_FDEFAULT | pythoncom.IMPLTYPEFLAG_FSOURCE
if (flags & ( flagsNeeded | pythoncom.IMPLTYPEFLAG_FRESTRICTED))==(flagsNeeded):
# Get the handle to the implemented interface.
href = typeinfo.GetRefTypeOfImplType(i)
return typeinfo.GetRefTypeInfo(href)
def BuildEvents(self):
# See if it is an extender object.
try:
mainTypeInfo = self.coDispatch.QueryInterface(axscript.IID_IProvideMultipleClassInfo)
isMulti = 1
numTypeInfos = mainTypeInfo.GetMultiTypeInfoCount()
except pythoncom.com_error:
isMulti = 0
numTypeInfos = 1
try:
mainTypeInfo = self.coDispatch.QueryInterface(pythoncom.IID_IProvideClassInfo)
except pythoncom.com_error:
numTypeInfos = 0
# Create an event handler for the item.
for item in xrange(numTypeInfos):
if isMulti:
typeinfo, flags = mainTypeInfo.GetInfoOfIndex(item, axscript.MULTICLASSINFO_GETTYPEINFO)
else:
typeinfo = mainTypeInfo.GetClassInfo()
sourceType = self.GetSourceTypeInfo(typeinfo)
cFuncs = 0
if sourceType:
attr = sourceType.GetTypeAttr()
self.iid = attr[0]
cFuncs = attr[6]
for i in xrange(cFuncs):
funcdesc = sourceType.GetFuncDesc(i)
event = Event()
event.Build(sourceType, funcdesc)
self.events[event.dispid] = event
def Connect(self):
if self.connection is not None or self.iid is None: return
# trace("Connect for sink item", self.myScriptItem.name, "with IID",str(self.iid))
self.connection = win32com.client.connect.SimpleConnection(self.coDispatch, self, self.iid)
def Disconnect(self):
if self.connection:
try:
self.connection.Disconnect()
except pythoncom.com_error:
pass # Ignore disconnection errors.
self.connection = None
class ScriptItem:
"""An item (or subitem) that is exposed to the ActiveX script
"""
def __init__(self, parentItem, name, dispatch, flags):
self.parentItem = parentItem
self.dispatch = dispatch
self.name = name
self.flags = flags
self.eventSink = None
self.subItems = {}
self.createdConnections = 0
self.isRegistered = 0
# trace("Creating ScriptItem", name, "of parent", parentItem,"with dispatch", dispatch)
def __repr__(self):
flagsDesc=""
if self.flags is not None and self.flags & axscript.SCRIPTITEM_GLOBALMEMBERS:
flagsDesc = "/Global"
return "<%s at %d: %s%s>" % (self.__class__.__name__, id(self), self.name,flagsDesc)
def _dump_(self, level):
flagDescs = []
if self.flags is not None and self.flags & axscript.SCRIPTITEM_GLOBALMEMBERS:
flagDescs.append("GLOBAL!")
if self.flags is None or self.flags & axscript.SCRIPTITEM_ISVISIBLE == 0:
flagDescs.append("NOT VISIBLE")
if self.flags is not None and self.flags & axscript.SCRIPTITEM_ISSOURCE:
flagDescs.append("EVENT SINK")
if self.flags is not None and self.flags & axscript.SCRIPTITEM_CODEONLY:
flagDescs.append("CODE ONLY")
print " " * level, "Name=", self.name, ", flags=", "/".join(flagDescs), self
for subItem in self.subItems.values():
subItem._dump_(level+1)
def Reset(self):
self.Disconnect()
if self.eventSink:
self.eventSink.Reset()
self.isRegistered = 0
for subItem in self.subItems.values():
subItem.Reset()
def Close(self):
self.Reset()
self.dispatch = None
self.parentItem = None
if self.eventSink:
self.eventSink.Close()
self.eventSink = None
for subItem in self.subItems.values():
subItem.Close()
self.subItems = []
self.createdConnections = 0
def Register(self):
if self.isRegistered: return
# Get the type info to use to build this item.
# if not self.dispatch:
# id = self.parentItem.dispatch.GetIDsOfNames(self.name)
# print "DispID of me is", id
# result = self.parentItem.dispatch.Invoke(id, 0, pythoncom.DISPATCH_PROPERTYGET,1)
# if type(result)==pythoncom.TypeIIDs[pythoncom.IID_IDispatch]:
# self.dispatch = result
# else:
# print "*** No dispatch"
# return
# print "**** Made dispatch"
self.isRegistered = 1
# Register the sub-items.
for item in self.subItems.values():
if not item.isRegistered:
item.Register()
def IsGlobal(self):
return self.flags & axscript.SCRIPTITEM_GLOBALMEMBERS
def IsVisible(self):
return (self.flags & (axscript.SCRIPTITEM_ISVISIBLE | axscript.SCRIPTITEM_ISSOURCE)) != 0
def GetEngine(self):
item = self
while item.parentItem.__class__==self.__class__:
item = item.parentItem
return item.parentItem
def _GetFullItemName(self):
ret = self.name
if self.parentItem:
try:
ret = self.parentItem._GetFullItemName() + "." + ret
except AttributeError:
pass
return ret
def GetSubItemClass(self):
return self.__class__
def GetSubItem(self, name):
return self.subItems[name.lower()]
def GetCreateSubItem(self, parentItem, name, dispatch, flags):
keyName = name.lower()
try:
rc = self.subItems[keyName]
# No changes allowed to existing flags.
if not rc.flags is None and not flags is None and rc.flags != flags:
raise Exception(scode=winerror.E_INVALIDARG)
# Existing item must not have a dispatch.
if not rc.dispatch is None and not dispatch is None:
raise Exception(scode=winerror.E_INVALIDARG)
rc.flags = flags # Setup the real flags.
rc.dispatch = dispatch
except KeyError:
rc = self.subItems[keyName] = self.GetSubItemClass()(parentItem, name, dispatch, flags)
return rc
# if self.dispatch is None:
# RaiseAssert(winerror.E_UNEXPECTED, "??")
def CreateConnections(self):
# Create (but do not connect to) the connection points.
if self.createdConnections: return
self.createdConnections = 1
# Nothing to do unless this is an event source
# This flags means self, _and_ children, are connectable.
if self.flags & axscript.SCRIPTITEM_ISSOURCE:
self.BuildEvents()
self.FindBuildSubItemEvents()
def Connect(self):
# Connect to the already created connection points.
if self.eventSink:
self.eventSink.Connect()
for subItem in self.subItems.values():
subItem.Connect()
def Disconnect(self):
# Disconnect from the connection points.
if self.eventSink:
self.eventSink.Disconnect()
for subItem in self.subItems.values():
subItem.Disconnect()
def BuildEvents(self):
if self.eventSink is not None or self.dispatch is None:
RaiseAssert(winerror.E_UNEXPECTED, "Item already has built events, or no dispatch available?")
# trace("BuildEvents for named item", self._GetFullItemName())
self.eventSink = EventSink(self, self.dispatch)
self.eventSink.BuildEvents()
def FindBuildSubItemEvents(self):
# Called during connection to event source. Seeks out and connects to
# all children. As per the AX spec, this is not recursive
    # (ie, children sub-items are not searched)
try:
multiTypeInfo = self.dispatch.QueryInterface(axscript.IID_IProvideMultipleClassInfo)
numTypeInfos = multiTypeInfo.GetMultiTypeInfoCount()
except pythoncom.com_error:
return
for item in xrange(numTypeInfos):
typeinfo, flags = multiTypeInfo.GetInfoOfIndex(item, axscript.MULTICLASSINFO_GETTYPEINFO)
defaultType = self.GetDefaultSourceTypeInfo(typeinfo)
index = 0
while 1:
try:
fdesc = defaultType.GetFuncDesc(index)
except pythoncom.com_error:
break # No more funcs
index = index + 1
dispid = fdesc[0]
funckind = fdesc[3]
invkind = fdesc[4]
elemdesc = fdesc[8]
funcflags = fdesc[9]
try:
isSubObject = not (funcflags & pythoncom.FUNCFLAG_FRESTRICTED) and \
funckind == pythoncom.FUNC_DISPATCH and \
invkind == pythoncom.INVOKE_PROPERTYGET and \
elemdesc[0][0] == pythoncom.VT_PTR and \
elemdesc[0][1][0] == pythoncom.VT_USERDEFINED
except:
isSubObject = 0
if isSubObject:
try:
# We found a sub-object.
names = typeinfo.GetNames(dispid);
result = self.dispatch.Invoke(dispid, 0x0, pythoncom.DISPATCH_PROPERTYGET, 1)
# IE has an interesting problem - there are lots of synonyms for the same object. Eg
# in a simple form, "window.top", "window.window", "window.parent", "window.self"
# all refer to the same object. Our event implementation code does not differentiate
# eg, "window_onload" will fire for *all* objects named "window". Thus,
# "window" and "window.window" will fire the same event handler :(
# One option would be to check if the sub-object is indeed the
# parent object - however, this would stop "top_onload" from firing,
# as no event handler for "top" would work.
# I think we simply need to connect to a *single* event handler.
# As use in IE is deprecated, I am not solving this now.
if type(result)==pythoncom.TypeIIDs[pythoncom.IID_IDispatch]:
name = names[0]
subObj = self.GetCreateSubItem(self, name, result, axscript.SCRIPTITEM_ISVISIBLE)
#print "subobj", name, "flags are", subObj.flags, "mydisp=", self.dispatch, "result disp=", result, "compare=", self.dispatch==result
subObj.BuildEvents()
subObj.Register()
except pythoncom.com_error:
pass
def GetDefaultSourceTypeInfo(self, typeinfo):
"""Gets the typeinfo for the Default Dispatch for the passed typeinfo"""
attr = typeinfo.GetTypeAttr()
cFuncs = attr[6]
typeKind = attr[5]
if typeKind not in [pythoncom.TKIND_COCLASS, pythoncom.TKIND_INTERFACE]:
RaiseAssert(winerror.E_UNEXPECTED, "The typeKind of the object is unexpected")
cImplType = attr[8]
for i in xrange(cImplType):
# Look for the [source, default] interface on the coclass
# that isn't marked as restricted.
flags = typeinfo.GetImplTypeFlags(i)
if (flags & ( pythoncom.IMPLTYPEFLAG_FDEFAULT | pythoncom.IMPLTYPEFLAG_FSOURCE | pythoncom.IMPLTYPEFLAG_FRESTRICTED))==pythoncom.IMPLTYPEFLAG_FDEFAULT:
# Get the handle to the implemented interface.
href = typeinfo.GetRefTypeOfImplType(i)
defTypeInfo = typeinfo.GetRefTypeInfo(href)
attr = defTypeInfo.GetTypeAttr()
typeKind = attr[5]
typeFlags = attr[11]
if typeKind == pythoncom.TKIND_INTERFACE and typeFlags & pythoncom.TYPEFLAG_FDUAL:
# Get corresponding Disp interface
# -1 is a special value which does this for us.
href = typeinfo.GetRefTypeOfImplType(-1)
return defTypeInfo.GetRefTypeInfo(href)
else:
return defTypeInfo
IActiveScriptMethods = [
"SetScriptSite", "GetScriptSite", "SetScriptState", "GetScriptState",
"Close", "AddNamedItem", "AddTypeLib", "GetScriptDispatch",
"GetCurrentScriptThreadID", "GetScriptThreadID", "GetScriptThreadState",
"InterruptScriptThread", "Clone" ]
IActiveScriptParseMethods = [
"InitNew", "AddScriptlet", "ParseScriptText" ]
IObjectSafetyMethods = [
"GetInterfaceSafetyOptions", "SetInterfaceSafetyOptions"]
# ActiveScriptParseProcedure is a new interface with IIS4/IE4.
IActiveScriptParseProcedureMethods = ['ParseProcedureText']
class COMScript:
"""An ActiveX Scripting engine base class.
This class implements the required COM interfaces for ActiveX scripting.
"""
_public_methods_ = IActiveScriptMethods + IActiveScriptParseMethods + IObjectSafetyMethods + IActiveScriptParseProcedureMethods
_com_interfaces_ = [axscript.IID_IActiveScript, axscript.IID_IActiveScriptParse, axscript.IID_IObjectSafety] #, axscript.IID_IActiveScriptParseProcedure]
def __init__(self):
    # Make sure we can print/trace without an exception!
MakeValidSysOuts()
# trace("AXScriptEngine object created", self)
self.baseThreadId = -1
self.debugManager = None
self.threadState = axscript.SCRIPTTHREADSTATE_NOTINSCRIPT
self.scriptState = axscript.SCRIPTSTATE_UNINITIALIZED
self.scriptSite = None
self.safetyOptions = 0
self.lcid = 0
self.subItems = {}
self.scriptCodeBlocks = {}
def _query_interface_(self, iid):
if self.debugManager:
return self.debugManager._query_interface_for_debugger_(iid)
# trace("ScriptEngine QI - unknown IID", iid)
return 0
# IActiveScriptParse
def InitNew(self):
if self.scriptSite is not None:
self.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
def AddScriptlet(self, defaultName, code, itemName, subItemName, eventName, delimiter, sourceContextCookie, startLineNumber):
# trace ("AddScriptlet", defaultName, code, itemName, subItemName, eventName, delimiter, sourceContextCookie, startLineNumber)
self.DoAddScriptlet(defaultName, code, itemName, subItemName, eventName, delimiter,sourceContextCookie, startLineNumber)
def ParseScriptText(self, code, itemName, context, delimiter, sourceContextCookie, startLineNumber, flags, bWantResult):
# trace ("ParseScriptText", code[:20],"...", itemName, context, delimiter, sourceContextCookie, startLineNumber, flags, bWantResult)
if bWantResult or self.scriptState == axscript.SCRIPTSTATE_STARTED \
or self.scriptState == axscript.SCRIPTSTATE_CONNECTED \
or self.scriptState == axscript.SCRIPTSTATE_DISCONNECTED :
flags = flags | SCRIPTTEXT_FORCEEXECUTION
else:
flags = flags & (~SCRIPTTEXT_FORCEEXECUTION)
if flags & SCRIPTTEXT_FORCEEXECUTION:
# About to execute the code.
self.RegisterNewNamedItems()
return self.DoParseScriptText(code, sourceContextCookie, startLineNumber, bWantResult, flags)
#
# IActiveScriptParseProcedure
def ParseProcedureText( self, code, formalParams, procName, itemName, unkContext, delimiter, contextCookie, startingLineNumber, flags):
trace("ParseProcedureText", code, formalParams, procName, itemName, unkContext, delimiter, contextCookie, startingLineNumber, flags)
# NOTE - this is never called, as we have disabled this interface.
    # Problem is, once enabled, all event code comes via here rather than AddScriptlet.
# However, the "procName" is always an empty string - ie, itemName is the object whose event we are handling,
# but no idea what the specific event is!?
    # The problem with disabling this block is that AddScriptlet is _not_ passed
# <SCRIPT for="whatever" event="onClick" language="Python">
# (but even for those blocks, the "onClick" information is still missing!?!?!?)
# self.DoAddScriptlet(None, code, itemName, subItemName, eventName, delimiter,sourceContextCookie, startLineNumber)
return None
#
# IActiveScript
def SetScriptSite(self, site):
# We should still work with an existing site (or so MSXML believes :)
self.scriptSite = site
if self.debugManager is not None:
self.debugManager.Close()
import traceback
try:
import win32com.axdebug.axdebug # see if the core exists.
import debug
self.debugManager = debug.DebugManager(self)
except pythoncom.com_error:
# COM errors will occur if the debugger interface has never been
# seen on the target system
trace("Debugging interfaces not available - debugging is disabled..")
self.debugManager = None
except ImportError:
trace("Debugging extensions (axdebug) module does not exist - debugging is disabled..")
self.debugManager = None
except:
traceback.print_exc()
trace("*** Debugger Manager could not initialize - %s: %s" % (sys.exc_info()[0],sys.exc_info()[1]))
self.debugManager = None
try:
self.lcid = site.GetLCID()
except pythoncom.com_error:
self.lcid = win32api.GetUserDefaultLCID()
self.Reset()
def GetScriptSite(self, iid):
if self.scriptSite is None: raise Exception(scode=winerror.S_FALSE)
return self.scriptSite.QueryInterface(iid)
def SetScriptState(self, state):
#print "SetScriptState with %s - currentstate = %s" % (state_map.get(state),state_map.get(self.scriptState))
if state == self.scriptState: return
# If closed, allow no other state transitions
if self.scriptState==axscript.SCRIPTSTATE_CLOSED:
raise Exception(scode=winerror.E_INVALIDARG)
if state==axscript.SCRIPTSTATE_INITIALIZED:
# Re-initialize - shutdown then reset.
if self.scriptState in [axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_STARTED]:
self.Stop()
elif state==axscript.SCRIPTSTATE_STARTED:
if self.scriptState == axscript.SCRIPTSTATE_CONNECTED:
self.Disconnect()
if self.scriptState == axscript.SCRIPTSTATE_DISCONNECTED:
self.Reset()
self.Run()
self.ChangeScriptState(axscript.SCRIPTSTATE_STARTED)
elif state==axscript.SCRIPTSTATE_CONNECTED:
if self.scriptState in [axscript.SCRIPTSTATE_UNINITIALIZED,axscript.SCRIPTSTATE_INITIALIZED]:
self.ChangeScriptState(axscript.SCRIPTSTATE_STARTED) # report transition through started
self.Run()
if self.scriptState == axscript.SCRIPTSTATE_STARTED:
self.Connect()
self.ChangeScriptState(state)
elif state==axscript.SCRIPTSTATE_DISCONNECTED:
if self.scriptState == axscript.SCRIPTSTATE_CONNECTED:
self.Disconnect()
elif state==axscript.SCRIPTSTATE_CLOSED:
self.Close()
elif state==axscript.SCRIPTSTATE_UNINITIALIZED:
if self.scriptState == axscript.SCRIPTSTATE_STARTED:
self.Stop()
if self.scriptState == axscript.SCRIPTSTATE_CONNECTED:
self.Disconnect()
if self.scriptState == axscript.SCRIPTSTATE_DISCONNECTED:
self.Reset()
self.ChangeScriptState(state)
else:
raise Exception(scode=winerror.E_INVALIDARG)
def GetScriptState(self):
return self.scriptState
def Close(self):
# trace("Close")
if self.scriptState in [axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_DISCONNECTED]:
self.Stop()
if self.scriptState in [axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_DISCONNECTED, axscript.SCRIPTSTATE_INITIALIZED, axscript.SCRIPTSTATE_STARTED]:
pass # engine.close??
if self.scriptState in [axscript.SCRIPTSTATE_UNINITIALIZED, axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_DISCONNECTED, axscript.SCRIPTSTATE_INITIALIZED, axscript.SCRIPTSTATE_STARTED]:
self.ChangeScriptState(axscript.SCRIPTSTATE_CLOSED)
# Completely reset all named items (including persistent)
for item in self.subItems.values():
item.Close()
self.subItems = {}
self.baseThreadId = -1
if self.debugManager:
self.debugManager.Close()
self.debugManager = None
self.scriptSite = None
self.scriptCodeBlocks = {}
self.persistLoaded = 0
def AddNamedItem(self, name, flags):
if self.scriptSite is None: raise Exception(scode=winerror.E_INVALIDARG)
try:
unknown = self.scriptSite.GetItemInfo(name, axscript.SCRIPTINFO_IUNKNOWN)[0]
dispatch = unknown.QueryInterface(pythoncom.IID_IDispatch)
except pythoncom.com_error:
raise Exception(scode=winerror.E_NOINTERFACE, desc="Object has no dispatch interface available.")
newItem = self.subItems[name] = self.GetNamedItemClass()(self, name, dispatch, flags)
if newItem.IsGlobal():
newItem.CreateConnections()
def GetScriptDispatch(self, name):
# Base classes should override.
raise Exception(scode=winerror.E_NOTIMPL)
def GetCurrentScriptThreadID(self):
return self.baseThreadId
def GetScriptThreadID(self, win32ThreadId):
if self.baseThreadId == -1:
raise Exception(scode=winerror.E_UNEXPECTED)
if self.baseThreadId != win32ThreadId:
raise Exception(scode=winerror.E_INVALIDARG)
return self.baseThreadId
def GetScriptThreadState(self, scriptThreadId):
if self.baseThreadId == -1:
raise Exception(scode=winerror.E_UNEXPECTED)
if scriptThreadId != self.baseThreadId:
raise Exception(scode=winerror.E_INVALIDARG)
return self.threadState
def AddTypeLib(self, uuid, major, minor, flags):
# Get the win32com gencache to register this library.
from win32com.client import gencache
gencache.EnsureModule(uuid, self.lcid, major, minor, bForDemand = 1)
# This is never called by the C++ framework - it does magic.
# See PyGActiveScript.cpp
#def InterruptScriptThread(self, stidThread, exc_info, flags):
# raise Exception("Not Implemented", scode=winerror.E_NOTIMPL)
def Clone(self):
raise Exception("Not Implemented", scode=winerror.E_NOTIMPL)
#
# IObjectSafety
  # Note that IE seems to insist we say we support all the flags, even though
  # we don't accept them all. If unknown flags come in, they are ignored, and never
# reflected in GetInterfaceSafetyOptions and the QIs obviously fail, but still IE
# allows our engine to initialize.
def SetInterfaceSafetyOptions(self, iid, optionsMask, enabledOptions):
# trace ("SetInterfaceSafetyOptions", iid, optionsMask, enabledOptions)
if optionsMask & enabledOptions == 0:
return
# See comments above.
# if (optionsMask & enabledOptions & \
# ~(axscript.INTERFACESAFE_FOR_UNTRUSTED_DATA | axscript.INTERFACESAFE_FOR_UNTRUSTED_CALLER)):
# # request for options we don't understand
# RaiseAssert(scode=winerror.E_FAIL, desc="Unknown safety options")
if iid in [pythoncom.IID_IPersist, pythoncom.IID_IPersistStream, pythoncom.IID_IPersistStreamInit,
axscript.IID_IActiveScript, axscript.IID_IActiveScriptParse]:
supported = self._GetSupportedInterfaceSafetyOptions()
self.safetyOptions = supported & optionsMask & enabledOptions
else:
raise Exception(scode=winerror.E_NOINTERFACE)
def _GetSupportedInterfaceSafetyOptions(self):
return 0
def GetInterfaceSafetyOptions(self, iid):
if iid in [pythoncom.IID_IPersist, pythoncom.IID_IPersistStream, pythoncom.IID_IPersistStreamInit,
axscript.IID_IActiveScript, axscript.IID_IActiveScriptParse]:
supported = self._GetSupportedInterfaceSafetyOptions()
return supported, self.safetyOptions
else:
raise Exception(scode=winerror.E_NOINTERFACE)
#
# Other helpers.
def ExecutePendingScripts(self):
self.RegisterNewNamedItems()
self.DoExecutePendingScripts()
def ProcessScriptItemEvent(self, item, event, lcid, wFlags, args):
# trace("ProcessScriptItemEvent", item, event, lcid, wFlags, args)
self.RegisterNewNamedItems()
return self.DoProcessScriptItemEvent(item, event, lcid, wFlags, args)
def _DumpNamedItems_(self):
for item in self.subItems.values():
item._dump_(0)
def ResetNamedItems(self):
# Due to the way we work, we re-create persistent ones.
si = self.subItems.items()
self.subItems = {}
for name, item in si:
item.Close()
if item.flags & axscript.SCRIPTITEM_ISPERSISTENT:
self.AddNamedItem(item.name, item.flags)
def GetCurrentSafetyOptions(self):
return self.safetyOptions
def ProcessNewNamedItemsConnections(self):
# Process all sub-items.
for item in self.subItems.values():
if not item.createdConnections: # Fast-track!
item.CreateConnections()
def RegisterNewNamedItems(self):
# Register all sub-items.
for item in self.subItems.values():
if not item.isRegistered: # Fast-track!
self.RegisterNamedItem(item)
def RegisterNamedItem(self, item):
item.Register()
def CheckConnectedOrDisconnected(self):
if self.scriptState in [axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_DISCONNECTED]:
return
RaiseAssert(winerror.E_UNEXPECTED, "Not connected or disconnected - %d" % self.scriptState)
def Connect(self):
self.ProcessNewNamedItemsConnections()
self.RegisterNewNamedItems()
self.ConnectEventHandlers()
def Run(self):
# trace("AXScript running...")
if self.scriptState != axscript.SCRIPTSTATE_INITIALIZED and self.scriptState != axscript.SCRIPTSTATE_STARTED:
raise Exception(scode=winerror.E_UNEXPECTED)
# self._DumpNamedItems_()
self.ExecutePendingScripts()
self.DoRun()
def Stop(self):
# Stop all executing scripts, and disconnect.
if self.scriptState == axscript.SCRIPTSTATE_CONNECTED:
self.Disconnect()
# Reset back to initialized.
self.Reset()
def Disconnect(self):
self.CheckConnectedOrDisconnected()
try:
self.DisconnectEventHandlers()
except pythoncom.com_error:
# Ignore errors when disconnecting.
pass
self.ChangeScriptState(axscript.SCRIPTSTATE_DISCONNECTED)
def ConnectEventHandlers(self):
# trace ("Connecting to event handlers")
for item in self.subItems.values():
item.Connect()
self.ChangeScriptState(axscript.SCRIPTSTATE_CONNECTED);
def DisconnectEventHandlers(self):
# trace ("Disconnecting from event handlers")
for item in self.subItems.values():
item.Disconnect()
def Reset(self):
    # Keeping persistent engine state, reset back to an initialized state.
self.ResetNamedItems()
self.ChangeScriptState(axscript.SCRIPTSTATE_INITIALIZED)
def ChangeScriptState(self, state):
#print " ChangeScriptState with %s - currentstate = %s" % (state_map.get(state),state_map.get(self.scriptState))
self.DisableInterrupts()
try:
self.scriptState = state
try:
if self.scriptSite: self.scriptSite.OnStateChange(state)
except pythoncom.com_error, (hr, desc, exc, arg):
pass # Ignore all errors here - E_NOTIMPL likely from scriptlets.
finally:
self.EnableInterrupts()
# This stack frame is debugged - therefore we do as little as possible in it.
def _ApplyInScriptedSection(self, fn, args):
if self.debugManager:
self.debugManager.OnEnterScript()
if self.debugManager.adb.appDebugger:
return self.debugManager.adb.runcall(fn, *args)
else:
return apply(fn, args)
else:
return apply(fn, args)
def ApplyInScriptedSection(self, codeBlock, fn, args):
self.BeginScriptedSection()
try:
try:
# print "ApplyInSS", codeBlock, fn, args
return self._ApplyInScriptedSection(fn, args)
finally:
if self.debugManager: self.debugManager.OnLeaveScript()
self.EndScriptedSection()
except:
self.HandleException(codeBlock)
# This stack frame is debugged - therefore we do as little as possible in it.
def _CompileInScriptedSection(self, code, name, type):
if self.debugManager: self.debugManager.OnEnterScript()
return compile(code, name, type)
def CompileInScriptedSection(self, codeBlock, type, realCode = None):
if codeBlock.codeObject is not None: # already compiled
return 1
if realCode is None:
code = codeBlock.codeText
else:
code = realCode
name = codeBlock.GetFileName()
self.BeginScriptedSection()
try:
try:
codeObject = self._CompileInScriptedSection(RemoveCR(code), name, type)
codeBlock.codeObject = codeObject
return 1
finally:
if self.debugManager: self.debugManager.OnLeaveScript()
self.EndScriptedSection()
except:
self.HandleException(codeBlock)
# This stack frame is debugged - therefore we do as little as possible in it.
def _ExecInScriptedSection(self, codeObject, globals, locals = None):
if self.debugManager:
self.debugManager.OnEnterScript()
if self.debugManager.adb.appDebugger:
return self.debugManager.adb.run(codeObject, globals, locals)
else:
exec codeObject in globals, locals
else:
exec codeObject in globals, locals
def ExecInScriptedSection(self, codeBlock, globals, locals = None):
if locals is None: locals = globals
assert not codeBlock.beenExecuted, "This code block should not have been executed"
codeBlock.beenExecuted = 1
self.BeginScriptedSection()
try:
try:
self._ExecInScriptedSection(codeBlock.codeObject, globals, locals)
finally:
if self.debugManager: self.debugManager.OnLeaveScript()
self.EndScriptedSection()
except:
self.HandleException(codeBlock)
def _EvalInScriptedSection(self, codeBlock, globals, locals = None):
if self.debugManager:
self.debugManager.OnEnterScript()
if self.debugManager.adb.appDebugger:
return self.debugManager.adb.runeval(codeBlock, globals, locals)
else:
return eval(codeBlock, globals, locals)
else:
return eval(codeBlock, globals, locals)
def EvalInScriptedSection(self, codeBlock, globals, locals = None):
if locals is None: locals = globals
assert not codeBlock.beenExecuted, "This code block should not have been executed"
codeBlock.beenExecuted = 1
self.BeginScriptedSection()
try:
try:
return self._EvalInScriptedSection(codeBlock.codeObject, globals, locals)
finally:
if self.debugManager: self.debugManager.OnLeaveScript()
self.EndScriptedSection()
except:
self.HandleException(codeBlock)
def HandleException(self, codeBlock):
# NOTE - Never returns - raises a ComException
exc_type, exc_value, exc_traceback = sys.exc_info()
# If a SERVER exception, re-raise it. If a client side COM error, it is
# likely to have originated from the script code itself, and therefore
# needs to be reported like any other exception.
if IsCOMServerException(exc_type):
      # Ensure the traceback doesn't cause a cycle.
exc_traceback = None
raise
# It could be an error by another script.
if issubclass(pythoncom.com_error, exc_type) and exc_value[0]==axscript.SCRIPT_E_REPORTED:
      # Ensure the traceback doesn't cause a cycle.
exc_traceback = None
raise Exception(scode=exc_value[0])
exception = error.AXScriptException(self, \
codeBlock, exc_type, exc_value, exc_traceback)
    # Ensure the traceback doesn't cause a cycle.
exc_traceback = None
result_exception = error.ProcessAXScriptException(self.scriptSite, self.debugManager, exception)
if result_exception is not None:
try:
self.scriptSite.OnScriptTerminate(None, result_exception)
except pythoncom.com_error:
pass # Ignore errors telling engine we stopped.
# reset ourselves to 'connected' so further events continue to fire.
self.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
raise result_exception
# I think that in some cases this should just return - but the code
# that could return None above is disabled, so it never happens.
RaiseAssert(winerror.E_UNEXPECTED, "Don't have an exception to raise to the caller!")
def BeginScriptedSection(self):
if self.scriptSite is None:
      raise Exception(scode=winerror.E_UNEXPECTED)
self.scriptSite.OnEnterScript()
def EndScriptedSection(self):
if self.scriptSite is None:
      raise Exception(scode=winerror.E_UNEXPECTED)
self.scriptSite.OnLeaveScript()
def DisableInterrupts(self):
pass
def EnableInterrupts(self):
pass
def GetNamedItem(self, name):
try:
return self.subItems[name]
except KeyError:
raise Exception(scode=winerror.E_INVALIDARG)
def GetNamedItemClass(self):
return ScriptItem
def _AddScriptCodeBlock(self, codeBlock):
self.scriptCodeBlocks[codeBlock.GetFileName()] = codeBlock
if self.debugManager:
self.debugManager.AddScriptBlock(codeBlock)
if __name__=='__main__':
print "This is a framework class - please use pyscript.py etc"
def dumptypeinfo(typeinfo):
return
attr = typeinfo.GetTypeAttr()
# Loop over all methods
print "Methods"
for j in xrange(attr[6]):
fdesc = list(typeinfo.GetFuncDesc(j))
id = fdesc[0]
try:
names = typeinfo.GetNames(id)
except pythoncom.ole_error:
names = None
doc = typeinfo.GetDocumentation(id)
print " ", names, "has attr", fdesc
# Loop over all variables (ie, properties)
print "Variables"
for j in xrange(attr[7]):
fdesc = list(typeinfo.GetVarDesc(j))
names = typeinfo.GetNames(id)
print " ", names, "has attr", fdesc
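# Hypothetical usage sketch for dumptypeinfo() above (names assumed; note the early
# "return" disables the dump unless it is removed):
#
#   import win32com.client
#   shell = win32com.client.Dispatch("WScript.Shell")
#   dumptypeinfo(shell._oleobj_.GetTypeInfo())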
|
leighpauls/k2cro4
|
third_party/python_26/Lib/site-packages/win32comext/axscript/client/framework.py
|
Python
|
bsd-3-clause
| 36,696
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator, ValidationResult
from ansible.module_utils.errors import AnsibleValidationErrorMultiple
from ansible.module_utils.six import PY2
# Each item is id, argument_spec, parameters, expected, unsupported parameters, error test string
INVALID_SPECS = [
(
'invalid-list',
{'packages': {'type': 'list'}},
{'packages': {'key': 'value'}},
{'packages': {'key': 'value'}},
set(),
"unable to convert to list: <class 'dict'> cannot be converted to a list",
),
(
'invalid-dict',
{'users': {'type': 'dict'}},
{'users': ['one', 'two']},
{'users': ['one', 'two']},
set(),
"unable to convert to dict: <class 'list'> cannot be converted to a dict",
),
(
'invalid-bool',
{'bool': {'type': 'bool'}},
{'bool': {'k': 'v'}},
{'bool': {'k': 'v'}},
set(),
"unable to convert to bool: <class 'dict'> cannot be converted to a bool",
),
(
'invalid-float',
{'float': {'type': 'float'}},
{'float': 'hello'},
{'float': 'hello'},
set(),
"unable to convert to float: <class 'str'> cannot be converted to a float",
),
(
'invalid-bytes',
{'bytes': {'type': 'bytes'}},
{'bytes': 'one'},
{'bytes': 'one'},
set(),
"unable to convert to bytes: <class 'str'> cannot be converted to a Byte value",
),
(
'invalid-bits',
{'bits': {'type': 'bits'}},
{'bits': 'one'},
{'bits': 'one'},
set(),
"unable to convert to bits: <class 'str'> cannot be converted to a Bit value",
),
(
'invalid-jsonargs',
{'some_json': {'type': 'jsonarg'}},
{'some_json': set()},
{'some_json': set()},
set(),
"unable to convert to jsonarg: <class 'set'> cannot be converted to a json string",
),
(
'invalid-parameter',
{'name': {}},
{
'badparam': '',
'another': '',
},
{
'name': None,
'badparam': '',
'another': '',
},
set(('another', 'badparam')),
"another, badparam. Supported parameters include: name.",
),
(
'invalid-elements',
{'numbers': {'type': 'list', 'elements': 'int'}},
{'numbers': [55, 33, 34, {'key': 'value'}]},
{'numbers': [55, 33, 34]},
set(),
"Elements value for option 'numbers' is of type <class 'dict'> and we were unable to convert to int: <class 'dict'> cannot be converted to an int"
),
(
'required',
{'req': {'required': True}},
{},
{'req': None},
set(),
"missing required arguments: req"
)
]
@pytest.mark.parametrize(
('arg_spec', 'parameters', 'expected', 'unsupported', 'error'),
(i[1:] for i in INVALID_SPECS),
ids=[i[0] for i in INVALID_SPECS]
)
def test_invalid_spec(arg_spec, parameters, expected, unsupported, error):
v = ArgumentSpecValidator(arg_spec)
result = v.validate(parameters)
with pytest.raises(AnsibleValidationErrorMultiple) as exc_info:
raise result.errors
if PY2:
error = error.replace('class', 'type')
assert isinstance(result, ValidationResult)
assert error in exc_info.value.msg
assert error in result.error_messages[0]
assert result.unsupported_parameters == unsupported
assert result.validated_parameters == expected
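def test_valid_spec_example():
    # A minimal passing-validation sketch for contrast, assuming the same
    # ArgumentSpecValidator API exercised above: the string '5' is coerced to an
    # int, no errors are recorded, and the coerced value is returned.
    v = ArgumentSpecValidator({'count': {'type': 'int', 'default': 1}})
    result = v.validate({'count': '5'})
    assert isinstance(result, ValidationResult)
    assert result.error_messages == []
    assert result.validated_parameters == {'count': 5}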
|
privateip/ansible
|
test/units/module_utils/common/arg_spec/test_validate_invalid.py
|
Python
|
gpl-3.0
| 3,830
|
# Copyright 2011 Shinichiro Hamaji. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY Shinichiro Hamaji ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Shinichiro Hamaji OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import gdb
import os
import re
import sys
def bt(demangle=True):
# Find the newest frame.
frame = gdb.selected_frame()
while True:
next = frame.newer()
if not next:
break
frame = next
if demangle:
pipe = os.popen('c++filt', 'w')
else:
pipe = sys.stdout
i = 0
while frame:
s = gdb.execute('p dumpSymbol((void*)0x%x)' % frame.pc(),
to_string=True)
m = re.match(r'.*"(.*)"$', s)
if m:
pipe.write("#%-2d %s\n" % (i, m.group(1)))
else:
sal = frame.find_sal()
lineno = ''
if sal.symtab:
lineno = 'at %s:%d' % (sal.symtab, sal.line)
else:
soname = gdb.solib_name(frame.pc())
if soname:
lineno = 'from %s' % (soname)
framename = frame.name()
if not framename:
framename = '??'
pipe.write("#%-2d 0x%016x in %s () %s\n" %
(i, frame.pc(), framename, lineno))
frame = frame.older()
i += 1
pipe.close()
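# Rough usage sketch (assumed workflow): the script is meant to be loaded inside gdb,
# and bt() relies on a dumpSymbol() helper being available in the inferior process:
#
#   (gdb) source tools/gdb_maloader.py
#   (gdb) python bt()
#   (gdb) python bt(demangle=False)   # skip piping frames through c++filt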
|
lebauce/darling
|
tools/gdb_maloader.py
|
Python
|
gpl-3.0
| 2,497
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, an open source suite of business apps
# This module copyright (C) 2014-2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Reset a chart of accounts",
"summary": ("Delete the accounting setup from an otherwise reusable "
"database"),
"version": "1.0",
"author": "Therp BV,Odoo Community Association (OCA)",
"category": 'Accounting & Finance',
"depends": [
'account',
],
'license': 'AGPL-3'
}
|
amoya-dx/account-financial-tools
|
account_reset_chart/__openerp__.py
|
Python
|
agpl-3.0
| 1,329
|
import os
from django.contrib.auth import authenticate
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.context_processors import PermWrapper, PermLookupDict
from django.db.models import Q
from django.test import TestCase, override_settings
from django.utils._os import upath
class MockUser(object):
def has_module_perms(self, perm):
if perm == 'mockapp':
return True
return False
def has_perm(self, perm):
if perm == 'mockapp.someperm':
return True
return False
class PermWrapperTests(TestCase):
"""
Test some details of the PermWrapper implementation.
"""
class EQLimiterObject(object):
"""
This object makes sure __eq__ will not be called endlessly.
"""
def __init__(self):
self.eq_calls = 0
def __eq__(self, other):
if self.eq_calls > 0:
return True
self.eq_calls += 1
return False
def test_permwrapper_in(self):
"""
Test that 'something' in PermWrapper works as expected.
"""
perms = PermWrapper(MockUser())
# Works for modules and full permissions.
self.assertTrue('mockapp' in perms)
self.assertFalse('nonexisting' in perms)
self.assertTrue('mockapp.someperm' in perms)
self.assertFalse('mockapp.nonexisting' in perms)
def test_permlookupdict_in(self):
"""
No endless loops if accessed with 'in' - refs #18979.
"""
pldict = PermLookupDict(MockUser(), 'mockapp')
with self.assertRaises(TypeError):
self.EQLimiterObject() in pldict
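# Illustration of what the tests above exercise: 'mockapp' in perms resolves through
# MockUser.has_module_perms(), while 'mockapp.someperm' in perms resolves through
# MockUser.has_perm().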
@skipIfCustomUser
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
ROOT_URLCONF='django.contrib.auth.tests.urls',
USE_TZ=False, # required for loading the fixture
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthContextProcessorTests(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
fixtures = ['context-processors-users.xml']
@override_settings(
MIDDLEWARE_CLASSES=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
),
TEMPLATE_CONTEXT_PROCESSORS=(
'django.contrib.auth.context_processors.auth',
),
)
def test_session_not_accessed(self):
"""
Tests that the session is not accessed simply by including
the auth context processor
"""
response = self.client.get('/auth_processor_no_attr_access/')
self.assertContains(response, "Session not accessed")
@override_settings(
MIDDLEWARE_CLASSES=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
),
TEMPLATE_CONTEXT_PROCESSORS=(
'django.contrib.auth.context_processors.auth',
),
)
def test_session_is_accessed(self):
"""
Tests that the session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get('/auth_processor_attr_access/')
self.assertContains(response, "Session accessed")
def test_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_perm_in_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perm_in_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_message_attrs(self):
self.client.login(username='super', password='secret')
response = self.client.get('/auth_processor_messages/')
self.assertContains(response, "Message 1")
def test_user_attrs(self):
"""
Test that the lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username='super', password='secret')
user = authenticate(username='super', password='secret')
response = self.client.get('/auth_processor_user/')
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: 100")
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# See if this object can be used for queries where a Q() comparing
# a user can be used with another Q() (in an AND or OR fashion).
# This simulates what a template tag might do with the user from the
# context. Note that we don't need to execute a query, just build it.
#
# The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
# User is a fatal TypeError: "function() takes at least 2 arguments
# (0 given)" deep inside deepcopy().
#
# Python 2.5 and 2.6 succeeded, but logged internally caught exception
# spew:
#
# Exception RuntimeError: 'maximum recursion depth exceeded while
# calling a Python object' in <type 'exceptions.AttributeError'>
        # ignored
Q(user=response.context['user']) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context['user'], user)
self.assertEqual(user, response.context['user'])
|
simbha/mAngE-Gin
|
lib/django/contrib/auth/tests/test_context_processors.py
|
Python
|
mit
| 7,020
|
#!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_port_cos_bw
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to modify port-cos-bw
description:
- This module can be used to update bw settings for CoS queues.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(update) to modify the port-cos-bw.
required: True
type: str
choices: ['update']
pn_max_bw_limit:
description:
- Maximum b/w in percentage.
required: False
type: str
pn_cos:
description:
- CoS priority.
required: False
type: str
pn_port:
description:
- physical port number.
required: False
type: str
pn_weight:
description:
      - Scheduling weight (1 to 127) after b/w guarantee is met.
required: False
type: str
choices: ['priority', 'no-priority']
pn_min_bw_guarantee:
description:
      - Minimum b/w in percentage.
required: False
type: str
"""
EXAMPLES = """
- name: port cos bw modify
pn_port_cos_bw:
pn_cliswitch: "sw01"
state: "update"
pn_port: "1"
pn_cos: "0"
pn_min_bw_guarantee: "60"
- name: port cos bw modify
pn_port_cos_bw:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_cos: "0"
pn_weight: "priority"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the port-cos-bw command.
returned: always
type: list
stderr:
description: set of error responses from the port-cos-bw command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
def main():
""" This section is for arguments parsing """
state_map = dict(
update='port-cos-bw-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_max_bw_limit=dict(required=False, type='str'),
pn_cos=dict(required=False, type='str'),
pn_port=dict(required=False, type='str'),
pn_weight=dict(required=False, type='str',
choices=['priority', 'no-priority']),
pn_min_bw_guarantee=dict(required=False, type='str'),
),
required_if=(
['state', 'update', ['pn_cos', 'pn_port']],
),
required_one_of=[['pn_max_bw_limit', 'pn_min_bw_guarantee', 'pn_weight']],
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
max_bw_limit = module.params['pn_max_bw_limit']
cos = module.params['pn_cos']
port = module.params['pn_port']
weight = module.params['pn_weight']
min_bw_guarantee = module.params['pn_min_bw_guarantee']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
if command == 'port-cos-bw-modify':
cli += ' %s ' % command
if max_bw_limit:
cli += ' max-bw-limit ' + max_bw_limit
if cos:
cli += ' cos ' + cos
if port:
cli += ' port ' + port
if weight:
cli += ' weight ' + weight
if min_bw_guarantee:
cli += ' min-bw-guarantee ' + min_bw_guarantee
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
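# Rough sketch of the command assembled for the first task in EXAMPLES above
# (the exact prefix returned by pn_cli() for switch "sw01" is assumed):
#
#   <pn_cli prefix> port-cos-bw-modify  cos 0 port 1 min-bw-guarantee 60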
|
alxgu/ansible
|
lib/ansible/modules/network/netvisor/pn_port_cos_bw.py
|
Python
|
gpl-3.0
| 4,163
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction
from django.db import connection
from taiga.projects import models
def update_projects_order_in_bulk(bulk_data:list, field:str, user):
"""
Update the order of user projects in the user membership.
    `bulk_data` should be a list of dicts with the following format:
    [{"project_id": <project id>, "order": <new order value>}, ...]
"""
membership_ids = []
new_order_values = []
for membership_data in bulk_data:
project_id = membership_data["project_id"]
membership = user.memberships.get(project_id=project_id)
membership_ids.append(membership.id)
new_order_values.append({field: membership_data["order"]})
from taiga.base.utils import db
db.update_in_bulk_with_ids(membership_ids, new_order_values, model=models.Membership)
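# Hypothetical call (the membership field name is assumed): store a new position for
# two of the user's memberships, e.g.
#
#   update_projects_order_in_bulk(
#       [{"project_id": 1, "order": 0}, {"project_id": 2, "order": 1}],
#       field="user_order", user=request.user)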
@transaction.atomic
def bulk_update_userstory_status_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_userstorystatus set "order" = $1
where projects_userstorystatus.id = $2 and
projects_userstorystatus.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_points_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_points set "order" = $1
where projects_points.id = $2 and
projects_points.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_task_status_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_taskstatus set "order" = $1
where projects_taskstatus.id = $2 and
projects_taskstatus.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_status_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_issuestatus set "order" = $1
where projects_issuestatus.id = $2 and
projects_issuestatus.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_type_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_issuetype set "order" = $1
where projects_issuetype.id = $2 and
projects_issuetype.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_priority_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_priority set "order" = $1
where projects_priority.id = $2 and
projects_priority.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_severity_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_severity set "order" = $1
where projects_severity.id = $2 and
projects_severity.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
|
WALR/taiga-back
|
taiga/projects/services/bulk_update_order.py
|
Python
|
agpl-3.0
| 5,428
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class FileToGoogleCloudStorageOperator(BaseOperator):
"""
Uploads a file to Google Cloud Storage
:param src: Path to the local file
:type src: string
:param dst: Destination path within the specified bucket
:type dst: string
:param bucket: The bucket to upload to
:type bucket: string
:param google_cloud_storage_conn_id: The Airflow connection ID to upload with
:type google_cloud_storage_conn_id: string
:param mime_type: The mime-type string
:type mime_type: string
:param delegate_to: The account to impersonate, if any
:type delegate_to: string
"""
template_fields = ('src', 'dst', 'bucket')
@apply_defaults
def __init__(self,
src,
dst,
bucket,
google_cloud_storage_conn_id='google_cloud_storage_default',
mime_type='application/octet-stream',
delegate_to=None,
*args,
**kwargs):
super(FileToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.src = src
self.dst = dst
self.bucket = bucket
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.mime_type = mime_type
self.delegate_to = delegate_to
def execute(self, context):
"""
        Uploads the file to Google Cloud Storage
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket=self.bucket,
object=self.dst,
mime_type=self.mime_type,
filename=self.src)
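# Minimal usage sketch inside a DAG definition (bucket and paths are assumed values):
#
#   upload_report = FileToGoogleCloudStorageOperator(
#       task_id='upload_report',
#       src='/tmp/report.csv',
#       dst='reports/report.csv',
#       bucket='my-example-bucket',
#       dag=dag)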
|
MetrodataTeam/incubator-airflow
|
airflow/contrib/operators/file_to_gcs.py
|
Python
|
apache-2.0
| 2,453
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from profile_creators import cookie_profile_extender
from profile_creators import history_profile_extender
from profile_creators import profile_extender
class LargeProfileExtender(profile_extender.ProfileExtender):
"""This class creates a large profile by performing a large number of url
navigations."""
def Run(self):
extender = history_profile_extender.HistoryProfileExtender(
self.finder_options)
extender.Run()
extender = cookie_profile_extender.CookieProfileExtender(
self.finder_options)
extender.Run()
|
guorendong/iridium-browser-ubuntu
|
tools/perf/profile_creators/large_profile_extender.py
|
Python
|
bsd-3-clause
| 713
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-08 09:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', tinymce.models.HTMLField(blank=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', tinymce.models.HTMLField()),
],
),
migrations.CreateModel(
name='Thread',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('Subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threads', to='threads.Subject')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threads', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='post',
name='thread',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='threads.Thread'),
),
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL),
),
]
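# Once generated, a migration like this is typically applied with
# `python manage.py migrate threads` (the app label is inferred from the
# 'threads.Subject' / 'threads.Thread' references above).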
|
GunnerJnr/_CodeInstitute
|
Stream-3/Full-Stack-Development/20.Deployment/4.Populating-our-database/we_are_social/threads/migrations/0001_initial.py
|
Python
|
mit
| 2,276
|
from __future__ import unicode_literals
import copy
import sys
from functools import update_wrapper
from django.utils.six.moves import zip
import django.db.models.manager # Imported to register signal handler.
from django.conf import settings
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db import (router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.text import get_text_list, capfirst
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
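# Used by ModelBase below to build each model's DoesNotExist and
# MultipleObjectsReturned attributes; the __reduce__/__setstate__ pair keeps those
# dynamically created nested exception classes picklable.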
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# six.with_metaclass() inserts an extra class called 'NewBase' in the
# inheritance tree: Model -> NewBase -> object. But the initialization
# should be executed only once for a given model class.
# attrs will never be empty for classes declared in the standard way
# (ie. with the `class` keyword). This is quite robust.
if name == 'NewBase' and attrs == {}:
return super_new(cls, name, bases, attrs)
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase) and
not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class('DoesNotExist', subclass_exception(str('DoesNotExist'),
tuple(x.DoesNotExist
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (ObjectDoesNotExist,),
module, attached_to=new_class))
new_class.add_to_class('MultipleObjectsReturned', subclass_exception(str('MultipleObjectsReturned'),
tuple(x.MultipleObjectsReturned
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (MultipleObjectsReturned,),
module, attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + \
new_class._meta.local_many_to_many + \
new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
if base in o2o_map:
field = o2o_map[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError('Local field %r in class %r clashes '\
'with field of similar name from '\
'abstract base class %r' % \
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers:
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(field, model, cls):
setattr(
field.rel.to,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.rel.to,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
add_lazy_relation(
cls,
opts.order_with_respect_to,
opts.order_with_respect_to.rel.to,
make_foreign_order_accessors
)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ManyToOneRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in list(kwargs):
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if not six.PY3 and hasattr(self, '__unicode__'):
if type(self).__unicode__ == Model.__str__:
klass_name = type(self).__name__
raise RuntimeError("%s.__unicode__ is aliased to __str__. Did"
" you apply @python_2_unicode_compatible"
" without defining __str__?" % klass_name)
return force_text(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provides pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
if not self._deferred:
return super(Model, self).__reduce__()
data = self.__dict__
defers = []
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
model = self._meta.proxy_for_model
return (model_unpickle, (model, defers), data)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and self._deferred and using == self._state.db:
field_names = set()
for field in self._meta.fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
deferred_fields = [
f.attname for f in self._meta.fields
if f.attname not in self.__dict__
and isinstance(self.__class__.__dict__[f.attname],
DeferredAttribute)]
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
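    # Rough usage sketch (model and field names assumed): obj.save(update_fields=['name'])
    # issues an UPDATE touching only 'name', while obj.save(force_insert=True) always
    # attempts an INSERT.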
def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw', 'cls', and 'origin').
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
if cls is None:
cls = self.__class__
meta = cls._meta
if not meta.proxy:
origin = cls
else:
meta = cls._meta
if origin and not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields)
# If we are in a raw save, save the object exactly as presented.
# That means that we don't try to be smart about saving attributes
# that might have come from the parent class - we just save the
# attributes we have been given to the class we have been given.
# We also go through this process to defer the save of proxy objects
# to their actual underlying model.
if not raw or meta.proxy:
if meta.proxy:
org = cls
else:
org = None
for parent, field in meta.parents.items():
# At this point, parent's primary key field may be unknown
# (for example, from administration form which doesn't fill
# this field). If so, fill it.
if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self.save_base(cls=parent, origin=org, using=using,
update_fields=update_fields)
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy, we
# set attname directly, bypassing the descriptor.
# Invalidate the related object cache, in case it's been
# accidentally populated. A fresh instance will be
# re-built from the database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
if meta.proxy:
return
if not meta.proxy:
non_pks = [f for f in meta.local_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields]
# First, try an UPDATE. If that doesn't update anything, do an INSERT.
pk_val = self._get_pk_val(meta)
pk_set = pk_val is not None
record_exists = True
manager = cls._base_manager
if pk_set:
# Determine if we should do an update (pk already exists, forced update,
# no force_insert)
if ((force_update or update_fields) or (not force_insert and
manager.using(using).filter(pk=pk_val).exists())):
if force_update or non_pks:
values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
if values:
rows = manager.using(using).filter(pk=pk_val)._update(values)
if force_update and not rows:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not rows:
raise DatabaseError("Save with update_fields did not affect any rows.")
else:
record_exists = False
if not pk_set or not record_exists:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
self._order = order_value
fields = meta.local_fields
if not pk_set:
if force_update or update_fields:
raise ValueError("Cannot force an update in save() with no primary key.")
fields = [f for f in fields if not isinstance(f, AutoField)]
record_exists = False
update_pk = bool(meta.has_auto_field and not pk_set)
result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw)
if update_pk:
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed(using=using)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if origin and not meta.auto_created:
signals.post_save.send(sender=origin, instance=self, created=(not record_exists),
update_fields=update_fields, raw=raw, using=using)
save_base.alters_data = True
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
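    # For a field declared with choices, e.g. status = CharField(choices=STATUS_CHOICES)
    # (names assumed), this curried helper backs the generated obj.get_status_display().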
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = is_next and 'gt' or 'lt'
order = not is_next and '-' or ''
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = is_next and 'gt' or 'lt'
order = not is_next and '-_order' or '_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, unused):
return self.pk
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.parents.keys():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.parents.keys():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup, we can remove this special
            # case if that makes its way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field, unique_for):
opts = self._meta
return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)),
'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
'lookup': lookup_type,
}
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
model_name = capfirst(opts.verbose_name)
# A unique field
if len(unique_check) == 1:
field_name = unique_check[0]
field = opts.get_field(field_name)
field_label = capfirst(field.verbose_name)
# Insert the error into the error dict, very sneaky
return field.error_messages['unique'] % {
'model_name': six.text_type(model_name),
'field_label': six.text_type(field_label)
}
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
field_labels = get_text_list(field_labels, _('and'))
return _("%(model_name)s with this %(field_label)s already exists.") % {
'model_name': six.text_type(model_name),
'field_label': six.text_type(field_labels)
}
def full_clean(self, exclude=None):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
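# Illustrative usage sketch (not from the original module): how calling code typically
# drives full_clean().  Assumes a configured Django project and a hypothetical ``Article``
# model whose ``title`` field is unique.
from django.core.exceptions import ValidationError as _ValidationError

def _save_if_valid(article):
    """Run clean_fields(), clean() and validate_unique() before saving."""
    try:
        article.full_clean()
    except _ValidationError as e:
        # message_dict maps field names (or NON_FIELD_ERRORS) to lists of messages
        return e.message_dict
    article.save()
    return None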
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing message_dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in validators.EMPTY_VALUES:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.messages
if errors:
raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
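# Illustrative sketch (not part of the original file): the two curried methods above are
# exposed on the related object as get_RELATED_order()/set_RELATED_order().  Assumes a
# hypothetical ``Answer`` model with ``class Meta: order_with_respect_to = 'question'``.
def _reorder_answers(question, pk_list):
    question.set_answer_order(pk_list)   # persists _order values via method_set_order()
    return question.get_answer_order()   # reads them back via method_get_order()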
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
pass
def model_unpickle(model, attrs):
"""
Used to unpickle Model subclasses with deferred fields.
"""
cls = deferred_class_factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
|
havard024/prego
|
venv/lib/python2.7/site-packages/django/db/models/base.py
|
Python
|
mit
| 44,041
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create the pipeline, ball and spikes
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(7)
sphere.SetPhiResolution(7)
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphere.GetOutputPort())
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(sphereMapper)
sphereActor2 = vtk.vtkActor()
sphereActor2.SetMapper(sphereMapper)
cone = vtk.vtkConeSource()
cone.SetResolution(5)
glyph = vtk.vtkGlyph3D()
glyph.SetInputConnection(sphere.GetOutputPort())
glyph.SetSourceConnection(cone.GetOutputPort())
glyph.SetVectorModeToUseNormal()
glyph.SetScaleModeToScaleByVector()
glyph.SetScaleFactor(0.25)
spikeMapper = vtk.vtkPolyDataMapper()
spikeMapper.SetInputConnection(glyph.GetOutputPort())
spikeActor = vtk.vtkActor()
spikeActor.SetMapper(spikeMapper)
spikeActor2 = vtk.vtkActor()
spikeActor2.SetMapper(spikeMapper)
# set the actors position and scale
spikeActor.SetPosition(0,0.7,0)
sphereActor.SetPosition(0,0.7,0)
spikeActor2.SetPosition(0,-1,-10)
sphereActor2.SetPosition(0,-1,-10)
spikeActor2.SetScale(1.5,1.5,1.5)
sphereActor2.SetScale(1.5,1.5,1.5)
ren1.AddActor(sphereActor)
ren1.AddActor(spikeActor)
ren1.AddActor(sphereActor2)
ren1.AddActor(spikeActor2)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(200,200)
# do the first render and then zoom in a little
renWin.Render()
ren1.GetActiveCamera().SetFocalPoint(0,0,0)
ren1.GetActiveCamera().Zoom(1.8)
ren1.GetActiveCamera().SetFocalDisk(0.05)
renWin.SetFDFrames(11)
renWin.Render()
iren.Initialize()
#renWin SetFileName CamBlur.tcl.ppm
#renWin SaveImageAsPPM
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Core/Testing/Python/CamBlur.py
|
Python
|
gpl-3.0
| 1,969
|
# -*- coding: utf-8 -*-
from . import test_convert
from . import test_env
|
ddico/odoo
|
odoo/addons/test_convert/tests/__init__.py
|
Python
|
agpl-3.0
| 74
|
"""
This module contains a set of functions for vectorized string
operations and methods.
.. note::
The `chararray` class exists for backwards compatibility with
Numarray; it is not recommended for new development. Starting from numpy
1.4, if one needs arrays of strings, it is recommended to use arrays of
`dtype` `object_`, `string_` or `unicode_`, and use the free functions
in the `numpy.char` module for fast vectorized string operations.
Some methods will only be available if the corresponding string method is
available in your version of Python.
The preferred alias for `defchararray` is `numpy.char`.
"""
from __future__ import division, absolute_import, print_function
import sys
from .numerictypes import string_, unicode_, integer, object_, bool_, character
from .numeric import ndarray, compare_chararrays
from .numeric import array as narray
from numpy.core.multiarray import _vec_string
from numpy.compat import asbytes, long
import numpy
__all__ = [
'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal',
'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
'array', 'asarray'
]
_globalvar = 0
if sys.version_info[0] >= 3:
_unicode = str
_bytes = bytes
else:
_unicode = unicode
_bytes = str
_len = len
def _use_unicode(*args):
"""
Helper function for determining the output type of some string
operations.
For an operation on two ndarrays, if at least one is unicode, the
result should be unicode.
"""
for x in args:
if (isinstance(x, _unicode) or
issubclass(numpy.asarray(x).dtype.type, unicode_)):
return unicode_
return string_
def _to_string_or_unicode_array(result):
"""
Helper function to cast a result back into a string or unicode array
if an object array must be used as an intermediary.
"""
return numpy.asarray(result.tolist())
def _clean_args(*args):
"""
Helper function for delegating arguments to Python string
functions.
Many of the Python string operations that have optional arguments
do not use 'None' to indicate a default value. In these cases,
we need to remove all `None` arguments, and those following them.
"""
newargs = []
for chk in args:
if chk is None:
break
newargs.append(chk)
return newargs
def _get_num_chars(a):
"""
Helper function that returns the number of characters per field in
a string or unicode array. This is to abstract out the fact that
for a unicode array this is itemsize / 4.
"""
if issubclass(a.dtype.type, unicode_):
return a.itemsize // 4
return a.itemsize
def equal(x1, x2):
"""
Return (x1 == x2) element-wise.
Unlike `numpy.equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
"""
return compare_chararrays(x1, x2, '==', True)
def not_equal(x1, x2):
"""
Return (x1 != x2) element-wise.
Unlike `numpy.not_equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, greater_equal, less_equal, greater, less
"""
return compare_chararrays(x1, x2, '!=', True)
def greater_equal(x1, x2):
"""
Return (x1 >= x2) element-wise.
Unlike `numpy.greater_equal`, this comparison is performed by
first stripping whitespace characters from the end of the string.
This behavior is provided for backward-compatibility with
numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, less_equal, greater, less
"""
return compare_chararrays(x1, x2, '>=', True)
def less_equal(x1, x2):
"""
Return (x1 <= x2) element-wise.
Unlike `numpy.less_equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, greater_equal, greater, less
"""
return compare_chararrays(x1, x2, '<=', True)
def greater(x1, x2):
"""
Return (x1 > x2) element-wise.
Unlike `numpy.greater`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, greater_equal, less_equal, less
"""
return compare_chararrays(x1, x2, '>', True)
def less(x1, x2):
"""
Return (x1 < x2) element-wise.
Unlike `numpy.greater`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, greater_equal, less_equal, greater
"""
return compare_chararrays(x1, x2, '<', True)
def str_len(a):
"""
Return len(a) element-wise.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of integers
See also
--------
__builtin__.len
"""
return _vec_string(a, integer, '__len__')
def add(x1, x2):
"""
Return element-wise string concatenation for two arrays of str or unicode.
Arrays `x1` and `x2` must have the same shape.
Parameters
----------
x1 : array_like of str or unicode
Input array.
x2 : array_like of str or unicode
Input array.
Returns
-------
add : ndarray
Output array of `string_` or `unicode_`, depending on input types
of the same shape as `x1` and `x2`.
"""
arr1 = numpy.asarray(x1)
arr2 = numpy.asarray(x2)
out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
dtype = _use_unicode(arr1, arr2)
return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
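# Illustrative, standalone example (not from the original numpy source): element-wise
# concatenation through the public ``numpy.char`` namespace; the output itemsize is the
# sum of the input itemsizes, and the exact dtype depends on the inputs.
import numpy as np
greetings = np.char.add(['hello ', 'hi '], ['numpy', 'there'])
# -> array(['hello numpy', 'hi there'], ...)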
def multiply(a, i):
"""
Return (a * i), that is string multiple concatenation,
element-wise.
Values in `i` of less than 0 are treated as 0 (which yields an
empty string).
Parameters
----------
a : array_like of str or unicode
i : array_like of ints
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
"""
a_arr = numpy.asarray(a)
i_arr = numpy.asarray(i)
if not issubclass(i_arr.dtype.type, integer):
raise ValueError("Can only multiply by integers")
out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0)
return _vec_string(
a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
def mod(a, values):
"""
Return (a % i), that is pre-Python 2.6 string formatting
(interpolation), element-wise for a pair of array_likes of str
or unicode.
Parameters
----------
a : array_like of str or unicode
values : array_like of values
These values will be element-wise interpolated into the string.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
See also
--------
str.__mod__
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, '__mod__', (values,)))
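# Illustrative, standalone sketch (not from the original source): %-style formatting
# applied element-wise with numpy.char.mod, using equal-length format and value arrays.
import numpy as np
formatted = np.char.mod(['%.2f%%', '%d items'], [12.5, 3])
# -> array(['12.50%', '3 items'], ...)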
def capitalize(a):
"""
Return a copy of `a` with only the first character of each element
capitalized.
Calls `str.capitalize` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Input array of strings to capitalize.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input
types
See also
--------
str.capitalize
Examples
--------
>>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
array(['a1b2', '1b2a', 'b2a1', '2a1b'],
dtype='|S4')
>>> np.char.capitalize(c)
array(['A1b2', '1b2a', 'B2a1', '2a1b'],
dtype='|S4')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'capitalize')
def center(a, width, fillchar=' '):
"""
Return a copy of `a` with its elements centered in a string of
length `width`.
Calls `str.center` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The padding character to use (default is space).
Returns
-------
out : ndarray
Output array of str or unicode, depending on input
types
See also
--------
str.center
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = long(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
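# Illustrative, standalone example (not from the original source): centering each element
# in a fixed-width field with a custom fill character.
import numpy as np
centered = np.char.center(np.array(['ab', 'c']), 6, fillchar='*')
# -> array(['**ab**', '**c***'], ...)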
def count(a, sub, start=0, end=None):
"""
Returns an array with the number of non-overlapping occurrences of
substring `sub` in the range [`start`, `end`].
Calls `str.count` element-wise.
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
The substring to search for.
start, end : int, optional
Optional arguments `start` and `end` are interpreted as slice
notation to specify the range in which to count.
Returns
-------
out : ndarray
Output array of ints.
See also
--------
str.count
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
>>> np.char.count(c, 'A')
array([3, 1, 1])
>>> np.char.count(c, 'aA')
array([3, 1, 0])
>>> np.char.count(c, 'A', start=1, end=4)
array([2, 1, 1])
>>> np.char.count(c, 'A', start=1, end=3)
array([1, 0, 0])
"""
return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end))
def decode(a, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
The set of available codecs comes from the Python standard library,
and may be extended at runtime. For more information, see the
:mod:`codecs` module.
Parameters
----------
a : array_like of str or unicode
encoding : str, optional
The name of an encoding
errors : str, optional
Specifies how to handle encoding errors
Returns
-------
out : ndarray
See also
--------
str.decode
Notes
-----
The type of the result will depend on the encoding specified.
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
>>> np.char.encode(c, encoding='cp037')
array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@',
'\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'],
dtype='|S7')
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
def encode(a, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
The set of available codecs comes from the Python standard library,
and may be extended at runtime. For more information, see the codecs
module.
Parameters
----------
a : array_like of str or unicode
encoding : str, optional
The name of an encoding
errors : str, optional
Specifies how to handle encoding errors
Returns
-------
out : ndarray
See also
--------
str.encode
Notes
-----
The type of the result will depend on the encoding specified.
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
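# Illustrative, standalone round trip (not from the original source): encode() produces a
# byte-string array and decode() restores the unicode array.
import numpy as np
raw = np.char.encode(np.array(['caf\u00e9', 'na\u00efve']), 'utf-8')
back = np.char.decode(raw, 'utf-8')
# back -> array(['café', 'naïve'], ...)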
def endswith(a, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `a` ends with `suffix`, otherwise `False`.
Calls `str.endswith` element-wise.
Parameters
----------
a : array_like of str or unicode
suffix : str
start, end : int, optional
With optional `start`, test beginning at that position. With
optional `end`, stop comparing at that position.
Returns
-------
out : ndarray
Outputs an array of bools.
See also
--------
str.endswith
Examples
--------
>>> s = np.array(['foo', 'bar'])
>>> s[0] = 'foo'
>>> s[1] = 'bar'
>>> s
array(['foo', 'bar'],
dtype='|S3')
>>> np.char.endswith(s, 'ar')
array([False, True], dtype=bool)
>>> np.char.endswith(s, 'a', start=1, end=2)
array([False, True], dtype=bool)
"""
return _vec_string(
a, bool_, 'endswith', [suffix, start] + _clean_args(end))
def expandtabs(a, tabsize=8):
"""
Return a copy of each string element where all tab characters are
replaced by one or more spaces.
Calls `str.expandtabs` element-wise.
Return a copy of each string element where all tab characters are
replaced by one or more spaces, depending on the current column
and the given `tabsize`. The column number is reset to zero after
each newline occurring in the string. This doesn't understand other
non-printing characters or escape sequences.
Parameters
----------
a : array_like of str or unicode
Input array
tabsize : int, optional
Replace tabs with `tabsize` number of spaces. If not given defaults
to 8 spaces.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.expandtabs
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'expandtabs', (tabsize,)))
def find(a, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
substring `sub` is found.
Calls `str.find` element-wise.
For each element, return the lowest index in the string where
substring `sub` is found, such that `sub` is contained in the
range [`start`, `end`].
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
start, end : int, optional
Optional arguments `start` and `end` are interpreted as in
slice notation.
Returns
-------
out : ndarray or int
Output array of ints. Returns -1 if `sub` is not found.
See also
--------
str.find
"""
return _vec_string(
a, integer, 'find', [sub, start] + _clean_args(end))
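# Illustrative, standalone example (not from the original source): element-wise substring
# search; -1 marks elements where the substring does not occur.
import numpy as np
positions = np.char.find(np.array(['hello', 'world', 'sky']), 'o')
# -> array([ 4,  1, -1])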
def index(a, sub, start=0, end=None):
"""
Like `find`, but raises `ValueError` when the substring is not found.
Calls `str.index` element-wise.
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
start, end : int, optional
Returns
-------
out : ndarray
Output array of ints. Raises `ValueError` if `sub` is not found.
See also
--------
find, str.find
"""
return _vec_string(
a, integer, 'index', [sub, start] + _clean_args(end))
def isalnum(a):
"""
Returns true for each element if all characters in the string are
alphanumeric and there is at least one character, false otherwise.
Calls `str.isalnum` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.isalnum
"""
return _vec_string(a, bool_, 'isalnum')
def isalpha(a):
"""
Returns true for each element if all characters in the string are
alphabetic and there is at least one character, false otherwise.
Calls `str.isalpha` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.isalpha
"""
return _vec_string(a, bool_, 'isalpha')
def isdigit(a):
"""
Returns true for each element if all characters in the string are
digits and there is at least one character, false otherwise.
Calls `str.isdigit` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.isdigit
"""
return _vec_string(a, bool_, 'isdigit')
def islower(a):
"""
Returns true for each element if all cased characters in the
string are lowercase and there is at least one cased character,
false otherwise.
Calls `str.islower` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.islower
"""
return _vec_string(a, bool_, 'islower')
def isspace(a):
"""
Returns true for each element if there are only whitespace
characters in the string and there is at least one character,
false otherwise.
Calls `str.isspace` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.isspace
"""
return _vec_string(a, bool_, 'isspace')
def istitle(a):
"""
Returns true for each element if the element is a titlecased
string and there is at least one character, false otherwise.
Call `str.istitle` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.istitle
"""
return _vec_string(a, bool_, 'istitle')
def isupper(a):
"""
Returns true for each element if all cased characters in the
string are uppercase and there is at least one character, false
otherwise.
Call `str.isupper` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.isupper
"""
return _vec_string(a, bool_, 'isupper')
def join(sep, seq):
"""
Return a string which is the concatenation of the strings in the
sequence `seq`.
Calls `str.join` element-wise.
Parameters
----------
sep : array_like of str or unicode
seq : array_like of str or unicode
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
See also
--------
str.join
"""
return _to_string_or_unicode_array(
_vec_string(sep, object_, 'join', (seq,)))
def ljust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` left-justified in a
string of length `width`.
Calls `str.ljust` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The character to use for padding
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.ljust
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = long(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
def lower(a):
"""
Return an array with the elements converted to lowercase.
Call `str.lower` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.lower
Examples
--------
>>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
array(['A1B C', '1BCA', 'BCA1'],
dtype='|S5')
>>> np.char.lower(c)
array(['a1b c', '1bca', 'bca1'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lower')
def lstrip(a, chars=None):
"""
For each element in `a`, return a copy with the leading characters
removed.
Calls `str.lstrip` element-wise.
Parameters
----------
a : array-like, {str, unicode}
Input array.
chars : {str, unicode}, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a prefix; rather, all combinations of its values are
stripped.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.lstrip
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
The 'a' is not stripped from c[1] because of its leading whitespace.
>>> np.char.lstrip(c, 'a')
array(['AaAaA', ' aA ', 'bBABba'],
dtype='|S7')
>>> np.char.lstrip(c, 'A') # leaves c unchanged
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
>>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
... # XXX: is this a regression? this line now returns False
... # np.char.lstrip(c,'') does not modify c at all.
True
>>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
True
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
def partition(a, sep):
"""
Partition each element in `a` around `sep`.
Calls `str.partition` element-wise.
For each element in `a`, split the element as the first
occurrence of `sep`, and return 3 strings containing the part
before the separator, the separator itself, and the part after
the separator. If the separator is not found, return 3 strings
containing the string itself, followed by two empty strings.
Parameters
----------
a : array_like, {str, unicode}
Input array
sep : {str, unicode}
Separator to split each string element in `a`.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type.
The output array will have an extra dimension with 3
elements per input element.
See also
--------
str.partition
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'partition', (sep,)))
def replace(a, old, new, count=None):
"""
For each element in `a`, return a copy of the string with all
occurrences of substring `old` replaced by `new`.
Calls `str.replace` element-wise.
Parameters
----------
a : array-like of str or unicode
old, new : str or unicode
count : int, optional
If the optional argument `count` is given, only the first
`count` occurrences are replaced.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.replace
"""
return _to_string_or_unicode_array(
_vec_string(
a, object_, 'replace', [old, new] + _clean_args(count)))
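# Illustrative, standalone example (not from the original source): element-wise
# substitution of one substring for another.
import numpy as np
renamed = np.char.replace(np.array(['foo.txt', 'bar.txt']), '.txt', '.csv')
# -> array(['foo.csv', 'bar.csv'], ...)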
def rfind(a, sub, start=0, end=None):
"""
For each element in `a`, return the highest index in the string
where substring `sub` is found, such that `sub` is contained
within [`start`, `end`].
Calls `str.rfind` element-wise.
Parameters
----------
a : array-like of str or unicode
sub : str or unicode
start, end : int, optional
Optional arguments `start` and `end` are interpreted as in
slice notation.
Returns
-------
out : ndarray
Output array of ints. Return -1 on failure.
See also
--------
str.rfind
"""
return _vec_string(
a, integer, 'rfind', [sub, start] + _clean_args(end))
def rindex(a, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
not found.
Calls `str.rindex` element-wise.
Parameters
----------
a : array-like of str or unicode
sub : str or unicode
start, end : int, optional
Returns
-------
out : ndarray
Output array of ints.
See also
--------
rfind, str.rindex
"""
return _vec_string(
a, integer, 'rindex', [sub, start] + _clean_args(end))
def rjust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` right-justified in a
string of length `width`.
Calls `str.rjust` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The character to use for padding
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.rjust
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = long(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
def rpartition(a, sep):
"""
Partition (split) each element around the right-most separator.
Calls `str.rpartition` element-wise.
For each element in `a`, split the element as the last
occurrence of `sep`, and return 3 strings containing the part
before the separator, the separator itself, and the part after
the separator. If the separator is not found, return 3 strings
containing the string itself, followed by two empty strings.
Parameters
----------
a : array_like of str or unicode
Input array
sep : str or unicode
Right-most separator to split each element in array.
Returns
-------
out : ndarray
Output array of string or unicode, depending on input
type. The output array will have an extra dimension with
3 elements per input element.
See also
--------
str.rpartition
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'rpartition', (sep,)))
def rsplit(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
string, using `sep` as the delimiter string.
Calls `str.rsplit` element-wise.
Except for splitting from the right, `rsplit`
behaves like `split`.
Parameters
----------
a : array_like of str or unicode
sep : str or unicode, optional
If `sep` is not specified or `None`, any whitespace string
is a separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done,
the rightmost ones.
Returns
-------
out : ndarray
Array of list objects
See also
--------
str.rsplit, split
"""
# This will return an array of lists of different sizes, so we
# leave it as an object array
return _vec_string(
a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
def rstrip(a, chars=None):
"""
For each element in `a`, return a copy with the trailing
characters removed.
Calls `str.rstrip` element-wise.
Parameters
----------
a : array-like of str or unicode
chars : str or unicode, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a suffix; rather, all combinations of its values are
stripped.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.rstrip
Examples
--------
>>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
array(['aAaAaA', 'abBABba'],
dtype='|S7')
>>> np.char.rstrip(c, 'a')
array(['aAaAaA', 'abBABb'],
dtype='|S7')
>>> np.char.rstrip(c, 'A')
array(['aAaAa', 'abBABba'],
dtype='|S7')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
def split(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
string, using `sep` as the delimiter string.
Calls `str.split` element-wise.
Parameters
----------
a : array_like of str or unicode
sep : str or unicode, optional
If `sep` is not specified or `None`, any whitespace string is a
separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done.
Returns
-------
out : ndarray
Array of list objects
See also
--------
str.split, rsplit
"""
# This will return an array of lists of different sizes, so we
# leave it as an object array
return _vec_string(
a, object_, 'split', [sep] + _clean_args(maxsplit))
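# Illustrative, standalone example (not from the original source): split() returns an
# object array because each element may break into a different number of pieces.
import numpy as np
tokens = np.char.split(np.array(['a b c', 'd e']))
# -> array([list(['a', 'b', 'c']), list(['d', 'e'])], dtype=object)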
def splitlines(a, keepends=None):
"""
For each element in `a`, return a list of the lines in the
element, breaking at line boundaries.
Calls `str.splitlines` element-wise.
Parameters
----------
a : array_like of str or unicode
keepends : bool, optional
Line breaks are not included in the resulting list unless
keepends is given and true.
Returns
-------
out : ndarray
Array of list objects
See also
--------
str.splitlines
"""
return _vec_string(
a, object_, 'splitlines', _clean_args(keepends))
def startswith(a, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `a` starts with `prefix`, otherwise `False`.
Calls `str.startswith` element-wise.
Parameters
----------
a : array_like of str or unicode
prefix : str
start, end : int, optional
With optional `start`, test beginning at that position. With
optional `end`, stop comparing at that position.
Returns
-------
out : ndarray
Array of booleans
See also
--------
str.startswith
"""
return _vec_string(
a, bool_, 'startswith', [prefix, start] + _clean_args(end))
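# Illustrative, standalone example (not from the original source): building a boolean mask
# of elements that begin with a given prefix.
import numpy as np
mask = np.char.startswith(np.array(['npy_add', 'npy_mul', 'other']), 'npy_')
# -> array([ True,  True, False])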
def strip(a, chars=None):
"""
For each element in `a`, return a copy with the leading and
trailing characters removed.
Calls `str.strip` element-wise.
Parameters
----------
a : array-like of str or unicode
chars : str or unicode, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a prefix or suffix; rather, all combinations of its
values are stripped.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.strip
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
>>> np.char.strip(c)
array(['aAaAaA', 'aA', 'abBABba'],
dtype='|S7')
>>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
array(['AaAaA', ' aA ', 'bBABb'],
dtype='|S7')
>>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
array(['aAaAa', ' aA ', 'abBABba'],
dtype='|S7')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
def swapcase(a):
"""
Return element-wise a copy of the string with
uppercase characters converted to lowercase and vice versa.
Calls `str.swapcase` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.swapcase
Examples
--------
>>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
dtype='|S5')
>>> np.char.swapcase(c)
array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'swapcase')
def title(a):
"""
Return element-wise title cased version of string or unicode.
Title case words start with uppercase characters, all remaining cased
characters are lowercase.
Calls `str.title` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.title
Examples
--------
>>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
dtype='|S5')
>>> np.char.title(c)
array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'title')
def translate(a, table, deletechars=None):
"""
For each element in `a`, return a copy of the string where all
characters occurring in the optional argument `deletechars` are
removed, and the remaining characters have been mapped through the
given translation table.
Calls `str.translate` element-wise.
Parameters
----------
a : array-like of str or unicode
table : str of length 256
deletechars : str
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.translate
"""
a_arr = numpy.asarray(a)
if issubclass(a_arr.dtype.type, unicode_):
return _vec_string(
a_arr, a_arr.dtype, 'translate', (table,))
else:
return _vec_string(
a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
def upper(a):
"""
Return an array with the elements converted to uppercase.
Calls `str.upper` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.upper
Examples
--------
>>> c = np.array(['a1b c', '1bca', 'bca1']); c
array(['a1b c', '1bca', 'bca1'],
dtype='|S5')
>>> np.char.upper(c)
array(['A1B C', '1BCA', 'BCA1'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'upper')
def zfill(a, width):
"""
Return the numeric string left-filled with zeros
Calls `str.zfill` element-wise.
Parameters
----------
a : array_like, {str, unicode}
Input array.
width : int
Width of string to left-fill elements in `a`.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.zfill
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = long(numpy.max(width_arr.flat))
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
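# Illustrative, standalone example (not from the original source): zero-padding numeric
# strings to a fixed width; sign characters are handled as in str.zfill.
import numpy as np
padded = np.char.zfill(np.array(['5', '-7', '42']), 4)
# -> array(['0005', '-007', '0042'], ...)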
def isnumeric(a):
"""
For each element, return True if there are only numeric
characters in the element.
Calls `unicode.isnumeric` element-wise.
Numeric characters include digit characters, and all characters
that have the Unicode numeric value property, e.g. ``U+2155,
VULGAR FRACTION ONE FIFTH``.
Parameters
----------
a : array_like, unicode
Input array.
Returns
-------
out : ndarray, bool
Array of booleans of same shape as `a`.
See also
--------
unicode.isnumeric
"""
if _use_unicode(a) != unicode_:
raise TypeError("isnumeric is only available for Unicode strings and arrays")
return _vec_string(a, bool_, 'isnumeric')
def isdecimal(a):
"""
For each element, return True if there are only decimal
characters in the element.
Calls `unicode.isdecimal` element-wise.
Decimal characters include digit characters, and all characters
that can be used to form decimal-radix numbers,
e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
Parameters
----------
a : array_like, unicode
Input array.
Returns
-------
out : ndarray, bool
Array of booleans identical in shape to `a`.
See also
--------
unicode.isdecimal
"""
if _use_unicode(a) != unicode_:
raise TypeError("isnumeric is only available for Unicode strings and arrays")
return _vec_string(a, bool_, 'isdecimal')
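# Illustrative, standalone example (not from the original source): both predicates require
# a unicode array (a plain str array on Python 3 qualifies); byte-string input raises
# TypeError as coded above.
import numpy as np
u = np.array(['123', '\u2155', 'abc'])   # U+2155 has a numeric value but is not decimal
print(np.char.isnumeric(u))              # -> [ True  True False]
print(np.char.isdecimal(u))              # -> [ True False False]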
class chararray(ndarray):
"""
chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
strides=None, order=None)
Provides a convenient view on arrays of string and unicode values.
.. note::
The `chararray` class exists for backwards compatibility with
Numarray; it is not recommended for new development. Starting from numpy
1.4, if one needs arrays of strings, it is recommended to use arrays of
`dtype` `object_`, `string_` or `unicode_`, and use the free functions
in the `numpy.char` module for fast vectorized string operations.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
chararrays should be created using `numpy.char.array` or
`numpy.char.asarray`, rather than this constructor directly.
This constructor creates the array, using `buffer` (with `offset`
and `strides`) if it is not ``None``. If `buffer` is ``None``, then
constructs a new array with `strides` in "C order", unless both
``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides`
is in "Fortran order".
Methods
-------
astype
argsort
copy
count
decode
dump
dumps
encode
endswith
expandtabs
fill
find
flatten
getfield
index
isalnum
isalpha
isdecimal
isdigit
islower
isnumeric
isspace
istitle
isupper
item
join
ljust
lower
lstrip
nonzero
put
ravel
repeat
replace
reshape
resize
rfind
rindex
rjust
rsplit
rstrip
searchsorted
setfield
setflags
sort
split
splitlines
squeeze
startswith
strip
swapaxes
swapcase
take
title
tofile
tolist
tostring
translate
transpose
upper
view
zfill
Parameters
----------
shape : tuple
Shape of the array.
itemsize : int, optional
Length of each array element, in number of characters. Default is 1.
unicode : bool, optional
Are the array elements of type unicode (True) or string (False).
Default is False.
buffer : int, optional
Memory address of the start of the array data. Default is None,
in which case a new array is created.
offset : int, optional
Byte offset of the array data from the start of `buffer`.
Default is 0. Needs to be >=0.
strides : array_like of ints, optional
Strides for the array (see `ndarray.strides` for full description).
Default is None.
order : {'C', 'F'}, optional
The order in which the array data is stored in memory: 'C' ->
"row major" order (the default), 'F' -> "column major"
(Fortran) order.
Examples
--------
>>> charar = np.chararray((3, 3))
>>> charar[:] = 'a'
>>> charar
chararray([['a', 'a', 'a'],
['a', 'a', 'a'],
['a', 'a', 'a']],
dtype='|S1')
>>> charar = np.chararray(charar.shape, itemsize=5)
>>> charar[:] = 'abc'
>>> charar
chararray([['abc', 'abc', 'abc'],
['abc', 'abc', 'abc'],
['abc', 'abc', 'abc']],
dtype='|S5')
"""
def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
offset=0, strides=None, order='C'):
global _globalvar
if unicode:
dtype = unicode_
else:
dtype = string_
# force itemsize to be a Python long, since using NumPy integer
# types results in itemsize.itemsize being used as the size of
# strings in the new array.
itemsize = long(itemsize)
if sys.version_info[0] >= 3 and isinstance(buffer, _unicode):
# On Py3, unicode objects do not have the buffer interface
filler = buffer
buffer = None
else:
filler = None
_globalvar = 1
if buffer is None:
self = ndarray.__new__(subtype, shape, (dtype, itemsize),
order=order)
else:
self = ndarray.__new__(subtype, shape, (dtype, itemsize),
buffer=buffer,
offset=offset, strides=strides,
order=order)
if filler is not None:
self[...] = filler
_globalvar = 0
return self
def __array_finalize__(self, obj):
# The 'b' dtype is a special case because it is used when reconstructing
# (unpickling) a chararray.
if not _globalvar and self.dtype.char not in 'SUbc':
raise ValueError("Can only create a chararray from string data.")
def __getitem__(self, obj):
val = ndarray.__getitem__(self, obj)
if isinstance(val, character):
temp = val.rstrip()
if _len(temp) == 0:
val = ''
else:
val = temp
return val
# IMPLEMENTATION NOTE: Most of the methods of this class are
# direct delegations to the free functions in this module.
# However, those that return an array of strings should instead
# return a chararray, so some extra wrapping is required.
def __eq__(self, other):
"""
Return (self == other) element-wise.
See also
--------
equal
"""
return equal(self, other)
def __ne__(self, other):
"""
Return (self != other) element-wise.
See also
--------
not_equal
"""
return not_equal(self, other)
def __ge__(self, other):
"""
Return (self >= other) element-wise.
See also
--------
greater_equal
"""
return greater_equal(self, other)
def __le__(self, other):
"""
Return (self <= other) element-wise.
See also
--------
less_equal
"""
return less_equal(self, other)
def __gt__(self, other):
"""
Return (self > other) element-wise.
See also
--------
greater
"""
return greater(self, other)
def __lt__(self, other):
"""
Return (self < other) element-wise.
See also
--------
less
"""
return less(self, other)
def __add__(self, other):
"""
Return (self + other), that is string concatenation,
element-wise for a pair of array_likes of str or unicode.
See also
--------
add
"""
return asarray(add(self, other))
def __radd__(self, other):
"""
Return (other + self), that is string concatenation,
element-wise for a pair of array_likes of `string_` or `unicode_`.
See also
--------
add
"""
return asarray(add(numpy.asarray(other), self))
def __mul__(self, i):
"""
Return (self * i), that is string multiple concatenation,
element-wise.
See also
--------
multiply
"""
return asarray(multiply(self, i))
def __rmul__(self, i):
"""
Return (self * i), that is string multiple concatenation,
element-wise.
See also
--------
multiply
"""
return asarray(multiply(self, i))
def __mod__(self, i):
"""
Return (self % i), that is pre-Python 2.6 string formatting
(interpolation), element-wise for a pair of array_likes of `string_`
or `unicode_`.
See also
--------
mod
"""
return asarray(mod(self, i))
def __rmod__(self, other):
return NotImplemented
def argsort(self, axis=-1, kind='quicksort', order=None):
"""
Return the indices that sort the array lexicographically.
For full documentation see `numpy.argsort`, for which this method is
in fact merely a "thin wrapper."
Examples
--------
>>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
>>> c = c.view(np.chararray); c
chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
dtype='|S5')
>>> c[c.argsort()]
chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
dtype='|S5')
"""
return self.__array__().argsort(axis, kind, order)
argsort.__doc__ = ndarray.argsort.__doc__
def capitalize(self):
"""
Return a copy of `self` with only the first character of each element
capitalized.
See also
--------
char.capitalize
"""
return asarray(capitalize(self))
def center(self, width, fillchar=' '):
"""
Return a copy of `self` with its elements centered in a
string of length `width`.
See also
--------
center
"""
return asarray(center(self, width, fillchar))
def count(self, sub, start=0, end=None):
"""
Returns an array with the number of non-overlapping occurrences of
substring `sub` in the range [`start`, `end`].
See also
--------
char.count
"""
return count(self, sub, start, end)
def decode(self, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
See also
--------
char.decode
"""
return decode(self, encoding, errors)
def encode(self, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
See also
--------
char.encode
"""
return encode(self, encoding, errors)
def endswith(self, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `self` ends with `suffix`, otherwise `False`.
See also
--------
char.endswith
"""
return endswith(self, suffix, start, end)
def expandtabs(self, tabsize=8):
"""
Return a copy of each string element where all tab characters are
replaced by one or more spaces.
See also
--------
char.expandtabs
"""
return asarray(expandtabs(self, tabsize))
def find(self, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
substring `sub` is found.
See also
--------
char.find
"""
return find(self, sub, start, end)
def index(self, sub, start=0, end=None):
"""
Like `find`, but raises `ValueError` when the substring is not found.
See also
--------
char.index
"""
return index(self, sub, start, end)
def isalnum(self):
"""
Returns true for each element if all characters in the string
are alphanumeric and there is at least one character, false
otherwise.
See also
--------
char.isalnum
"""
return isalnum(self)
def isalpha(self):
"""
Returns true for each element if all characters in the string
are alphabetic and there is at least one character, false
otherwise.
See also
--------
char.isalpha
"""
return isalpha(self)
def isdigit(self):
"""
Returns true for each element if all characters in the string are
digits and there is at least one character, false otherwise.
See also
--------
char.isdigit
"""
return isdigit(self)
def islower(self):
"""
Returns true for each element if all cased characters in the
string are lowercase and there is at least one cased character,
false otherwise.
See also
--------
char.islower
"""
return islower(self)
def isspace(self):
"""
Returns true for each element if there are only whitespace
characters in the string and there is at least one character,
false otherwise.
See also
--------
char.isspace
"""
return isspace(self)
def istitle(self):
"""
Returns true for each element if the element is a titlecased
string and there is at least one character, false otherwise.
See also
--------
char.istitle
"""
return istitle(self)
def isupper(self):
"""
Returns true for each element if all cased characters in the
string are uppercase and there is at least one character, false
otherwise.
See also
--------
char.isupper
"""
return isupper(self)
def join(self, seq):
"""
Return a string which is the concatenation of the strings in the
sequence `seq`.
See also
--------
char.join
"""
return join(self, seq)
def ljust(self, width, fillchar=' '):
"""
Return an array with the elements of `self` left-justified in a
string of length `width`.
See also
--------
char.ljust
"""
return asarray(ljust(self, width, fillchar))
def lower(self):
"""
Return an array with the elements of `self` converted to
lowercase.
See also
--------
char.lower
"""
return asarray(lower(self))
def lstrip(self, chars=None):
"""
For each element in `self`, return a copy with the leading characters
removed.
See also
--------
char.lstrip
"""
return asarray(lstrip(self, chars))
def partition(self, sep):
"""
Partition each element in `self` around `sep`.
See also
--------
partition
"""
return asarray(partition(self, sep))
def replace(self, old, new, count=None):
"""
For each element in `self`, return a copy of the string with all
occurrences of substring `old` replaced by `new`.
See also
--------
char.replace
"""
return asarray(replace(self, old, new, count))
def rfind(self, sub, start=0, end=None):
"""
For each element in `self`, return the highest index in the string
where substring `sub` is found, such that `sub` is contained
within [`start`, `end`].
See also
--------
char.rfind
"""
return rfind(self, sub, start, end)
def rindex(self, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
not found.
See also
--------
char.rindex
"""
return rindex(self, sub, start, end)
def rjust(self, width, fillchar=' '):
"""
Return an array with the elements of `self`
right-justified in a string of length `width`.
See also
--------
char.rjust
"""
return asarray(rjust(self, width, fillchar))
def rpartition(self, sep):
"""
Partition each element in `self` around `sep`.
See also
--------
rpartition
"""
return asarray(rpartition(self, sep))
def rsplit(self, sep=None, maxsplit=None):
"""
For each element in `self`, return a list of the words in
the string, using `sep` as the delimiter string.
See also
--------
char.rsplit
"""
return rsplit(self, sep, maxsplit)
def rstrip(self, chars=None):
"""
For each element in `self`, return a copy with the trailing
characters removed.
See also
--------
char.rstrip
"""
return asarray(rstrip(self, chars))
def split(self, sep=None, maxsplit=None):
"""
For each element in `self`, return a list of the words in the
string, using `sep` as the delimiter string.
See also
--------
char.split
"""
return split(self, sep, maxsplit)
def splitlines(self, keepends=None):
"""
For each element in `self`, return a list of the lines in the
element, breaking at line boundaries.
See also
--------
char.splitlines
"""
return splitlines(self, keepends)
def startswith(self, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `self` starts with `prefix`, otherwise `False`.
See also
--------
char.startswith
"""
return startswith(self, prefix, start, end)
def strip(self, chars=None):
"""
For each element in `self`, return a copy with the leading and
trailing characters removed.
See also
--------
char.strip
"""
return asarray(strip(self, chars))
def swapcase(self):
"""
For each element in `self`, return a copy of the string with
uppercase characters converted to lowercase and vice versa.
See also
--------
char.swapcase
"""
return asarray(swapcase(self))
def title(self):
"""
For each element in `self`, return a titlecased version of the
string: words start with uppercase characters, all remaining cased
characters are lowercase.
See also
--------
char.title
"""
return asarray(title(self))
def translate(self, table, deletechars=None):
"""
For each element in `self`, return a copy of the string where
all characters occurring in the optional argument
`deletechars` are removed, and the remaining characters have
been mapped through the given translation table.
See also
--------
char.translate
"""
return asarray(translate(self, table, deletechars))
def upper(self):
"""
Return an array with the elements of `self` converted to
uppercase.
See also
--------
char.upper
"""
return asarray(upper(self))
def zfill(self, width):
"""
Return the numeric string left-filled with zeros in a string of
length `width`.
See also
--------
char.zfill
"""
return asarray(zfill(self, width))
def isnumeric(self):
"""
For each element in `self`, return True if there are only
numeric characters in the element.
See also
--------
char.isnumeric
"""
return isnumeric(self)
def isdecimal(self):
"""
For each element in `self`, return True if there are only
decimal characters in the element.
See also
--------
char.isdecimal
"""
return isdecimal(self)
def array(obj, itemsize=None, copy=True, unicode=None, order=None):
"""
Create a `chararray`.
.. note::
This class is provided for numarray backward-compatibility.
New code (not concerned with numarray compatibility) should use
arrays of type `string_` or `unicode_` and use the free functions
in :mod:`numpy.char <numpy.core.defchararray>` for fast
vectorized string operations instead.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
Parameters
----------
obj : array of str or unicode-like
itemsize : int, optional
`itemsize` is the number of characters per scalar in the
resulting array. If `itemsize` is None, and `obj` is an
object array or a Python list, the `itemsize` will be
automatically determined. If `itemsize` is provided and `obj`
is of type str or unicode, then the `obj` string will be
chunked into `itemsize` pieces.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`itemsize`, unicode, `order`, etc.).
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
`None` and `obj` is one of the following:
- a `chararray`,
- an ndarray of type `str` or `unicode`
- a Python str or unicode object,
then the unicode setting of the output array will be
automatically determined.
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is 'A', then the returned array may
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
"""
if isinstance(obj, (_bytes, _unicode)):
if unicode is None:
if isinstance(obj, _unicode):
unicode = True
else:
unicode = False
if itemsize is None:
itemsize = _len(obj)
shape = _len(obj) // itemsize
if unicode:
if sys.maxunicode == 0xffff:
# On a narrow Python build, the buffer for Unicode
# strings is UCS2, which doesn't match the buffer for
# NumPy Unicode types, which is ALWAYS UCS4.
# Therefore, we need to convert the buffer. On Python
# 2.6 and later, we can use the utf_32 codec. Earlier
# versions don't have that codec, so we convert to a
# numerical array that matches the input buffer, and
# then use NumPy to convert it to UCS4. All of this
# should happen in native endianness.
obj = obj.encode('utf_32')
else:
obj = _unicode(obj)
else:
# Let the default Unicode -> string encoding (if any) take
# precedence.
obj = _bytes(obj)
return chararray(shape, itemsize=itemsize, unicode=unicode,
buffer=obj, order=order)
if isinstance(obj, (list, tuple)):
obj = numpy.asarray(obj)
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
# If we just have a vanilla chararray, create a chararray
# view around it.
if not isinstance(obj, chararray):
obj = obj.view(chararray)
if itemsize is None:
itemsize = obj.itemsize
# itemsize is in 8-bit chars, so for Unicode, we need
# to divide by the size of a single Unicode character,
# which for NumPy is always 4
if issubclass(obj.dtype.type, unicode_):
itemsize //= 4
if unicode is None:
if issubclass(obj.dtype.type, unicode_):
unicode = True
else:
unicode = False
if unicode:
dtype = unicode_
else:
dtype = string_
if order is not None:
obj = numpy.asarray(obj, order=order)
if (copy or
(itemsize != obj.itemsize) or
(not unicode and isinstance(obj, unicode_)) or
(unicode and isinstance(obj, string_))):
obj = obj.astype((dtype, long(itemsize)))
return obj
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
if itemsize is None:
# Since no itemsize was specified, convert the input array to
# a list so the ndarray constructor will automatically
# determine the itemsize for us.
obj = obj.tolist()
# Fall through to the default case
if unicode:
dtype = unicode_
else:
dtype = string_
if itemsize is None:
val = narray(obj, dtype=dtype, order=order, subok=True)
else:
val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
return val.view(chararray)
def asarray(obj, itemsize=None, unicode=None, order=None):
"""
Convert the input to a `chararray`, copying the data only if
necessary.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
         (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
Parameters
----------
obj : array of str or unicode-like
itemsize : int, optional
`itemsize` is the number of characters per scalar in the
resulting array. If `itemsize` is None, and `obj` is an
object array or a Python list, the `itemsize` will be
automatically determined. If `itemsize` is provided and `obj`
is of type str or unicode, then the `obj` string will be
chunked into `itemsize` pieces.
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
`None` and `obj` is one of the following:
- a `chararray`,
          - an ndarray of type `str` or `unicode`
- a Python str or unicode object,
then the unicode setting of the output array will be
automatically determined.
order : {'C', 'F'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest).
"""
return array(obj, itemsize, copy=False,
unicode=unicode, order=order)
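# Editor's addition: a minimal, hedged usage sketch (not part of the numpy
# source) exercising the chararray semantics documented above. It only uses
# the public numpy.char API, so it can also be pasted into a standalone script.
if __name__ == '__main__':
    import numpy as np
    a = np.char.array(['hello ', 'numpy  '])       # trailing whitespace is stored...
    print(a[0])                                    # ...but stripped on indexing -> 'hello'
    b = np.char.array(['hello', 'numpy'])
    print(a == b)                                  # comparisons also ignore trailing whitespace
    chunks = np.char.array('abcdef', itemsize=2)   # a plain str is chunked into itemsize pieces
    print(chunks)                                  # ['ab' 'cd' 'ef']
    print(np.char.asarray(a) is a)                 # asarray makes no copy for an existing chararray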
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/numpy/core/defchararray.py
|
Python
|
mit
| 67,393
|
# -*- coding: utf-8 -*-
# Copyright 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import models
|
akretion/stock-logistics-workflow
|
stock_picking_manual_procurement_group/__init__.py
|
Python
|
agpl-3.0
| 149
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting configuration property editor and REST operations."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import cgi
import urllib
from controllers import sites
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import models
from models import roles
from models import transforms
from modules.oeditor import oeditor
from google.appengine.api import users
from google.appengine.ext import db
# This is a template because the value type is not yet known.
SCHEMA_JSON_TEMPLATE = """
{
"id": "Configuration Property",
"type": "object",
"description": "Configuration Property Override",
"properties": {
"name" : {"type": "string"},
"value": {"optional": true, "type": "%s"},
"is_draft": {"type": "boolean"}
}
}
"""
# This is a template because the doc_string is not yet known.
SCHEMA_ANNOTATIONS_TEMPLATE = [
(['title'], 'Configuration Property Override'),
(['properties', 'name', '_inputex'], {
'label': 'Name', '_type': 'uneditable'}),
oeditor.create_bool_select_annotation(
['properties', 'is_draft'], 'Status', 'Pending', 'Active',
description='<strong>Active</strong>: This value is active and '
'overrides all other defaults.<br/><strong>Pending</strong>: This '
'value is not active yet, and the default settings still apply.')]
class ConfigPropertyRights(object):
"""Manages view/edit rights for configuration properties."""
@classmethod
def can_view(cls):
return cls.can_edit()
@classmethod
def can_edit(cls):
return roles.Roles.is_super_admin()
@classmethod
def can_delete(cls):
return cls.can_edit()
@classmethod
def can_add(cls):
return cls.can_edit()
class ConfigPropertyEditor(object):
"""An editor for any configuration property."""
# Map of configuration property type into inputex type.
type_map = {str: 'string', int: 'integer', bool: 'boolean'}
@classmethod
def get_schema_annotations(cls, config_property):
"""Gets editor specific schema annotations."""
doc_string = '%s Default: \'%s\'.' % (
config_property.doc_string, config_property.default_value)
item_dict = [] + SCHEMA_ANNOTATIONS_TEMPLATE
item_dict.append((
['properties', 'value', '_inputex'], {
'label': 'Value', '_type': '%s' % cls.get_value_type(
config_property),
'description': doc_string}))
return item_dict
@classmethod
def get_value_type(cls, config_property):
"""Gets an editor specific type for the property."""
value_type = cls.type_map[config_property.value_type]
if not value_type:
raise Exception('Unknown type: %s', config_property.value_type)
if config_property.value_type == str and config_property.multiline:
return 'text'
return value_type
@classmethod
def get_schema_json(cls, config_property):
"""Gets JSON schema for configuration property."""
return SCHEMA_JSON_TEMPLATE % cls.get_value_type(config_property)
def get_add_course(self):
"""Handles 'add_course' action and renders new course entry editor."""
exit_url = '/admin?action=courses'
rest_url = CoursesItemRESTHandler.URI
template_values = {}
template_values[
'page_title'] = 'Course Builder - Add Course'
template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
self, CoursesItemRESTHandler.SCHEMA_JSON,
CoursesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
None, rest_url, exit_url,
auto_return=True,
save_button_caption='Add New Course')
self.render_page(template_values)
def get_config_edit(self):
"""Handles 'edit' property action."""
key = self.request.get('name')
if not key:
self.redirect('/admin?action=settings')
item = config.Registry.registered[key]
if not item:
self.redirect('/admin?action=settings')
template_values = {}
template_values[
'page_title'] = 'Course Builder - Edit Settings'
exit_url = '/admin?action=settings#%s' % cgi.escape(key)
rest_url = '/rest/config/item'
delete_url = '/admin?%s' % urllib.urlencode({
'action': 'config_reset',
'name': key,
'xsrf_token': cgi.escape(self.create_xsrf_token('config_reset'))})
template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
self, ConfigPropertyEditor.get_schema_json(item),
ConfigPropertyEditor.get_schema_annotations(item),
key, rest_url, exit_url, delete_url=delete_url)
self.render_page(template_values)
def post_config_override(self):
"""Handles 'override' property action."""
name = self.request.get('name')
# Find item in registry.
item = None
if name and name in config.Registry.registered.keys():
item = config.Registry.registered[name]
if not item:
self.redirect('/admin?action=settings')
# Add new entity if does not exist.
try:
entity = config.ConfigPropertyEntity.get_by_key_name(name)
except db.BadKeyError:
entity = None
if not entity:
entity = config.ConfigPropertyEntity(key_name=name)
entity.value = str(item.value)
entity.is_draft = True
entity.put()
models.EventEntity.record(
'override-property', users.get_current_user(), transforms.dumps({
'name': name, 'value': str(entity.value)}))
self.redirect('/admin?%s' % urllib.urlencode(
{'action': 'config_edit', 'name': name}))
def post_config_reset(self):
"""Handles 'reset' property action."""
name = self.request.get('name')
# Find item in registry.
item = None
if name and name in config.Registry.registered.keys():
item = config.Registry.registered[name]
if not item:
self.redirect('/admin?action=settings')
# Delete if exists.
try:
entity = config.ConfigPropertyEntity.get_by_key_name(name)
if entity:
old_value = entity.value
entity.delete()
models.EventEntity.record(
'delete-property', users.get_current_user(),
transforms.dumps({
'name': name, 'value': str(old_value)}))
except db.BadKeyError:
pass
self.redirect('/admin?action=settings')
class CoursesItemRESTHandler(BaseRESTHandler):
"""Provides REST API for course entries."""
URI = '/rest/courses/item'
SCHEMA_JSON = """
{
"id": "Course Entry",
"type": "object",
"description": "Course Entry",
"properties": {
"name": {"type": "string"},
"title": {"type": "string"},
"admin_email": {"type": "string"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'New Course Entry'),
(['properties', 'name', '_inputex'], {'label': 'Unique Name'}),
(['properties', 'title', '_inputex'], {'label': 'Course Title'}),
(['properties', 'admin_email', '_inputex'], {
'label': 'Course Admin Email'})]
def get(self):
"""Handles HTTP GET verb."""
if not ConfigPropertyRights.can_view():
transforms.send_json_response(
self, 401, 'Access denied.')
return
transforms.send_json_response(
self, 200, 'Success.',
payload_dict={
'name': 'new_course',
'title': 'My New Course',
'admin_email': self.get_user().email()},
xsrf_token=XsrfTokenManager.create_xsrf_token(
'add-course-put'))
def put(self):
"""Handles HTTP PUT verb."""
request = transforms.loads(self.request.get('request'))
if not self.assert_xsrf_token_or_fail(
request, 'add-course-put', {}):
return
if not ConfigPropertyRights.can_edit():
transforms.send_json_response(
self, 401, 'Access denied.')
return
payload = request.get('payload')
json_object = transforms.loads(payload)
name = json_object.get('name')
title = json_object.get('title')
admin_email = json_object.get('admin_email')
# Add the new course entry.
errors = []
entry = sites.add_new_course_entry(name, title, admin_email, errors)
if not entry:
errors.append('Error adding a new course entry.')
if errors:
transforms.send_json_response(self, 412, '\n'.join(errors))
return
        # We can't expect our new configuration to be immediately available
        # due to datastore query consistency limitations, so we instantiate
        # our new course here instead of using the normal sites.get_all_courses().
app_context = sites.get_all_courses(entry)[0]
# Update course with a new title and admin email.
new_course = courses.Course(None, app_context=app_context)
if not new_course.init_new_course_settings(title, admin_email):
transforms.send_json_response(
self, 412,
'Added new course entry, but failed to update title and/or '
'admin email. The course.yaml file already exists and must be '
'updated manually.')
return
transforms.send_json_response(
self, 200, 'Added.', {'entry': entry})
class ConfigPropertyItemRESTHandler(BaseRESTHandler):
"""Provides REST API for a configuration property."""
def get(self):
"""Handles REST GET verb and returns an object as JSON payload."""
key = self.request.get('key')
if not ConfigPropertyRights.can_view():
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
item = None
if key and key in config.Registry.registered.keys():
item = config.Registry.registered[key]
if not item:
self.redirect('/admin?action=settings')
try:
entity = config.ConfigPropertyEntity.get_by_key_name(key)
except db.BadKeyError:
entity = None
if not entity:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
else:
entity_dict = {'name': key, 'is_draft': entity.is_draft}
entity_dict['value'] = transforms.string_to_value(
entity.value, item.value_type)
json_payload = transforms.dict_to_json(
entity_dict,
transforms.loads(
ConfigPropertyEditor.get_schema_json(item)))
transforms.send_json_response(
self, 200, 'Success.',
payload_dict=json_payload,
xsrf_token=XsrfTokenManager.create_xsrf_token(
'config-property-put'))
def put(self):
"""Handles REST PUT verb with JSON payload."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, 'config-property-put', {'key': key}):
return
if not ConfigPropertyRights.can_edit():
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
item = None
if key and key in config.Registry.registered.keys():
item = config.Registry.registered[key]
if not item:
self.redirect('/admin?action=settings')
try:
entity = config.ConfigPropertyEntity.get_by_key_name(key)
except db.BadKeyError:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
payload = request.get('payload')
json_object = transforms.loads(payload)
new_value = item.value_type(json_object['value'])
# Validate the value.
errors = []
if item.validator:
item.validator(new_value, errors)
if errors:
transforms.send_json_response(self, 412, '\n'.join(errors))
return
# Update entity.
old_value = entity.value
entity.value = str(new_value)
entity.is_draft = json_object['is_draft']
entity.put()
models.EventEntity.record(
'put-property', users.get_current_user(), transforms.dumps({
'name': key,
'before': str(old_value), 'after': str(entity.value)}))
transforms.send_json_response(self, 200, 'Saved.')
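# Editor's addition: a minimal sketch (not part of the Course Builder source)
# showing how SCHEMA_JSON_TEMPLATE above is specialized per property type, as
# ConfigPropertyEditor.get_schema_json() does with the inputex type names.
if __name__ == '__main__':
    import json
    for value_type in ('string', 'integer', 'boolean', 'text'):
        schema = json.loads(SCHEMA_JSON_TEMPLATE % value_type)
        print('%s -> %r' % (value_type, schema['properties']['value']))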
|
haoyuchen1992/CourseBuilder
|
modules/admin/config.py
|
Python
|
apache-2.0
| 13,933
|
### main - create and run lexer from stdin
if __name__ == '__main__' :
import sys
import antlr
import rewrite_l
### create lexer - shall read from stdin
L = rewrite_l.Lexer()
try:
L.mSTART(1);
token = L.getTokenObject()
except antlr.TokenStreamException, e:
print "error: exception caught while lexing: "
### end of main
|
scottstephens/boo
|
lib/antlr-2.7.5/examples/python/lexRewrite/rewrite.py
|
Python
|
bsd-3-clause
| 355
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# from python and deps
from six.moves import StringIO
import json
import os
import shlex
# from Ansible
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes
REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = "# POWERSHELL_COMMON"
REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = '# -*- coding: utf-8 -*-'
# we've moved the module_common relative to the snippets, so fix the path
_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % path)
fd = open(path)
data = fd.read()
fd.close()
return data
def _find_snippet_imports(module_data, module_path, strip_comments):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_style = 'old'
if REPLACER in module_data:
module_style = 'new'
elif REPLACER_WINDOWS in module_data:
module_style = 'new'
elif REPLACER_JSONARGS in module_data:
module_style = 'new'
elif 'from ansible.module_utils.' in module_data:
module_style = 'new'
elif 'WANT_JSON' in module_data:
module_style = 'non_native_want_json'
output = StringIO()
lines = module_data.split('\n')
snippet_names = []
for line in lines:
if REPLACER in line:
output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
snippet_names.append('basic')
if REPLACER_WINDOWS in line:
ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
output.write(ps_data)
snippet_names.append('powershell')
elif line.startswith('from ansible.module_utils.'):
tokens=line.split(".")
import_error = False
if len(tokens) != 3:
import_error = True
if " import *" not in line:
import_error = True
if import_error:
raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
snippet_name = tokens[2].split()[0]
snippet_names.append(snippet_name)
output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
else:
if strip_comments and line.startswith("#") or line == '':
pass
output.write(line)
output.write("\n")
if not module_path.endswith(".ps1"):
# Unixy modules
if len(snippet_names) > 0 and not 'basic' in snippet_names:
raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
else:
# Windows modules
if len(snippet_names) > 0 and not 'powershell' in snippet_names:
raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
return (output.getvalue(), module_style)
# ******************************************************************************
def modify_module(module_path, module_args, task_vars=dict(), strip_comments=False):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
All modules are required to import at least basic, though there will also
be other snippets.
    For powershell, there are equivalent conventions like this:
# POWERSHELL_COMMON
which results in the inclusion of the common code from powershell.ps1
"""
### TODO: Optimization ideas if this code is actually a source of slowness:
# * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info)
# * Use pyminifier if installed
# * comment stripping/pyminifier needs to have config setting to turn it
# off for debugging purposes (goes along with keep remote but should be
# separate otherwise users wouldn't be able to get info on what the
# minifier output)
# * Only split into lines and recombine into strings once
# * Cache the modified module? If only the args are different and we do
    #   that as the last step we could cache all the work up to that point.
with open(module_path) as f:
# read in the module source
module_data = f.read()
(module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)
module_args_json = json.dumps(module_args).encode('utf-8')
python_repred_args = repr(module_args_json)
# these strings should be part of the 'basic' snippet which is required to be included
module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
module_data = module_data.replace(REPLACER_WINARGS, module_args_json)
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
if module_style == 'new':
facility = C.DEFAULT_SYSLOG_FACILITY
if 'ansible_syslog_facility' in task_vars:
facility = task_vars['ansible_syslog_facility']
module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
lines = module_data.split(b"\n", 1)
shebang = None
if lines[0].startswith(b"#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
if interpreter_config in task_vars:
interpreter = to_bytes(task_vars[interpreter_config], errors='strict')
lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
if os.path.basename(interpreter).startswith('python'):
lines.insert(1, ENCODING_STRING)
else:
# No shebang, assume a binary module?
pass
module_data = b"\n".join(lines)
return (module_data, module_style, shebang)
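# Editor's addition: a hedged sketch (not part of the Ansible source) of the
# module-style detection rules applied by _find_snippet_imports() above, run on
# toy module sources so it does not need the snippet files that _slurp() reads.
if __name__ == '__main__':
    def _detect_style(source):
        # Condensed from the chain of checks in _find_snippet_imports().
        if REPLACER in source or REPLACER_WINDOWS in source or REPLACER_JSONARGS in source:
            return 'new'
        if 'from ansible.module_utils.' in source:
            return 'new'
        if 'WANT_JSON' in source:
            return 'non_native_want_json'
        return 'old'
    print(_detect_style("from ansible.module_utils.basic import *\n"))  # -> new
    print(_detect_style("#!/bin/sh\necho hello\n"))                     # -> old
    print(_detect_style("#!/usr/bin/env ruby\n# WANT_JSON\n"))          # -> non_native_want_json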
|
attakei/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 8,252
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Finds revisions from the Thunderbird migration that don't have based_on
set correctly, and are still relevant, and fixes that.
Run this script like `./manage.py runscript fix_tb_basedon`.
"""
import sys
from traceback import print_exc
from django.db.models import Q
from kitsune.wiki.models import Document, Revision
def run():
try:
run_()
except Exception:
print_exc()
raise
class Progress():
def __init__(self, total):
self.current = 0
self.total = total
def tick(self, incr=1):
self.current += incr
self.draw()
def draw(self):
self._wr('{0.current} / {0.total}\r'.format(self))
def _wr(self, s):
sys.stdout.write(s)
sys.stdout.flush()
def run_():
to_process = list(Document.objects.filter(
~Q(parent=None),
current_revision__based_on=None,
products__slug='thunderbird'))
if len(to_process) == 0:
print 'Nothing to do.'
prog = Progress(len(to_process))
for doc in to_process:
prog.tick()
oldest_parent_rev = (Revision.objects.filter(document=doc.parent)
.order_by('id')[0])
# It has localizations, clearly it should be localizable.
if not doc.parent.is_localizable:
doc.parent.is_localizable = True
doc.parent.save()
doc.current_revision.based_on = oldest_parent_rev
doc.current_revision.save()
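# Editor's addition: a tiny standalone check (not part of the Kitsune script)
# of the Progress helper above; it needs nothing from Django.
if __name__ == '__main__':
    prog = Progress(3)
    for _ in range(3):
        prog.tick()          # redraws "1 / 3", "2 / 3", "3 / 3" on one line
    sys.stdout.write('\n')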
|
feer56/Kitsune1
|
scripts/fix_tb_basedon.py
|
Python
|
bsd-3-clause
| 1,520
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, IBM Corp
# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_local_user_manager
short_description: Manage local users on an ESXi host
description:
- Manage local users on an ESXi host
version_added: "2.2"
author:
- Andreas Nafpliotis (@nafpliot-ibm)
notes:
- Tested on ESXi 6.0
- Be sure that the ESXi user used for login has the appropriate rights to create / delete / edit users
requirements:
- "python >= 2.6"
- PyVmomi installed
options:
local_user_name:
description:
- The local user name to be changed.
required: True
local_user_password:
description:
- The password to be set.
required: False
local_user_description:
description:
- Description for the user.
required: False
state:
description:
- Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
choices: ['present', 'absent']
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_local_user_manager command from Ansible Playbooks
- name: Add local user to ESXi
local_action:
module: vmware_local_user_manager
hostname: esxi_hostname
username: root
password: vmware
local_user_name: foo
'''
RETURN = '''# '''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class VMwareLocalUserManager(PyVmomi):
def __init__(self, module):
super(VMwareLocalUserManager, self).__init__(module)
self.local_user_name = self.module.params['local_user_name']
self.local_user_password = self.module.params['local_user_password']
self.local_user_description = self.module.params['local_user_description']
self.state = self.module.params['state']
if self.is_vcenter():
self.module.fail_json(msg="Failed to get local account manager settings "
"from ESXi server: %s" % self.module.params['hostname'],
details="It seems that %s is a vCenter server instead of an "
"ESXi server" % self.module.params['hostname'])
def process_state(self):
try:
local_account_manager_states = {
'absent': {
'present': self.state_remove_user,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_update_user,
'absent': self.state_create_user,
}
}
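            # (Editor's note.) The outer key is the desired state from the task
            # ('present'/'absent'); the inner key is the current state returned
            # by check_local_user_manager_state(). The lookup below therefore
            # dispatches to create/update/remove or a no-op in a single step.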
local_account_manager_states[self.state][self.check_local_user_manager_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def check_local_user_manager_state(self):
user_account = self.find_user_account()
if not user_account:
return 'absent'
else:
return 'present'
def find_user_account(self):
searchStr = self.local_user_name
exactMatch = True
findUsers = True
findGroups = False
user_account = self.content.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, findUsers, findGroups)
return user_account
def create_account_spec(self):
account_spec = vim.host.LocalAccountManager.AccountSpecification()
account_spec.id = self.local_user_name
account_spec.password = self.local_user_password
account_spec.description = self.local_user_description
return account_spec
def state_create_user(self):
account_spec = self.create_account_spec()
try:
self.content.accountManager.CreateUser(account_spec)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_update_user(self):
account_spec = self.create_account_spec()
try:
self.content.accountManager.UpdateUser(account_spec)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_remove_user(self):
try:
self.content.accountManager.RemoveUser(self.local_user_name)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(local_user_name=dict(required=True, type='str'),
local_user_password=dict(type='str', no_log=True),
local_user_description=dict(type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False)
vmware_local_user_manager = VMwareLocalUserManager(module)
vmware_local_user_manager.process_state()
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py
|
Python
|
gpl-3.0
| 6,371
|
#!/usr/bin/env python2.7
from __future__ import print_function
import struct
import sys
import numpy
class Analyzer:
"""
    The binary format is a sequence of records, each holding three big-endian doubles:
time since the beginning of the measurement : double
unknown and irrelevant field : double
momentary consumption calculated for the current time segment : double
"""
def __init__(self):
self.duration = 0.0
self.consumption = []
self.mean = 0.0
self.std = 0.0
self.avg = 0.0
self.averages = []
def read_file(self, file_path):
binary = bytearray()
with open(file_path, "r") as f:
binary = bytearray(f.read())
for i in range(0, len(binary) - 24, 24):
res = struct.unpack(">ddd", binary[i:i+24])
current_duration = res[0]
if not current_duration > self.duration:
print("Unexpected elapsed time value, lower than the previous one.")
exit(2) # this should never happen because the file is written sequentially
current_consumption = res[2]
self.averages.append(current_consumption / (current_duration - self.duration))
self.duration = current_duration
self.consumption.append(current_consumption)
self.calculate_stats()
def calculate_stats(self):
self.mean = numpy.mean(self.averages)
self.std = numpy.std(self.averages)
self.avg = sum(self.consumption) / self.duration
if __name__ == "__main__":
for file_path in sys.argv[1:]:
analyzer = Analyzer()
analyzer.read_file(file_path)
print("{}\n\tavg: {}\n\tmean: {}\n\tstd: {}".format(file_path, analyzer.avg, analyzer.mean, analyzer.std))
|
rokuz/omim
|
tools/python/InstrumentsTraceParser.py
|
Python
|
apache-2.0
| 1,746
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from shutil import copy
from .host_test_plugins import HostTestPluginBase
from time import sleep
class HostTestPluginCopyMethod_Mbed(HostTestPluginBase):
def generic_mbed_copy(self, image_path, destination_disk):
""" Generic mbed copy method for "mbed enabled" devices.
            It uses the standard python shutil function to copy
image_file (target specific binary) to device's disk.
"""
result = True
if not destination_disk.endswith('/') and not destination_disk.endswith('\\'):
destination_disk += '/'
try:
copy(image_path, destination_disk)
except Exception as e:
self.print_plugin_error("shutil.copy('%s', '%s')"% (image_path, destination_disk))
self.print_plugin_error("Error: %s"% str(e))
result = False
return result
# Plugin interface
name = 'HostTestPluginCopyMethod_Mbed'
type = 'CopyMethod'
stable = True
capabilities = ['shutil', 'default']
required_parameters = ['image_path', 'destination_disk', 'program_cycle_s']
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return True
def execute(self, capability, *args, **kwargs):
""" Executes capability by name.
            Each capability may directly call some command line
            program or execute a Python function built into the plugin.
"""
result = False
if self.check_parameters(capability, *args, **kwargs) is True:
# Capability 'default' is a dummy capability
if capability == 'shutil':
image_path = kwargs['image_path']
destination_disk = kwargs['destination_disk']
program_cycle_s = kwargs['program_cycle_s']
# Wait for mount point to be ready
self.check_mount_point_ready(destination_disk) # Blocking
result = self.generic_mbed_copy(image_path, destination_disk)
# Allow mbed to cycle
sleep(program_cycle_s)
return result
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginCopyMethod_Mbed()
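# Editor's addition: a hedged usage sketch (not part of the mbed SDK) showing
# how a host test would typically drive this plugin via its 'shutil'
# capability. The image path and mount point below are placeholders only.
if __name__ == '__main__':
    plugin = load_plugin()
    plugin.setup()
    result = plugin.execute('shutil',
                            image_path='build/example.bin',    # hypothetical binary
                            destination_disk='/media/MBED/',   # hypothetical mount point
                            program_cycle_s=1.5)
    print('copy result: %s' % result)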
|
c1728p9/mbed-os
|
tools/host_tests/host_tests_plugins/module_copy_mbed.py
|
Python
|
apache-2.0
| 2,899
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from java.awt import GridLayout
from java.awt.event import WindowAdapter
from javax.swing import JLabel, JOptionPane, JPanel, JPasswordField, JTextField
from javax.swing.JOptionPane import PLAIN_MESSAGE, UNINITIALIZED_VALUE, \
YES_NO_OPTION, OK_CANCEL_OPTION, OK_OPTION, DEFAULT_OPTION
class _SwingDialog(object):
def __init__(self, pane):
self._pane = pane
def show(self):
self._show_dialog(self._pane)
return self._get_value(self._pane)
def _show_dialog(self, pane):
dialog = pane.createDialog(None, 'Robot Framework')
dialog.setModal(False)
dialog.setAlwaysOnTop(True)
dialog.addWindowFocusListener(pane.focus_listener)
dialog.show()
while dialog.isShowing():
time.sleep(0.2)
dialog.dispose()
def _get_value(self, pane):
value = pane.getInputValue()
return value if value != UNINITIALIZED_VALUE else None
class MessageDialog(_SwingDialog):
def __init__(self, message):
pane = WrappedOptionPane(message, PLAIN_MESSAGE, DEFAULT_OPTION)
_SwingDialog.__init__(self, pane)
class InputDialog(_SwingDialog):
def __init__(self, message, default, hidden=False):
self._input_field = JPasswordField() if hidden else JTextField()
self._input_field.setText(default)
self._input_field.selectAll()
panel = JPanel(layout=GridLayout(2, 1))
panel.add(JLabel(message))
panel.add(self._input_field)
pane = WrappedOptionPane(panel, PLAIN_MESSAGE, OK_CANCEL_OPTION)
pane.set_focus_listener(self._input_field)
_SwingDialog.__init__(self, pane)
def _get_value(self, pane):
if pane.getValue() != OK_OPTION:
return None
return self._input_field.getText()
class SelectionDialog(_SwingDialog):
def __init__(self, message, options):
pane = WrappedOptionPane(message, PLAIN_MESSAGE, OK_CANCEL_OPTION)
pane.setWantsInput(True)
pane.setSelectionValues(options)
_SwingDialog.__init__(self, pane)
class PassFailDialog(_SwingDialog):
def __init__(self, message):
pane = WrappedOptionPane(message, PLAIN_MESSAGE, YES_NO_OPTION,
None, ['PASS', 'FAIL'], 'PASS')
_SwingDialog.__init__(self, pane)
def _get_value(self, pane):
return pane.getValue() == 'PASS'
class WrappedOptionPane(JOptionPane):
focus_listener = None
def getMaxCharactersPerLineCount(self):
return 120
def set_focus_listener(self, component):
self.focus_listener = WindowFocusListener(component)
class WindowFocusListener(WindowAdapter):
def __init__(self, component):
self.component = component
def windowGainedFocus(self, event):
self.component.requestFocusInWindow()
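# Editor's addition: a hedged manual demo (not part of Robot Framework). Like
# the module itself it requires Jython with Swing, e.g. `jython dialogs_jy.py`;
# it simply opens each dialog type in turn and prints what the user chose.
if __name__ == '__main__':
    MessageDialog('Hello from Robot Framework').show()
    print(InputDialog('Your name:', 'robot').show())
    print(SelectionDialog('Pick one:', ['cat', 'dog']).show())
    print(PassFailDialog('Did the step pass?').show())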
|
caio2k/RIDE
|
src/robotide/lib/robot/libraries/dialogs_jy.py
|
Python
|
apache-2.0
| 3,458
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This is very different to AboutModules in Ruby Koans
# Our AboutMultipleInheritance class is a little more comparable
#
from runner.koan import *
#
# Package hierarchy of Python Koans project:
#
# contemplate_koans.py
# koans/
# __init__.py
# about_asserts.py
# about_attribute_access.py
# about_class_attributes.py
# about_classes.py
# ...
# a_package_folder/
# __init__.py
# a_module.py
class AboutPackages(Koan):
def test_subfolders_can_form_part_of_a_module_package(self):
# Import ./a_package_folder/a_module.py
from .a_package_folder.a_module import Duck
duck = Duck()
self.assertEqual(__, duck.name)
def test_subfolders_become_modules_if_they_have_an_init_module(self):
# Import ./a_package_folder/__init__.py
from .a_package_folder import an_attribute
self.assertEqual(__, an_attribute)
# ------------------------------------------------------------------
def test_use_absolute_imports_to_import_upper_level_modules(self):
# Import /contemplate_koans.py
import contemplate_koans
self.assertEqual(__, contemplate_koans.__name__)
        # contemplate_koans.py is the root module in this package because it's
        # the first python module called in koans.
        #
        # If contemplate_koans.py were based in a_package_folder, that would be
        # the root folder, which would make reaching the koans folder
        # almost impossible. So always leave the starting python script in
        # a folder which can reach everything else.
def test_import_a_module_in_a_subfolder_folder_using_an_absolute_path(self):
# Import contemplate_koans.py/koans/a_package_folder/a_module.py
from koans.a_package_folder.a_module import Duck
self.assertEqual(__, Duck.__module__)
|
tokyo-jesus/university
|
src/python/koans/python3/koans/about_packages.py
|
Python
|
unlicense
| 1,909
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
class DenseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
# (-0.5 + 0.2 + 0.1) / (0.19 + 1)
expected_right_weight = 0.1680672
# expected_right_weight * -(-0.5 + 0.2 + 0.1))
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
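  # (Editor's note, not part of the original test.) The hand-computed numbers
  # above all follow the pattern spelled out in the inline comments: for a
  # child with gradient sum G and hessian sum H, l1=0.1 first shrinks G toward
  # zero, then
  #   weight    = -(shrunk G) / (H + l2)      e.g. -(1.2 - 0.1) / (0.2 + 1)
  #   gain      = weight * -(shrunk G)
  #   bias_gain = (shrunk G_total)**2 / (H_total + l2)
  # and the asserted split gain is left_gain + right_gain - bias_gain.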
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.test_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.test_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# Each hessian is a diagonal of a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# The handler was inactive, so it shouldn't return any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testGenerateFeatureSplitCandidatesWithTreeComplexity(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0.5,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
# (-0.5 + 0.2 + 0.1) / (0.19 + 1)
expected_right_weight = 0.1680672
# expected_right_weight * -(-0.5 + 0.2 + 0.1))
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Make sure the gain is subtracted by the tree complexity regularization.
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain - 0.5,
gains[0], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active bucket here
      # so -0.5 gain is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesWithMinNodeWeight(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 2.0) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 2])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0.5,
min_node_weight=1.5,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the gain on partition 0 to be -0.5.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Make sure the gain is subtracted by the tree complexity regularization.
self.assertAllClose(-0.5, gains[0], 0.00001)
self.assertEqual(0, split_node.feature_column)
# Check the split on partition 1.
# (-4 + 0.1) / (2 + 1)
expected_left_weight = -1.3
expected_right_weight = 0
      # Verify the candidate for partition 1; there's only one active bucket here,
      # so a gain of -0.5 is expected (because of the tree complexity penalty).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
class SparseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2) / (0.12 + 0.2 + 2)
expected_left_weight = -0.603448275862069
# (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
expected_left_gain = 0.8448275862068965
# 0.5 / (0.07 + 2)
expected_right_weight = 0.24154589371980678
# 0.5 ** 2 / (0.07 + 2)
expected_right_gain = 0.12077294685990339
# (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
expected_bias_gain = 0.3389121338912133
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
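      # Sparse splits also record a default direction for examples whose
      # feature value is missing (i1 here); the field name below shows the
      # handler chose to send missing values to the right for this partition.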
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.test_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant(
[[0.2, 1.4], [-0.5, 0.1], [1.2, 3], [4.0, -3]])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
      # Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.test_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant(
[[0.2, 1.4], [-0.5, 0.1], [1.2, 3], [4.0, -3]])
# Each hessian is a diagonal from a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
      # Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
sparse_float_column=sparse_column,
init_stamp_token=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
      # The handler was inactive, so it shouldn't produce any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testEmpty(self):
with self.test_session() as sess:
indices = array_ops.constant([], dtype=dtypes.int64, shape=[0, 2])
# No values in this feature column in this mini-batch.
values = array_ops.constant([], dtype=dtypes.float32)
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
if __name__ == "__main__":
googletest.main()
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py
|
Python
|
mit
| 44,613
|
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about one or more compute instances
author: Monty
version_added: "2.0"
description:
- Retrieve facts about server instances from OpenStack.
notes:
- This module creates a new top-level C(openstack_servers) fact, which
contains a list of servers.
requirements:
- "python >= 2.6"
- "shade"
options:
server:
description:
- restrict results to servers with names or UUID matching
         this glob expression (e.g., C(web*)).
required: false
default: None
detailed:
description:
- when true, return additional detail about servers at the expense
of additional API calls.
required: false
default: false
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all servers named C(web*):
- os_server_facts:
cloud: rax-dfw
server: web*
- debug:
var: openstack_servers
'''
import fnmatch
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=False),
detailed=dict(required=False, type='bool'),
)
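    # openstack_full_argument_spec() merges these module-specific options with
    # the shared OpenStack auth/connection options (cloud, auth, region_name,
    # etc.) accepted by the os_* modules.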
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
openstack_servers = cloud.list_servers(
detailed=module.params['detailed'])
if module.params['server']:
            # filter servers whose name or UUID matches the glob pattern
pattern = module.params['server']
openstack_servers = [server for server in openstack_servers
if fnmatch.fnmatch(server['name'], pattern) or fnmatch.fnmatch(server['id'], pattern)]
module.exit_json(changed=False, ansible_facts=dict(
openstack_servers=openstack_servers))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
Tatsh-ansible/ansible
|
lib/ansible/modules/cloud/openstack/os_server_facts.py
|
Python
|
gpl-3.0
| 3,271
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls \
import forms as fw_forms
from openstack_dashboard.dashboards.project.firewalls \
import tabs as fw_tabs
from openstack_dashboard.dashboards.project.firewalls \
import workflows as fw_workflows
AddRouterToFirewall = fw_forms.AddRouterToFirewall
InsertRuleToPolicy = fw_forms.InsertRuleToPolicy
RemoveRouterFromFirewall = fw_forms.RemoveRouterFromFirewall
RemoveRuleFromPolicy = fw_forms.RemoveRuleFromPolicy
UpdateFirewall = fw_forms.UpdateFirewall
UpdatePolicy = fw_forms.UpdatePolicy
UpdateRule = fw_forms.UpdateRule
FirewallDetailsTabs = fw_tabs.FirewallDetailsTabs
FirewallTabs = fw_tabs.FirewallTabs
PolicyDetailsTabs = fw_tabs.PolicyDetailsTabs
RuleDetailsTabs = fw_tabs.RuleDetailsTabs
AddFirewall = fw_workflows.AddFirewall
AddPolicy = fw_workflows.AddPolicy
AddRule = fw_workflows.AddRule
class IndexView(tabs.TabView):
tab_group_class = (FirewallTabs)
template_name = 'project/firewalls/details_tabs.html'
page_title = _("Firewalls")
def post(self, request, *args, **kwargs):
obj_ids = request.POST.getlist('object_ids')
action = request.POST['action']
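        # The table action value encodes the object type and, for row-level
        # deletes, the object id (e.g. something like "...__deleterule__<id>";
        # the exact prefix depends on the table), so both are parsed from it.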
obj_type = re.search('.delete([a-z]+)', action).group(1)
if not obj_ids:
obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
if obj_type == 'rule':
for obj_id in obj_ids:
try:
api.fwaas.rule_delete(request, obj_id)
messages.success(request, _('Deleted rule %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete rule. %s') % e)
if obj_type == 'policy':
for obj_id in obj_ids:
try:
api.fwaas.policy_delete(request, obj_id)
messages.success(request, _('Deleted policy %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete policy. %s') % e)
if obj_type == 'firewall':
for obj_id in obj_ids:
try:
api.fwaas.firewall_delete(request, obj_id)
messages.success(request,
_('Deleted firewall %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete firewall. %s') % e)
return self.get(request, *args, **kwargs)
class AddRuleView(workflows.WorkflowView):
workflow_class = AddRule
template_name = "project/firewalls/addrule.html"
page_title = _("Add New Rule")
class AddPolicyView(workflows.WorkflowView):
workflow_class = AddPolicy
template_name = "project/firewalls/addpolicy.html"
page_title = _("Add New Policy")
class AddFirewallView(workflows.WorkflowView):
workflow_class = AddFirewall
template_name = "project/firewalls/addfirewall.html"
page_title = _("Add New Firewall")
def get_workflow(self):
if api.neutron.is_extension_supported(self.request,
'fwaasrouterinsertion'):
AddFirewall.register(fw_workflows.SelectRoutersStep)
workflow = super(AddFirewallView, self).get_workflow()
return workflow
class FireWallDetailTabs(tabs.TabView):
template_name = 'project/firewalls/details_tabs.html'
class RuleDetailsView(FireWallDetailTabs):
tab_group_class = (RuleDetailsTabs)
page_title = _("Firewall Rule Details")
class PolicyDetailsView(FireWallDetailTabs):
tab_group_class = (PolicyDetailsTabs)
page_title = _("Firewall Policy Details")
class FirewallDetailsView(FireWallDetailTabs):
tab_group_class = (FirewallDetailsTabs)
page_title = _("Firewall Details")
class UpdateRuleView(forms.ModalFormView):
form_class = UpdateRule
form_id = "update_rule_form"
template_name = "project/firewalls/updaterule.html"
context_object_name = 'rule'
modal_header = _("Edit Rule")
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updaterule"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Rule {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdateRuleView, self).get_context_data(**kwargs)
context['rule_id'] = self.kwargs['rule_id']
args = (self.kwargs['rule_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rule_id = self.kwargs['rule_id']
try:
rule = api.fwaas.rule_get(self.request, rule_id)
return rule
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rule details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rule = self._get_object()
initial = rule.get_dict()
protocol = initial['protocol']
initial['protocol'] = protocol.upper() if protocol else 'ANY'
initial['action'] = initial['action'].upper()
return initial
class UpdatePolicyView(forms.ModalFormView):
form_class = UpdatePolicy
form_id = "update_policy_form"
template_name = "project/firewalls/updatepolicy.html"
context_object_name = 'policy'
modal_header = _("Edit Policy")
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updatepolicy"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Policy {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdatePolicyView, self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
return initial
class UpdateFirewallView(forms.ModalFormView):
form_class = UpdateFirewall
form_id = "update_firewall_form"
template_name = "project/firewalls/updatefirewall.html"
context_object_name = 'firewall'
modal_header = _("Edit Firewall")
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updatefirewall"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Firewall {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdateFirewallView, self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
args = (self.kwargs['firewall_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
firewall_id = self.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(self.request,
firewall_id)
return firewall
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class InsertRuleToPolicyView(forms.ModalFormView):
form_class = InsertRuleToPolicy
form_id = "update_policy_form"
modal_header = _("Insert Rule into Policy")
template_name = "project/firewalls/insert_rule_to_policy.html"
context_object_name = 'policy'
submit_url = "horizon:project:firewalls:insertrule"
submit_label = _("Save Changes")
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Insert Rule to Policy")
def get_context_data(self, **kwargs):
context = super(InsertRuleToPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RemoveRuleFromPolicyView(forms.ModalFormView):
form_class = RemoveRuleFromPolicy
form_id = "update_policy_form"
modal_header = _("Remove Rule from Policy")
template_name = "project/firewalls/remove_rule_from_policy.html"
context_object_name = 'policy'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:removerule"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Remove Rule from Policy")
def get_context_data(self, **kwargs):
context = super(RemoveRuleFromPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
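# Shared scaffolding for the add/remove-router modal views below: both need
# the same firewall lookup and context handling and differ only in form
# class, template and submit URL.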
class RouterCommonView(forms.ModalFormView):
form_id = "update_firewall_form"
context_object_name = 'firewall'
submit_label = _("Save Changes")
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(RouterCommonView,
self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
args = (self.kwargs['firewall_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
firewall_id = self.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(self.request, firewall_id)
return firewall
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class AddRouterToFirewallView(RouterCommonView):
form_class = AddRouterToFirewall
modal_header = _("Add Router to Firewall")
template_name = "project/firewalls/add_router_to_firewall.html"
submit_url = "horizon:project:firewalls:addrouter"
page_title = _("Add Router to Firewall")
class RemoveRouterFromFirewallView(RouterCommonView):
form_class = RemoveRouterFromFirewall
modal_header = _("Remove Router from Firewall")
template_name = "project/firewalls/remove_router_from_firewall.html"
submit_url = "horizon:project:firewalls:removerouter"
page_title = _("Remove Router from Firewall")
|
newrocknj/horizon
|
openstack_dashboard/dashboards/project/firewalls/views.py
|
Python
|
apache-2.0
| 14,127
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
test_records = [[{"doctype":"Designation", "designation_name":"_Test Designation"}]]
|
saurabh6790/test-med-app
|
hr/doctype/designation/test_designation.py
|
Python
|
agpl-3.0
| 215
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
tartavull/google-cloud-python
|
trace/tests/__init__.py
|
Python
|
apache-2.0
| 575
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
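        # Keep only the train/test score columns returned by _fit_and_score
        # (the remaining entries are bookkeeping such as sample counts and
        # timing) and reshape to (n_cv_folds, n_unique_ticks, 2); the transpose
        # below then yields train and test score matrices of shape
        # (n_ticks, n_cv_folds).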
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
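# A minimal usage sketch for learning_curve (illustrative only; the estimator
# and dataset below come from scikit-learn itself and are just one possible
# choice):
#
#     from sklearn.datasets import load_digits
#     from sklearn.svm import SVC
#     digits = load_digits()
#     sizes, train_scores, test_scores = learning_curve(
#         SVC(kernel="linear"), digits.data, digits.target,
#         train_sizes=[0.3, 0.6, 1.0], cv=5)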
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
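    # np.split(train, train_sizes)[:-1] yields, for each requested size, only
    # the indices that are new relative to the previous size, so each loop
    # iteration feeds just the incremental slice to partial_fit while scoring
    # on the full train_subset seen so far.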
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
|
vermouthmjl/scikit-learn
|
sklearn/learning_curve.py
|
Python
|
bsd-3-clause
| 14,601
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_vpc_subnet
short_description: Manage subnets in AWS virtual private clouds
description:
- Manage subnets in AWS virtual private clouds
version_added: "2.0"
author: Robert Estelle (@erydo)
options:
az:
description:
- "The availability zone for the subnet. Only required when state=present."
required: false
default: null
cidr:
description:
- "The CIDR block for the subnet. E.g. 192.0.2.0/24. Only required when state=present."
required: false
default: null
tags:
description:
- "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
required: false
default: null
aliases: [ 'resource_tags' ]
state:
description:
- "Create or remove the subnet"
required: false
default: present
choices: [ 'present', 'absent' ]
vpc_id:
description:
- "VPC ID of the VPC in which to create the subnet."
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create subnet for database servers
ec2_vpc_subnet:
state: present
vpc_id: vpc-123456
cidr: 10.0.1.16/28
resource_tags:
Name: Database Subnet
register: database_subnet
- name: Remove subnet for database servers
ec2_vpc_subnet:
state: absent
vpc_id: vpc-123456
cidr: 10.0.1.16/28
'''
import time
try:
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
    # Re-raise the ImportError when this file is imported as a library rather
    # than executed directly as an Ansible module, so the missing boto
    # dependency surfaces immediately.
    if __name__ != '__main__':
        raise
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
class AnsibleVPCSubnetException(Exception):
pass
class AnsibleVPCSubnetCreationException(AnsibleVPCSubnetException):
pass
class AnsibleVPCSubnetDeletionException(AnsibleVPCSubnetException):
pass
class AnsibleTagCreationException(AnsibleVPCSubnetException):
pass
def get_subnet_info(subnet):
subnet_info = {'id': subnet.id,
'availability_zone': subnet.availability_zone,
'available_ip_address_count': subnet.available_ip_address_count,
'cidr_block': subnet.cidr_block,
'default_for_az': subnet.defaultForAz,
'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
'state': subnet.state,
'tags': subnet.tags,
'vpc_id': subnet.vpc_id
}
return subnet_info
def subnet_exists(vpc_conn, subnet_id):
filters = {'subnet-id': subnet_id}
subnet = vpc_conn.get_all_subnets(filters=filters)
if subnet and subnet[0].state == "available":
return subnet[0]
else:
return False
def create_subnet(vpc_conn, vpc_id, cidr, az, check_mode):
try:
new_subnet = vpc_conn.create_subnet(vpc_id, cidr, az, dry_run=check_mode)
        # Sometimes AWS takes its time to create a subnet, so using the
        # new subnet's id to do things like create tags results in an
        # exception. boto doesn't seem to refresh the 'state' of the newly
        # created subnet, i.e. it's always 'pending'.
subnet = False
while subnet is False:
subnet = subnet_exists(vpc_conn, new_subnet.id)
time.sleep(0.1)
except EC2ResponseError as e:
if e.error_code == "DryRunOperation":
subnet = None
elif e.error_code == "InvalidSubnet.Conflict":
raise AnsibleVPCSubnetCreationException("%s: the CIDR %s conflicts with another subnet with the VPC ID %s." % (e.error_code, cidr, vpc_id))
else:
raise AnsibleVPCSubnetCreationException(
'Unable to create subnet {0}, error: {1}'.format(cidr, e))
return subnet
def get_resource_tags(vpc_conn, resource_id):
return dict((t.name, t.value) for t in
vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
try:
cur_tags = get_resource_tags(vpc_conn, resource_id)
if cur_tags == tags:
return {'changed': False, 'tags': cur_tags}
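        # Diff current vs. desired tags: anything present remotely but not in
        # 'tags' is deleted (unless add_only), anything new or changed is
        # (re)created.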
to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
if to_delete and not add_only:
vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)
to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k])
if to_add:
vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)
latest_tags = get_resource_tags(vpc_conn, resource_id)
return {'changed': True, 'tags': latest_tags}
except EC2ResponseError as e:
raise AnsibleTagCreationException(
'Unable to update tags for {0}, error: {1}'.format(resource_id, e))
def get_matching_subnet(vpc_conn, vpc_id, cidr):
subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc_id})
return next((s for s in subnets if s.cidr_block == cidr), None)
def ensure_subnet_present(vpc_conn, vpc_id, cidr, az, tags, check_mode):
subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
changed = False
if subnet is None:
subnet = create_subnet(vpc_conn, vpc_id, cidr, az, check_mode)
changed = True
# Subnet will be None when check_mode is true
if subnet is None:
return {
'changed': changed,
'subnet': {}
}
if tags != subnet.tags:
ensure_tags(vpc_conn, subnet.id, tags, False, check_mode)
subnet.tags = tags
changed = True
subnet_info = get_subnet_info(subnet)
return {
'changed': changed,
'subnet': subnet_info
}
def ensure_subnet_absent(vpc_conn, vpc_id, cidr, check_mode):
subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
if subnet is None:
return {'changed': False}
try:
vpc_conn.delete_subnet(subnet.id, dry_run=check_mode)
return {'changed': True}
except EC2ResponseError as e:
raise AnsibleVPCSubnetDeletionException(
'Unable to delete subnet {0}, error: {1}'
.format(subnet.cidr_block, e))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
az=dict(default=None, required=False),
cidr=dict(default=None, required=True),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
vpc_id=dict(default=None, required=True)
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
vpc_id = module.params.get('vpc_id')
tags = module.params.get('tags')
cidr = module.params.get('cidr')
az = module.params.get('az')
state = module.params.get('state')
try:
if state == 'present':
result = ensure_subnet_present(connection, vpc_id, cidr, az, tags,
check_mode=module.check_mode)
elif state == 'absent':
result = ensure_subnet_absent(connection, vpc_id, cidr,
check_mode=module.check_mode)
except AnsibleVPCSubnetException as e:
module.fail_json(msg=str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
andreaso/ansible
|
lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py
|
Python
|
gpl-3.0
| 8,929
|
#!/usr/bin/env python2
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
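# For reference, a minimal ec2.ini sketch (values are illustrative; the option
# names mirror what read_settings() below reads, and the unconditionally read
# ones -- regions, regions_exclude, destination_variable,
# vpc_destination_variable, route53, cache_path, cache_max_age -- must exist):
#
#   [ec2]
#   regions = all
#   regions_exclude = us-gov-west-1
#   destination_variable = public_dns_name
#   vpc_destination_variable = ip_address
#   route53 = False
#   rds = False
#   elasticache = False
#   cache_path = ~/.ansible/tmp
#   cache_max_age = 300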
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
import six
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument)
self.boto_profile = self.args.boto_profile
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
self.cache_path_index = cache_dir + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get('ec2', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
for instance_filter in config.get('ec2', 'instance_filters').split(','):
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
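# As an illustration (filter values are hypothetical), an ec2.ini line such as
#   instance_filters = tag:Environment=prod,instance-state-name=running
# ends up in self.ec2_instance_filters as
#   {'tag:Environment': ['prod'], 'instance-state-name': ['running']}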
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--boto-profile', action='store',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
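# Typical invocations wired up above (the host address is hypothetical):
#   ./ec2.py --list                  # full inventory, served from cache while valid
#   ./ec2.py --host 203.0.113.10     # hostvars for a single instance
#   ./ec2.py --refresh-cache --list  # force fresh API calls
#   ./ec2.py --boto-profile prod --list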
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = {}
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to get the list of instances in a
particular region '''
try:
conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to get the list of RDS instances in a
particular region '''
try:
conn = self.connect_to_aws(rds, region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if e.reason != "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to get the list of ElastiCache clusters (with
nodes' info) in a particular region. '''
# The ElastiCache boto module doesn't provide a get_all_instances method,
# so we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if e.reason != "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
# CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to get the list of ElastiCache replication groups
in a particular region. '''
# The ElastiCache boto module doesn't provide a get_all_instances method,
# so we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if e.reason != "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache replication groups')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
# Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache replication groups')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
dest = getattr(instance, self.destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(dest):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(dest):
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.placement, dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
self.push_group(self.inventory, 'zones', instance.placement)
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
self.push(self.inventory, ami_id, dest)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
self.push(self.inventory, vpc_id_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.availability_zone, dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
self.push_group(self.inventory, 'zones', instance.availability_zone)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
self.push(self.inventory, vpc_id_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
its nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
# Redis single node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
# AWS API version for replication groups
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif type(value) == type(None):
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
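# For reference, push() and push_group() leave inventory entries shaped like
# this sketch (group and host names are hypothetical) when nested_groups is on:
#   "us-east-1": {"hosts": ["203.0.113.10"], "children": ["us-east-1a"]}
#   "regions":   {"children": ["us-east-1"]}
# With nested_groups off, groups stay plain host lists, e.g.
#   "key_deploy": ["203.0.113.10"]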
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file and sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
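# Illustrative results of the two helpers above (inputs are hypothetical):
#   uncammelize('CacheClusterId') -> 'cache_cluster_id'
#   to_safe('tag_Name=web-01')    -> 'tag_Name_web_01'  (replace_dash_in_groups True)
#   to_safe('tag_Name=web-01')    -> 'tag_Name_web-01'  (replace_dash_in_groups False)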
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
Ec2Inventory()
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/inventory/aws/ec2.py
|
Python
|
apache-2.0
| 55,406
|
#
# Copyright (C) 2012-2014 The Paparazzi Team
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import socket
import telnetlib
import sys
from ftplib import FTP
import ftplib
# Check if IP is valid
def is_ip(address):
try:
socket.inet_aton(address)
ip = True
except socket.error:
ip = False
return ip
# Helper function
def split_into_path_and_file(name):
if name.count('/') <= 0:
return ["./", name]
return name.rsplit('/', 1)
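# For example (paths here are illustrative):
#   split_into_path_and_file('/data/video/ap.elf') -> ['/data/video', 'ap.elf']
#   split_into_path_and_file('ap.elf')             -> ['./', 'ap.elf']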
# Execute a command
def execute_command(tn, command):
tn.write(command + '\n')
return tn.read_until('# ')[len(command) + 2:-4]
# Check the version
def check_version(tn, directory):
return execute_command(tn, 'cat ' + directory + '/version.txt')
# Check what currently is running on the drone
def check_running(tn):
ps_aux = execute_command(tn, 'ps')
running = ""
if 'program.elf' in ps_aux:
running += ' Native (program.elf),'
if 'dragon-prog' in ps_aux:
running += ' Native (dragon-prog),'
if 'ap.elf' in ps_aux:
running += ' Paparazzi (ap.elf),'
if 'gst-launch' in ps_aux:
running += ' GStreamer (gst-launch)'
return running[1:]
# Check the filesystem
def check_filesystem(tn):
return execute_command(tn, 'df -h')
# Reboot the drone
def reboot(tn):
execute_command(tn, 'reboot')
# Upload ftp and catch memory-full error
def uploadfile(ftp, filename, content):
try:
ftp.storbinary("STOR " + filename, content)
except ftplib.error_temp:
print("FTP UPLOAD ERROR: Uploading FAILED: Probably your ARDrone memory is full.")
sys.exit()
except:
print("FTP UPLOAD ERROR: Maybe your ARDrone memory is full?", sys.exc_info()[0])
sys.exit()
# Connect with telnet and ftp, wait until login
def connect(host):
try:
tn = telnetlib.Telnet(host, timeout=3)
ftp = FTP(host)
ftp.login()
tn.read_until('# ')
return tn, ftp
except:
print('Could not connect to Parrot UAV (host: ' + host + ')')
sys.exit(2)
# Close the telnet and ftp
def disconnect(tn, ftp):
tn.close()
ftp.close()
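# Minimal usage sketch of the helpers above; the address and directory are
# assumptions, not values defined in this file:
#
#   tn, ftp = connect('192.168.1.1')
#   print(check_version(tn, '/data/video'))
#   print(check_running(tn))
#   print(check_filesystem(tn))
#   disconnect(tn, ftp)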
|
LodewijkSikkel/paparazzi
|
sw/tools/parrot/parrot_utils.py
|
Python
|
gpl-2.0
| 2,820
|