| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
"""
Enables interactivity for CLI operations
"""
import sys
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
|
edx-solutions/edx-platform
|
common/djangoapps/util/prompt.py
|
Python
|
agpl-3.0
| 1,137
|
from flask_wtf import Form
from wtforms import TextField, DecimalField, TextAreaField, DateField, validators, PasswordField, BooleanField
class CommentForm(Form):
text = TextField('Title', [validators.Required()])
text2 = TextAreaField('Body')
longitude = DecimalField('Longitude')
latitude = DecimalField('Latitude')
date = DateField('Date')
class SignupForm(Form):
username = TextField('Username', [validators.Required()])
password = PasswordField('Password', [validators.Required(), validators.EqualTo('confirm', message='Passwords must match')])
confirm = PasswordField('Confirm Password', [validators.Required()])
email = TextField('eMail', [validators.Required(),validators.Email()])
#accept_tos = BooleanField('I accept the TOS', [validators.Required])
class LoginForm(Form):
username = TextField('Username', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
class PasswordResetForm(Form):
username = TextField('Username')
email = TextField('eMail')
class PasswordChangeForm(Form):
password = PasswordField('Password', [validators.Required()])
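# Illustrative only: a minimal Flask view sketch showing how LoginForm might be
# validated; the app, secret key, route and template name are assumptions, not
# part of this module.
from flask import Flask, redirect, render_template

app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev'  # required so Flask-WTF can generate CSRF tokens

@app.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()
    if form.validate_on_submit():
        # A real view would check form.username.data / form.password.data here.
        return redirect('/')
    return render_template('login.html', form=form)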
|
homoludens/EventMap
|
hello/forms.py
|
Python
|
agpl-3.0
| 1,156
|
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# snippy - software development and maintenance notes manager.
# Copyright 2017-2020 Heikki J. Laaksonen <laaksonen.heikki.j@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""solution: Default solutions for testing."""
from tests.lib.helper import Helper
class Solution(object): # pylint: disable=too-few-public-methods
"""Default solutions for testing."""
_BEATS = 0
_NGINX = 1
_KAFKA = 2
_KAFKA_MKDN = 3
# Default time is same for the default content. See 'Test case layouts and
# data structures' for more information.
DEFAULT_TIME = '2017-10-20T11:11:19.000001+00:00'
# Default content must be always set so that it reflects content stored
# into database. For example the tags must be sorted in correct order.
# This forces defining erroneous content in each test case. This improves
# the readability and maintainability of failure testing.
_DEFAULTS = ({
'category': 'solution',
'data':('################################################################################',
'## Description',
'################################################################################',
'',
' # Debug Elastic Beats',
'',
'################################################################################',
'## References',
'################################################################################',
'',
' # Enable logs from Filebeat',
' > https://www.elastic.co/guide/en/beats/filebeat/master/enable-filebeat-debugging.html',
'',
'################################################################################',
'## Commands',
'################################################################################',
'',
' # Run Filebeat with full log level',
' $ ./filebeat -e -c config/filebeat.yml -d "*"',
'',
'################################################################################',
'## Solutions',
'################################################################################',
'',
'################################################################################',
'## Configurations',
'################################################################################',
'',
'################################################################################',
'## Whiteboard',
'################################################################################',
''),
'brief': 'Debugging Elastic Beats',
'description': 'Debug Elastic Beats',
'name': '',
'groups': ('beats',),
'tags': ('Elastic', 'beats', 'debug', 'filebeat', 'howto'),
'links': ('https://www.elastic.co/guide/en/beats/filebeat/master/enable-filebeat-debugging.html',),
'source': '',
'versions': (),
'languages': (),
'filename': 'howto-debug-elastic-beats.txt',
'created': DEFAULT_TIME,
'updated': DEFAULT_TIME,
'uuid': '21cd5827-b6ef-4067-b5ac-3ceac07dde9f',
'digest': '4346ba4c792474308bc66bd16d747875bef9b431044824987e302b726c1d298e'
}, {
'category': 'solution',
'data':('################################################################################',
'## Description',
'################################################################################',
'',
' # Instructions how to debug nginx.',
'',
'################################################################################',
'## References',
'################################################################################',
'',
' # Official nginx debugging',
' > https://www.nginx.com/resources/admin-guide/debug/',
'',
'################################################################################',
'## Commands',
'################################################################################',
'',
' # Test if nginx is configured with --with-debug',
" $ nginx -V 2>&1 | grep -- '--with-debug'",
'',
' # Check the logs are forwarded to stdout/stderr and remove links',
' $ ls -al /var/log/nginx/',
' $ unlink /var/log/nginx/access.log',
' $ unlink /var/log/nginx/error.log',
'',
' # Reloading nginx configuration',
' $ nginx -s reload',
'',
'################################################################################',
'## Solutions',
'################################################################################',
'',
'################################################################################',
'## Configurations',
'################################################################################',
'',
' # Configuring nginx default.conf',
' $ vi conf.d/default.conf',
' upstream kibana_servers {',
' server kibana:5601;',
' }',
' upstream elasticsearch_servers {',
' server elasticsearch:9200;',
' }',
'',
'################################################################################',
'## Whiteboard',
'################################################################################',
'',
' # Change nginx configuration',
" $ docker exec -i -t $(docker ps | egrep -m 1 'petelk/nginx' | awk '{print $1}') /bin/bash",
''),
'brief': 'Debugging nginx',
'description': 'Instructions how to debug nginx.',
'name': '',
'groups': ('nginx',),
'tags': ('debug', 'howto', 'logging', 'nginx'),
'links': ('https://www.nginx.com/resources/admin-guide/debug/', ),
'source': '',
'versions': (),
'languages': (),
'filename': 'howto-debug-nginx.txt',
'created': DEFAULT_TIME,
'updated': DEFAULT_TIME,
'uuid': '22cd5827-b6ef-4067-b5ac-3ceac07dde9f',
'digest': '6cfe47a8880a8f81b66ff6bd71e795069ed1dfdd259c9fd181133f683c7697eb'
}, {
'category': 'solution',
'data':('################################################################################',
'## Description',
'################################################################################',
'',
' # Investigating docker log driver and especially the Kafka plugin.',
'',
'################################################################################',
'## References',
'################################################################################',
'',
' # Kube Kafka log driver',
' > https://github.com/MickayG/moby-kafka-logdriver',
'',
' # Logs2Kafka',
' > https://groups.google.com/forum/#!topic/kubernetes-users/iLDsG85exRQ',
' > https://github.com/garo/logs2kafka',
'',
'################################################################################',
'## Commands',
'################################################################################',
'',
' # Get logs from pods',
' $ kubectl get pods',
' $ kubectl logs kafka-0',
'',
' # Install docker log driver for Kafka',
' $ docker ps --format "{{.Names}}" | grep -E \'kafka|logstash\'',
' $ docker inspect k8s_POD_kafka-0...',
" $ docker inspect --format '{{ .NetworkSettings.IPAddress }}' k8s_POD_kafka-0...",
' $ docker plugin install --disable mickyg/kafka-logdriver:latest',
' $ docker plugin set mickyg/kafka-logdriver:latest KAFKA_BROKER_ADDR="10.2.28.10:9092"',
' $ docker plugin inspect mickyg/kafka-logdriver',
' $ docker plugin enable mickyg/kafka-logdriver:latest',
' $ docker run --log-driver mickyg/kafka-logdriver:latest hello-world',
' $ docker plugin disable mickyg/kafka-logdriver:latest',
'',
' # Get current docker log driver',
" $ docker info |grep 'Logging Driver' # Default driver",
' $ docker ps --format "{{.Names}}" | grep -E \'kafka|logstash\'',
' $ docker inspect k8s_POD_kafka-0...',
" $ docker inspect --format '{{ .NetworkSettings.IPAddress }}' k8s_POD_logstash...",
" $ docker inspect --format '{{ .NetworkSettings.IPAddress }}' k8s_POD_kafka-0...",
' $ docker inspect $(docker ps | grep POD | awk \'{print $1}\') | grep -E "Hostname|NetworkID',
' $ docker inspect $(docker ps | grep POD | awk \'{print $1}\') | while read line ; do egrep -E ' +
'\'"Hostname"|"IPAddress"\' ; done | while read line ; do echo $line ; done',
'',
'################################################################################',
'## Solutions',
'################################################################################',
'',
'################################################################################',
'## Configurations',
'################################################################################',
'',
' # Logstash configuration',
' $ vi elk-stack/logstash/build/pipeline/kafka.conf',
' input {',
' gelf {}',
' }',
'',
' output {',
' elasticsearch {',
' hosts => ["elasticsearch"]',
' }',
' stdout {}',
' }',
'',
' # Kafka configuration',
' $ vi elk-stack/logstash/build/pipeline/kafka.conf',
' kafka {',
' type => "argus.docker"',
' topics => ["dockerlogs"]',
' codec => "plain"',
' bootstrap_servers => "kafka:9092"',
' consumer_threads => 1',
' }',
'',
'################################################################################',
'## Whiteboard',
'################################################################################',
''),
'brief': 'Testing docker log drivers',
'description': 'Investigating docker log driver and especially the Kafka plugin.',
'name': '',
'groups': ('docker',),
'tags': ('docker', 'driver', 'kafka', 'kubernetes', 'logging', 'logs2kafka', 'moby', 'plugin'),
'links': ('https://github.com/MickayG/moby-kafka-logdriver',
'https://github.com/garo/logs2kafka',
'https://groups.google.com/forum/#!topic/kubernetes-users/iLDsG85exRQ'),
'source': '',
'versions': (),
'languages': (),
'filename': 'kubernetes-docker-log-driver-kafka.txt',
'created': '2017-10-20T06:16:27.000001+00:00',
'updated': '2017-10-20T06:16:27.000001+00:00',
'uuid': '23cd5827-b6ef-4067-b5ac-3ceac07dde9f',
'digest': 'ee3f2ab7c63d6965ac2531003807f00caee178f6e1cbb870105c7df86e6d5be2'
}, {
'category': 'solution',
'data':('## Description',
'',
'Investigate docker log drivers and the logs2kafka log plugin.',
'',
'## References',
'',
' ```',
' # Kube Kafka log driver',
' > https://github.com/MickayG/moby-kafka-logdriver',
' ```',
'',
' ```',
' # Logs2Kafka',
' > https://groups.google.com/forum/#!topic/kubernetes-users/iLDsG85exRQ',
' > https://github.com/garo/logs2kafka',
' ```',
'',
'## Commands',
'',
' ```',
' # Get logs from pods',
' $ kubectl get pods',
' $ kubectl logs kafka-0',
' ```',
'',
' ```',
' # Install docker log driver for Kafka',
' $ docker ps --format "{{.Names}}" | grep -E \'kafka|logstash\'',
' $ docker inspect k8s_POD_kafka-0...',
' $ docker inspect --format \'{{ .NetworkSettings.IPAddress }}\' k8s_POD_kafka-0...',
' $ docker plugin install --disable mickyg/kafka-logdriver:latest',
' $ docker plugin set mickyg/kafka-logdriver:latest KAFKA_BROKER_ADDR="10.2.28.10:9092"',
' $ docker plugin inspect mickyg/kafka-logdriver',
' $ docker plugin enable mickyg/kafka-logdriver:latest',
' $ docker run --log-driver mickyg/kafka-logdriver:latest hello-world',
' $ docker plugin disable mickyg/kafka-logdriver:latest',
' ```',
'',
' ```',
' # Get current docker log driver',
' $ docker info |grep \'Logging Driver\' # Default driver',
' $ docker ps --format "{{.Names}}" | grep -E \'kafka|logstash\'',
' $ docker inspect k8s_POD_kafka-0...',
' $ docker inspect --format \'{{ .NetworkSettings.IPAddress }}\' k8s_POD_logstash...',
' $ docker inspect --format \'{{ .NetworkSettings.IPAddress }}\' k8s_POD_kafka-0...',
' $ docker inspect $(docker ps | grep POD | awk \'{print $1}\') | grep -E "Hostname|NetworkID',
' $ docker inspect $(docker ps | grep POD | awk \'{print $1}\') | while read line ; do egrep -E \'"Hostname"|"IPAddress"\' ; done | while read line ; do echo $line ; done', # noqa pylint: disable=line-too-long
' ```',
'',
'## Configurations',
'',
' ```',
' # Logstash configuration',
' $ vi elk-stack/logstash/build/pipeline/kafka.conf',
' input {',
' gelf {}',
' }',
'',
' output {',
' elasticsearch {',
' hosts => ["elasticsearch"]',
' }',
' stdout {}',
' }',
' ```',
'',
' ```',
' # Kafka configuration',
' $ vi elk-stack/logstash/build/pipeline/kafka.conf',
' kafka {',
' type => "argus.docker"',
' topics => ["dockerlogs"]',
' codec => "plain"',
' bootstrap_servers => "kafka:9092"',
' consumer_threads => 1',
' }',
' ```',
'',
'## Solutions',
'',
'## Whiteboard',
''),
'brief': 'Testing docker log drivers',
'description': 'Investigate docker log drivers and the logs2kafka log plugin.',
'name': '',
'groups': ('docker',),
'tags': ('docker', 'driver', 'kafka', 'kubernetes', 'logging', 'logs2kafka', 'moby', 'plugin'),
'links': ('https://github.com/MickayG/moby-kafka-logdriver',
'https://github.com/garo/logs2kafka',
'https://groups.google.com/forum/#!topic/kubernetes-users/iLDsG85exRQ'),
'source': '',
'versions': (),
'languages': (),
'filename': 'kubernetes-docker-log-driver-kafka.mkdn',
'created': '2019-01-04T10:54:49.265512+00:00',
'updated': '2019-01-05T10:54:49.265512+00:00',
'uuid': '24cd5827-b6ef-4067-b5ac-3ceac07dde9f',
'digest': 'c54c8a896b94ea35edf6c798879957419d26268bd835328d74b19a6e9ce2324d'
})
BEATS_CREATED = _DEFAULTS[_BEATS]['created']
BEATS_UPDATED = _DEFAULTS[_BEATS]['updated']
NGINX_CREATED = _DEFAULTS[_NGINX]['created']
NGINX_UPDATED = _DEFAULTS[_NGINX]['updated']
KAFKA_CREATED = _DEFAULTS[_KAFKA]['created']
KAFKA_UPDATED = _DEFAULTS[_KAFKA]['updated']
KAFKA_MKDN_CREATED = _DEFAULTS[_KAFKA_MKDN]['created']
KAFKA_MKDN_UPDATED = _DEFAULTS[_KAFKA_MKDN]['updated']
if not DEFAULT_TIME == BEATS_CREATED == BEATS_UPDATED == NGINX_CREATED == NGINX_UPDATED:
raise Exception('default content timestamps must be same - see \'Test case layouts and data structures\'')
BEATS_DIGEST = _DEFAULTS[_BEATS]['digest']
NGINX_DIGEST = _DEFAULTS[_NGINX]['digest']
KAFKA_DIGEST = _DEFAULTS[_KAFKA]['digest']
KAFKA_MKDN_DIGEST = _DEFAULTS[_KAFKA_MKDN]['digest']
BEATS_UUID = _DEFAULTS[_BEATS]['uuid']
NGINX_UUID = _DEFAULTS[_NGINX]['uuid']
KAFKA_UUID = _DEFAULTS[_KAFKA]['uuid']
KAFKA_MKDN_UUID = _DEFAULTS[_KAFKA_MKDN]['uuid']
BEATS = _DEFAULTS[_BEATS]
NGINX = _DEFAULTS[_NGINX]
KAFKA = _DEFAULTS[_KAFKA]
KAFKA_MKDN = _DEFAULTS[_KAFKA_MKDN]
DEFAULT_SOLUTIONS = (BEATS, NGINX)
TEMPLATE = Helper.read_template('solution.txt').split('\n')
TEMPLATE_DIGEST_TEXT = 'be2ec3ade0e984463c1d3346910a05625897abd8d3feae4b2e54bfd6aecbde2d'
TEMPLATE_DIGEST_MKDN = '073ea152d867cf06b2ee993fb1aded4c8ccbc618972db5c18158b5b68a5da6e4'
TEMPLATE_TEXT = (
'################################################################################',
'## BRIEF : Add brief title for content',
'##',
'## GROUPS : groups',
'## TAGS : example,tags',
'## FILE : example-content.md',
'################################################################################',
'',
'',
'################################################################################',
'## Description',
'################################################################################',
'',
'################################################################################',
'## References',
'################################################################################',
'',
'################################################################################',
'## Commands',
'################################################################################',
'',
'################################################################################',
'## Configurations',
'################################################################################',
'',
'################################################################################',
'## Solutions',
'################################################################################',
'',
'################################################################################',
'## Whiteboard',
'################################################################################',
'',
'################################################################################',
'## Meta',
'################################################################################',
'',
'category : solution',
'created : 2017-10-14T19:56:31.000001+00:00',
'digest : 50c37862816a197c63b2ae72c511586c3463814509c0d5c7ebde534ce0209935',
'languages : example-language',
'name : example content handle',
'source : https://www.example.com/source.md',
'updated : 2017-10-14T19:56:31.000001+00:00',
'uuid : a1cd5827-b6ef-4067-b5ac-3ceac07dde9f',
'versions : example=3.9.0,python>=3',
''
)
TEMPLATE_MKDN = (
'# Add brief title for content @groups',
'',
'> Add a description that defines the content in one chapter.',
'',
'> ',
'',
'## Description',
'',
'## References',
'',
'## Commands',
'',
'## Configurations',
'',
'## Solutions',
'',
'## Whiteboard',
'',
'## Meta',
'',
'> category : solution ',
'created : 2017-10-14T19:56:31.000001+00:00 ',
'digest : 5facdc16dc81851c2f65b112a0921eb2f2db206c7756714efb45ba0026471f11 ',
'filename : example-content.md ',
'languages : example-language ',
'name : example content handle ',
'source : https://www.example.com/source.md ',
'tags : example,tags ',
'updated : 2017-10-14T19:56:31.000001+00:00 ',
'uuid : a1cd5827-b6ef-4067-b5ac-3ceac07dde9f ',
'versions : example=3.9.0,python>=3 ',
''
)
_OUTPUTS = [(
'',
' # Elastic,beats,debug,filebeat,howto',
' > https://www.elastic.co/guide/en/beats/filebeat/master/enable-filebeat-debugging.html',
'',
' : ################################################################################',
' : ## Description',
' : ################################################################################',
' : ',
' : # Debug Elastic Beats',
' : ',
' : ################################################################################',
' : ## References',
' : ################################################################################',
' : ',
' : # Enable logs from Filebeat',
' : > https://www.elastic.co/guide/en/beats/filebeat/master/enable-filebeat-debugging.html',
' : ',
' : ################################################################################',
' : ## Commands',
' : ################################################################################',
' : ',
' : # Run Filebeat with full log level',
' : $ ./filebeat -e -c config/filebeat.yml -d "*"',
' : ',
' : ################################################################################',
' : ## Solutions',
' : ################################################################################',
' : ',
' : ################################################################################',
' : ## Configurations',
' : ################################################################################',
' : ',
' : ################################################################################',
' : ## Whiteboard',
' : ################################################################################'
)]
BEATS_OUTPUT = _OUTPUTS[_BEATS]
|
heilaaks/snippy
|
tests/lib/solution.py
|
Python
|
agpl-3.0
| 25,184
|
# -*- coding: utf-8 -*-
from odoo.exceptions import ValidationError
from odoo import models, api, _
class ProductProduct(models.Model):
_inherit = 'product.product'
_rec_name = 'config_name'
"""
Copy the function from product_configurator to show price using price list.
To Fix :
- Extra price For Attribute value
- Extra price For Custom value.
"""
@api.multi
def _compute_product_price_extra(self):
"""Compute price of configurable products as sum
of products related to attribute values picked"""
products = self.filtered(lambda x: not x.config_ok)
pricelist = self.env.user.partner_id.property_product_pricelist
configurable_products = self - products
if products:
prices = super(ProductProduct, self)._compute_product_price_extra()
conversions = self._get_conversions_dict()
for product in configurable_products:
lst_price = product.product_tmpl_id.lst_price
value_ids = product.attribute_value_ids.ids
# TODO: Merge custom values from products with cfg session
# and use same method to retrieve parsed custom val dict
custom_vals = {}
for val in product.value_custom_ids:
custom_type = val.attribute_id.custom_type
if custom_type in conversions:
try:
custom_vals[val.attribute_id.id] = conversions[
custom_type](val.value)
except:
raise ValidationError(
_("Could not convert custom value '%s' to '%s' on "
"product variant: '%s'" % (val.value,
custom_type,
product.display_name))
)
else:
custom_vals[val.attribute_id.id] = val.value
#
# prices = product.product_tmpl_id.get_cfg_price(
# value_ids, custom_vals)
product_price = pricelist.get_product_price(product, 1, 1)
# product.price_extra = prices['total'] - prices['taxes'] - lst_price
product.price_extra = product_price - lst_price
|
microcom/odoo-product-configurator
|
product_configurator_use_default_pricelist/models/product.py
|
Python
|
agpl-3.0
| 2,350
|
"""
View logic for handling course messages.
"""
from __future__ import absolute_import
from datetime import datetime
from babel.dates import format_date, format_timedelta
from django.contrib import auth
from django.template.loader import render_to_string
from django.utils.http import urlquote_plus
from django.utils.translation import get_language, to_locale
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from web_fragments.fragment import Fragment
from course_modes.models import CourseMode
from courseware.courses import get_course_date_blocks, get_course_with_access
from lms.djangoapps.course_goals.api import (
get_course_goal,
get_course_goal_options,
get_goal_api_url,
has_course_goal_permission,
valid_course_goals_ordered
)
from lms.djangoapps.course_goals.models import GOAL_KEY_CHOICES
from lms.djangoapps.courseware.courses import allow_public_access
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.course_experience import CourseHomeMessages
from student.models import CourseEnrollment
from xmodule.course_module import COURSE_VISIBILITY_PUBLIC
class CourseHomeMessageFragmentView(EdxFragmentView):
"""
A fragment that displays a course message with an alert and call
to action for three types of users:
1) Not logged in users are given a link to sign in or register.
2) Unenrolled users are given a link to enroll.
3) Enrolled users who get to the page before the course start date
are given the option to add the start date to their calendar.
This fragment requires a user_access map as follows:
user_access = {
'is_anonymous': True if the user is not logged in, False otherwise
'is_enrolled': True if the user is enrolled in the course, False otherwise
'is_staff': True if the user is a staff member of the course, False otherwise
}
"""
def render_to_fragment(self, request, course_id, user_access, **kwargs):
"""
Renders a course message fragment for the specified course.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
# Get the time until the start date; if the course has already started, or has no start date, the value will be zero or negative
now = datetime.now(UTC)
already_started = course.start and now > course.start
days_until_start_string = "started" if already_started else format_timedelta(
course.start - now, locale=to_locale(get_language())
)
course_start_data = {
'course_start_date': format_date(course.start, locale=to_locale(get_language())),
'already_started': already_started,
'days_until_start_string': days_until_start_string
}
# Register the course home messages to be loaded on the page
_register_course_home_messages(request, course, user_access, course_start_data)
# Register course date alerts
for course_date_block in get_course_date_blocks(course, request.user):
course_date_block.register_alerts(request, course)
# Register a course goal message, if appropriate
# Only show the set course goal message for enrolled, unverified
# users that have not yet set a goal in a course that allows for
# verified statuses.
user_goal = get_course_goal(auth.get_user(request), course_key)
is_already_verified = CourseEnrollment.is_enrolled_as_verified(request.user, course_key)
if has_course_goal_permission(request, course_id, user_access) and not is_already_verified and not user_goal:
_register_course_goal_message(request, course)
# Grab the relevant messages
course_home_messages = list(CourseHomeMessages.user_messages(request))
# Pass in the url used to set a course goal
goal_api_url = get_goal_api_url(request)
# Grab the logo
image_src = 'course_experience/images/home_message_author.png'
context = {
'course_home_messages': course_home_messages,
'goal_api_url': goal_api_url,
'image_src': image_src,
'course_id': course_id,
'username': request.user.username,
}
html = render_to_string('course_experience/course-messages-fragment.html', context)
return Fragment(html)
def _register_course_home_messages(request, course, user_access, course_start_data):
"""
Register messages to be shown in the course home content page.
"""
allow_anonymous = allow_public_access(course, [COURSE_VISIBILITY_PUBLIC])
if user_access['is_anonymous'] and not allow_anonymous:
sign_in_or_register_text = (_(u'{sign_in_link} or {register_link} and then enroll in this course.')
if not CourseMode.is_masters_only(course.id)
else _(u'{sign_in_link} or {register_link}.'))
CourseHomeMessages.register_info_message(
request,
Text(sign_in_or_register_text).format(
sign_in_link=HTML(u'<a href="/login?next={current_url}">{sign_in_label}</a>').format(
sign_in_label=_('Sign in'),
current_url=urlquote_plus(request.path),
),
register_link=HTML(u'<a href="/register?next={current_url}">{register_label}</a>').format(
register_label=_('register'),
current_url=urlquote_plus(request.path),
)
),
title=Text(_('You must be enrolled in the course to see course content.'))
)
if not user_access['is_anonymous'] and not user_access['is_staff'] and \
not user_access['is_enrolled']:
title = Text(_(u'Welcome to {course_display_name}')).format(
course_display_name=course.display_name
)
if CourseMode.is_masters_only(course.id):
# if a course is a Master's only course, we will not offer user ability to self-enroll
CourseHomeMessages.register_info_message(
request,
Text(_('You must be enrolled in the course to see course content. '
'Please contact your degree administrator or edX Support if you have questions.')),
title=title
)
elif not course.invitation_only:
CourseHomeMessages.register_info_message(
request,
Text(_(
u'{open_enroll_link}Enroll now{close_enroll_link} to access the full course.'
)).format(
open_enroll_link=HTML('<button class="enroll-btn btn-link">'),
close_enroll_link=HTML('</button>')
),
title=title
)
else:
CourseHomeMessages.register_info_message(
request,
Text(_('You must be enrolled in the course to see course content.')),
)
def _register_course_goal_message(request, course):
"""
Register a message to let a learner specify a course goal.
"""
course_goal_options = get_course_goal_options()
goal_choices_html = Text(_(
'To start, set a course goal by selecting the option below that best describes '
u'your learning plan. {goal_options_container}'
)).format(
goal_options_container=HTML('<div class="row goal-options-container">')
)
# Add the dismissible option for users that are unsure of their goal
goal_choices_html += Text(
'{initial_tag}{choice}{closing_tag}'
).format(
initial_tag=HTML(
u'<div tabindex="0" aria-label="{aria_label_choice}" class="goal-option dismissible" '
'data-choice="{goal_key}">'
).format(
goal_key=GOAL_KEY_CHOICES.unsure,
aria_label_choice=Text(_(u"Set goal to: {choice}")).format(
choice=course_goal_options[GOAL_KEY_CHOICES.unsure],
),
),
choice=Text(_('{choice}')).format(
choice=course_goal_options[GOAL_KEY_CHOICES.unsure],
),
closing_tag=HTML('</div>'),
)
# Add the option to set a goal to earn a certificate,
# complete the course or explore the course
course_goals_by_commitment_level = valid_course_goals_ordered()
for goal in course_goals_by_commitment_level:
goal_key, goal_text = goal
goal_choices_html += HTML(
'{initial_tag}{goal_text}{closing_tag}'
).format(
initial_tag=HTML(
u'<button tabindex="0" aria-label="{aria_label_choice}" class="goal-option btn-outline-primary" '
'data-choice="{goal_key}">'
).format(
goal_key=goal_key,
aria_label_choice=Text(_(u"Set goal to: {goal_text}")).format(
goal_text=Text(_(goal_text))
)
),
goal_text=goal_text,
closing_tag=HTML('</button>')
)
CourseHomeMessages.register_info_message(
request,
HTML('{goal_choices_html}{closing_tag}').format(
goal_choices_html=goal_choices_html,
closing_tag=HTML('</div>')
),
title=Text(_(u'Welcome to {course_display_name}')).format(
course_display_name=course.display_name
)
)
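# Illustrative only: the user_access map described in the fragment view's
# docstring, with hypothetical values; callers assemble it before invoking
# render_to_fragment().
#
#     user_access = {
#         'is_anonymous': False,
#         'is_enrolled': True,
#         'is_staff': False,
#     }
#     fragment = CourseHomeMessageFragmentView().render_to_fragment(
#         request, course_id=course_id, user_access=user_access)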
|
jolyonb/edx-platform
|
openedx/features/course_experience/views/course_home_messages.py
|
Python
|
agpl-3.0
| 9,580
|
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
class TaxingContext(object):
def __init__(self, customer_tax_group=None, customer_tax_number=None, location=None):
self.customer_tax_group = customer_tax_group
self.customer_tax_number = customer_tax_number
self.country_code = getattr(location, "country_code", None) or getattr(location, "country", None)
self.region_code = getattr(location, "region_code", None)
self.postal_code = getattr(location, "postal_code", None)
self.location = location
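# Illustrative only: constructing a TaxingContext from a hypothetical address
# object; the Address namedtuple is an assumption for this sketch, not part of
# Shoop's API.
from collections import namedtuple

Address = namedtuple("Address", "country_code region_code postal_code")

context = TaxingContext(
    customer_tax_group=None,
    customer_tax_number="FI12345678",
    location=Address(country_code="FI", region_code=None, postal_code="00100"),
)
assert context.country_code == "FI"
assert context.postal_code == "00100"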
|
akx/shoop
|
shoop/core/taxing/_context.py
|
Python
|
agpl-3.0
| 720
|
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.utils import translation
from shoop.simple_cms.models import Page
from shoop_tests.simple_cms.utils import create_multilanguage_page
@pytest.mark.django_db
def test_create_multilanguage_page():
with translation.override("de"):
page_id = create_multilanguage_page(url="multi").pk
with translation.override("fi"):
page = Page.objects.get(pk=page_id)
assert page.title == "test, Finnisch"
assert page.url == "multi-fi"
with translation.override("en"):
page = Page.objects.get(pk=page_id)
assert page.title == "test, Englisch"
assert page.url == "multi-en"
|
taedori81/shoop
|
shoop_tests/simple_cms/test_languages.py
|
Python
|
agpl-3.0
| 904
|
from copy import copy
import json
from cnxepub.utils import squash_xml_to_text
from cnxml.parse import parse_metadata as parse_cnxml_metadata
from cnxtransforms import cnxml_abstract_to_html
from lxml import etree
__all__ = (
'convert_to_model_compat_metadata',
'scan_for_id_mapping',
'scan_for_uuid_mapping',
'build_id_to_uuid_mapping',
'id_from_metadata',
)
ACTORS_MAPPING_KEYS = (
# (<litezip name>, <cnx-epub name>),
('authors', 'authors'),
('licensors', 'copyright_holders'),
('maintainers', 'publishers'),
)
def _format_actors(actors):
"""Format the actors list of usernames to a cnx-epub compatable format"""
formatted_actors = []
for a in actors:
formatted_actors.append({'id': a, 'type': 'cnx-id', 'name': a})
return formatted_actors
def convert_to_model_compat_metadata(metadata):
"""\
Convert the metadata to cnx-epub model compatible metadata.
This creates a copy of the metadata. It does not mutate the given
metadata.
:param metadata: metadata
:type metadata: dict
:return: metadata
:rtype: dict
"""
md = copy(metadata)
md.setdefault('cnx-archive-shortid', None)
md.setdefault('cnx-archive-uri', '{}@{}'.format(md['id'], md['version']))
md.pop('id')
# FIXME cnx-epub has an issue rendering and parsing license_text set to
# None, so hard code it to 'CC BY' for now.
md.setdefault('license_text', 'CC BY')
md.setdefault('print_style', None)
md['derived_from_title'] = md['derived_from']['title']
md['derived_from_uri'] = md['derived_from']['uri']
md.pop('derived_from')
# Translate to a Person Info structure
for lz_key, epub_key in ACTORS_MAPPING_KEYS:
md[epub_key] = _format_actors(md.pop(lz_key))
md.setdefault('editors', [])
md.setdefault('illustrators', [])
md.setdefault('translators', [])
md['summary'] = md.pop('abstract')
md['summary'] = md['summary'] and md['summary'] or None
if md['summary'] is not None:
s = cnxml_abstract_to_html(md['summary'])
s = etree.fromstring(s)
md['summary'] = squash_xml_to_text(s, remove_namespaces=True)
return md
def id_from_metadata(metadata):
"""Given an model's metadata, discover the id."""
identifier = "cnx-archive-uri"
return metadata.get(identifier)
def scan_for_id_mapping(start_dir):
"""Collect a mapping of content ids to filepaths relative to the given
directory (as ``start_dir``).
This is necessary because the filesystem could be organized as
a `book-tree`, which is a hierarchy of directories that are labeled
by title rather than by id.
:param start_dir: a directory to start the scan from
:type start_dir: :class:`pathlib.Path`
:return: mapping of content ids to the content filepath
:rtype: {str: pathlib.Path, ...}
"""
mapping = {}
for filepath in start_dir.glob('**/index.cnxml'):
with filepath.open('rb') as fb:
xml = etree.parse(fb)
md = convert_to_model_compat_metadata(parse_cnxml_metadata(xml))
id = id_from_metadata(md)
id = id.split('@')[0]
mapping[id] = filepath
return mapping
def scan_for_uuid_mapping(start_dir):
"""Collect a mapping of content UUIDs to filepaths relative to the given
directory (as ``start_dir``).
This is similar to ``scan_for_id_mapping``, but instead of using the ID
value found in CNXML as the key, we want the same mapping keyed by the
UUID in the corresponding metadata.json file if it's available.
:param start_dir: a directory to start the scan from
:type start_dir: :class:`pathlib.Path`
:return: mapping of content uuids to the content filepath
:rtype: {str: pathlib.Path, ...}
"""
mapping = {}
for filepath in start_dir.glob('**/index.cnxml'):
metadata_file = filepath.parent / 'metadata.json'
if metadata_file.exists():
with metadata_file.open('r') as metadata_json:
metadata = json.load(metadata_json)
uuid = metadata['id']
mapping[uuid] = filepath
else:
# Fallback to trying CNXML for UUID metadata
metadata = parse_cnxml_metadata(etree.parse(filepath.open()))
uuid = metadata.get('uuid')
if uuid:
mapping[uuid] = filepath
return mapping
def build_id_to_uuid_mapping(id_to_path_map, uuid_to_path_map):
"""Build a mapping of ID to UUID values based upon matching paths
:param id_to_path_map: A mapping of IDs (m12345) to filepaths
:type id_to_path_map: {str: pathlib.Path, ...}
:param uuid_to_path_map: A mapping of UUIDs to filepaths
:type uuid_to_path_map: {str: pathlib.Path, ...}
:return: mapping of ids to uuids
:rtype: {str: str, ...}
"""
mapping = {}
path_to_uuid_map = {
str(path): uuid for uuid, path in uuid_to_path_map.items()
}
for id, path in id_to_path_map.items():
mapping[id] = path_to_uuid_map.get(str(path))
return mapping
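# Illustrative only: a minimal sketch combining the helpers above; the content
# directory path is hypothetical.
from pathlib import Path

start_dir = Path('collection')                    # hypothetical book-tree checkout
id_map = scan_for_id_mapping(start_dir)           # e.g. {'m12345': Path(...)}
uuid_map = scan_for_uuid_mapping(start_dir)       # e.g. {'<uuid>': Path(...)}
id_to_uuid = build_id_to_uuid_mapping(id_map, uuid_map)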
|
Connexions/nebuchadnezzar
|
nebu/models/utils.py
|
Python
|
agpl-3.0
| 5,088
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('',
url('^json/get/$', 'django_notify.views.get_notifications', name='json_get', kwargs={}),
url('^json/mark-read/$', 'django_notify.views.mark_read', name='json_mark_read_base', kwargs={}),
url('^json/mark-read/(\d+)/$', 'django_notify.views.mark_read', name='json_mark_read', kwargs={}),
url('^goto/(?P<notification_id>\d+)/$', 'django_notify.views.goto', name='goto', kwargs={}),
url('^goto/$', 'django_notify.views.goto', name='goto_base', kwargs={}),
)
def get_pattern(app_name="notify", namespace="notify"):
"""Every url resolution takes place as "notify:view_name".
https://docs.djangoproject.com/en/dev/topics/http/urls/#topics-http-reversing-url-namespaces
"""
return urlpatterns, app_name, namespace
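# Illustrative only: a sketch of mounting these URLs from a project-level
# urls.py (old-style Django URLconf, matching the imports above); the
# 'notify/' prefix is an assumption.
#
#     from django.conf.urls.defaults import patterns, url
#     from django_notify.urls import get_pattern as get_notify_pattern
#
#     urlpatterns = patterns('',
#         url(r'^notify/', get_notify_pattern()),
#     )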
|
GbalsaC/bitnamiP
|
django-wiki/django_notify/urls.py
|
Python
|
agpl-3.0
| 864
|
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class SlotVerificationRequest(models.Model):
_name = 'stock.slot.verification.request'
_inherit = 'mail.thread'
@api.model
def create(self, vals):
if not vals.get('name') or vals.get('name') == '/':
vals['name'] = self.env['ir.sequence'].next_by_code(
'stock.slot.verification.request') or '/'
return super(SlotVerificationRequest, self).create(vals)
@api.multi
def _compute_involved_move_count(self):
for rec in self:
rec.involved_move_count = len(rec.involved_move_ids)
@api.multi
def _compute_involved_inv_line_count(self):
for rec in self:
rec.involved_inv_line_count = len(rec.involved_inv_line_ids)
name = fields.Char(
default="/", required=True,
readonly=True, states={'wait': [('readonly', False)]})
inventory_id = fields.Many2one(
comodel_name='stock.inventory',
string='Inventory Adjustment',
readonly=True)
inventory_line_id = fields.Many2one(
comodel_name='stock.inventory.line',
string='Inventory Line',
readonly=True)
location_id = fields.Many2one(
comodel_name='stock.location',
string='Location',
required=True)
state = fields.Selection(selection=[
('wait', 'Waiting Actions'),
('open', 'In Progress'),
('cancelled', 'Cancelled'),
('done', 'Solved')
], string='Status', default='wait')
responsible_id = fields.Many2one(
comodel_name='res.users',
string='Assigned to')
product_id = fields.Many2one(
comodel_name='product.product',
string='Product', required=True)
notes = fields.Text(string='Notes')
involved_move_ids = fields.Many2many(
comodel_name='stock.move',
relation='slot_verification_move_involved_rel',
column1='slot_verification_request_id',
column2='move_id',
string='Involved Stock Moves')
involved_move_count = fields.Integer(
compute='_compute_involved_move_count'
)
involved_inv_line_ids = fields.Many2many(
comodel_name='stock.inventory.line',
relation='slot_verification_inv_line_involved_rel',
column1='slot_verification_request_id',
column2='inventory_line_id',
string='Involved Inventory Lines')
involved_inv_line_count = fields.Integer(
compute='_compute_involved_inv_line_count')
@api.multi
def _get_involved_moves_domain(self):
domain = [('product_id', '=', self.product_id.id), '|',
('location_id', '=', self.location_id.id),
('location_dest_id', '=', self.location_id.id)]
return domain
@api.multi
def _get_involved_lines_domain(self):
domain = [('product_id', '=', self.product_id.id),
('location_id', '=', self.location_id.id)]
return domain
@api.multi
def _get_involved_lines_and_locations(self):
involved_moves = self.env['stock.move'].search(
self._get_involved_moves_domain())
involved_lines = self.env['stock.inventory.line'].search(
self._get_involved_lines_domain())
return involved_moves, involved_lines
@api.multi
def action_confirm(self):
self.write({'state': 'open'})
for rec in self:
involved_moves, involved_lines = \
rec._get_involved_lines_and_locations()
rec.involved_move_ids = involved_moves
rec.involved_inv_line_ids = involved_lines
return True
@api.multi
def action_cancel(self):
self.write({'state': 'cancelled'})
return True
@api.multi
def action_solved(self):
self.write({'state': 'done'})
return True
@api.multi
def action_view_moves(self):
action = self.env.ref('stock.stock_move_action')
result = action.read()[0]
result['context'] = {}
moves_ids = self.mapped('involved_move_ids').ids
if len(moves_ids) > 1:
result['domain'] = [('id', 'in', moves_ids)]
elif len(moves_ids) == 1:
res = self.env.ref('stock.view_move_form', False)
result['views'] = [(res and res.id or False, 'form')]
result['res_id'] = moves_ids and moves_ids[0] or False
return result
@api.multi
def action_view_inv_lines(self):
action = self.env.ref(
'stock_inventory_verification_request.action_inv_adj_line_tree')
result = action.read()[0]
result['context'] = {}
line_ids = self.mapped('involved_inv_line_ids').ids
if len(line_ids) > 1:
result['domain'] = [('id', 'in', line_ids)]
elif len(line_ids) == 1:
res = self.env.ref('stock_inventory_verification_request.'
'view_inventory_line_form', False)
result['views'] = [(res and res.id or False, 'form')]
result['res_id'] = line_ids and line_ids[0] or False
return result
|
Vauxoo/stock-logistics-warehouse
|
stock_inventory_verification_request/models/stock_slot_verification_request.py
|
Python
|
agpl-3.0
| 5,256
|
class Widget(object):
def __init__(self, options, *args, **kwargs):
super(Widget, self).__init__(*args, **kwargs)
self.options = options
def get_display_name(self):
raise NotImplementedError
def render(self, request):
raise NotImplementedError
def render_option_form(self):
raise NotImplementedError
def get_option_dict(self):
return self.options
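# Illustrative only: a hypothetical TextWidget subclass showing the contract
# the Widget base class expects; the "text" option key and the assumption that
# options is a dict are illustrative, not part of this module.
class TextWidget(Widget):
    def get_display_name(self):
        return "Text"

    def render(self, request):
        # A real widget would escape or transform the stored markup.
        return self.options.get("text", "")

    def render_option_form(self):
        return '<textarea name="text">%s</textarea>' % self.options.get("text", "")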
|
sigurdga/samklang-menu
|
samklang_menu/widgets.py
|
Python
|
agpl-3.0
| 420
|
# Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from unittest import TestCase
from gofer.messaging.adapter.url import URL
from gofer.messaging.adapter.url import PORT, Scheme
class Test(object):
def __init__(self,
url,
adapter=None,
scheme=None,
host=None,
port=None,
userid=None,
password=None,
path=None):
self.url = url
self.adapter = adapter
self.scheme = scheme
self.host = host
self.port = port
self.userid = userid
self.password = password
self.path = path
def __call__(self, test):
url = URL(self.url)
test.assertEqual(url.adapter, self.adapter)
test.assertEqual(url.scheme, self.scheme)
test.assertEqual(url.host, self.host)
test.assertEqual(url.port, self.port)
test.assertEqual(url.userid, self.userid)
test.assertEqual(url.password, self.password)
test.assertEqual(url.path, self.path)
TESTS = [
Test('qpid+amqp://elmer:fudd@blue:5000/all',
adapter='qpid',
scheme='amqp',
host='blue',
port=5000,
userid='elmer',
password='fudd',
path='all'),
Test('amqp://elmer:fudd@yellow:1234//',
scheme='amqp',
host='yellow',
port=1234,
userid='elmer',
password='fudd',
path='/'),
Test('amqp://green:5678/all/good',
scheme='amqp',
host='green',
port=5678,
path='all/good'),
Test('amqp://red:2323',
scheme='amqp',
host='red',
port=2323),
Test('amqp://black',
scheme='amqp',
host='black',
port=5672),
Test('amqps://purple',
scheme='amqps',
host='purple',
port=5671),
Test('orange:6545',
scheme='amqp',
host='orange',
port=6545),
Test('localhost',
scheme='amqp',
host='localhost',
port=5672),
Test('',
scheme='amqp',
port=5672),
]
class TestURL(TestCase):
def test_parsing(self):
for test in TESTS:
test(self)
def test_canonical(self):
urls = [
'qpid+amqp://elmer:fudd@test-host:5000/all',
'amqp://elmer:fudd@test-host:5000/all',
'amqp://test-host:5000/all',
'amqp://test-host:5000'
]
for _url in urls:
url = URL(_url)
self.assertEqual(url.canonical, _url.split('+')[-1].rsplit('/all')[0])
def test_is_ssl(self):
# false
url = URL('amqp://localhost')
self.assertFalse(url.is_ssl())
# true
url = URL('amqps://localhost')
self.assertTrue(url.is_ssl())
def test_hash(self):
url = URL('test')
self.assertEqual(hash(url), hash(url.canonical))
def test_str(self):
urls = [
'qpid+amqp://elmer:fudd@test-host:5000/all',
'amqp://elmer:fudd@test-host:5000/all',
'amqp://test-host:5000/all',
'amqp://test-host:5000',
'amqp://test-host',
]
for _url in urls:
url = URL(_url)
self.assertEqual(str(url), url.canonical)
class TestScheme(TestCase):
def test_validated(self):
for n in PORT:
self.assertEqual(Scheme.validated(n), n.lower())
self.assertRaises(ValueError, Scheme.validated, 'unsupported')
|
jortel/gofer
|
test/unit/messaging/adapter/test_url.py
|
Python
|
lgpl-2.1
| 4,053
|
class Class(object):
pass
def func():
return 3.14
CONSTANT = 42
|
retoo/pystructure
|
tests/python/typeinference/import_star_definitions.py
|
Python
|
lgpl-2.1
| 74
|
#! /usr/bin/env python
"""
# FEATURE_GET_TLV_PROPERTIES.py: Unitary test for
# FEATURE_GET_TLV_PROPERTIES
# Copyright (C) 2012,2016 Ludovic Rousseau
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
# You have to enable the use of Escape commands with the
# DRIVER_OPTION_CCID_EXCHANGE_AUTHORIZED bit in the ifdDriverOptions
# option of the CCID driver Info.plist file
from smartcard.System import readers
from smartcard.pcsc.PCSCPart10 import getTlvProperties, SCARD_SHARE_DIRECT
# for each reader
for reader in readers():
print
print "Reader:", reader
card_connection = reader.createConnection()
card_connection.connect(mode=SCARD_SHARE_DIRECT)
# get the TLV PROPERTIES
tlv = getTlvProperties(card_connection)
for key in sorted(tlv):
if key in ["PCSCv2_PART10_PROPERTY_wIdProduct",
"PCSCv2_PART10_PROPERTY_wIdVendor"]:
print "%s: 0x%04X" % (key, tlv[key])
else:
print "%s: %s" % (key, tlv[key])
|
sixtyfive/pcsc-ctapi-wrapper
|
PCSC/UnitaryTests/FEATURE_GET_TLV_PROPERTIES.py
|
Python
|
lgpl-2.1
| 1,604
|
__version__="v2.5 beta10"
welcome_block="""
# Multi-Echo ICA, Version %s
#
# Kundu, P., Brenowitz, N.D., Voon, V., Worbe, Y., Vertes, P.E., Inati, S.J., Saad, Z.S.,
# Bandettini, P.A. & Bullmore, E.T. Integrated strategy for improving functional
# connectivity mapping using multiecho fMRI. PNAS (2013).
#
# Kundu, P., Inati, S.J., Evans, J.W., Luh, W.M. & Bandettini, P.A. Differentiating
# BOLD and non-BOLD signals in fMRI time series using multi-echo EPI. NeuroImage (2011).
# https://doi.org/10.1016/j.neuroimage.2011.12.028
#
# PROCEDURE 2a: Model fitting and component selection routines
"""
import numpy as np
def fitmodels_direct(catd,mmix,mask,t2s,tes,fout=None,reindex=False,mmixN=None,full_sel=True,debugout=False):
"""
Usage:
fitmodels_direct(fout)
Input:
fout is flag for output of per-component TE-dependence maps
t2s is a (nx,ny,nz) ndarray
tes is a 1d array
"""
#Compute opt. com. raw data
tsoc = np.array(optcom(catd,t2s,tes,mask),dtype=float)[mask]
tsoc_mean = tsoc.mean(axis=-1)
tsoc_dm = tsoc-tsoc_mean[:,np.newaxis]
#Compute un-normalized weight dataset (features)
if mmixN == None: mmixN=mmix
WTS = computefeats2(unmask(tsoc,mask),mmixN,mask,normalize=False)
#Compute PSC dataset - shouldn't have to refit data
tsoc_B = get_coeffs(unmask(tsoc_dm,mask),mask,mmix)[mask]
tsoc_Babs = np.abs(tsoc_B)
PSC = tsoc_B/tsoc.mean(axis=-1)[:,np.newaxis]*100
#Compute skews to determine signs based on unnormalized weights, correct mmix & WTS signs based on spatial distribution tails
from scipy.stats import skew
signs = skew(WTS,axis=0)
signs /= np.abs(signs)
mmix = mmix.copy()
mmix*=signs
WTS*=signs
PSC*=signs
totvar = (tsoc_B**2).sum()
totvar_norm = (WTS**2).sum()
#Compute Betas and means over TEs for TE-dependence analysis
Ne = tes.shape[0]
betas = cat2echos(get_coeffs(uncat2echos(catd,Ne),np.tile(mask,(1,1,Ne)),mmix),Ne)
nx,ny,nz,Ne,nc = betas.shape
Nm = mask.sum()
mu = catd.mean(axis=-1)
tes = np.reshape(tes,(Ne,1))
fmin,fmid,fmax = getfbounds(ne)
#Mask arrays
mumask = fmask(mu,mask)
t2smask = fmask(t2s,mask)
betamask = fmask(betas,mask)
if debugout: fout=aff
#Setup Xmats
#Model 1
X1 = mumask.transpose()
#Model 2
X2 = np.tile(tes,(1,Nm))*mumask.transpose()/t2smask.transpose()
#Tables for component selection
Kappas = np.zeros([nc])
Rhos = np.zeros([nc])
varex = np.zeros([nc])
varex_norm = np.zeros([nc])
Z_maps = np.zeros([Nm,nc])
F_R2_maps = np.zeros([Nm,nc])
F_S0_maps = np.zeros([Nm,nc])
Z_clmaps = np.zeros([Nm,nc])
F_R2_clmaps = np.zeros([Nm,nc])
F_S0_clmaps = np.zeros([Nm,nc])
Br_clmaps_R2 = np.zeros([Nm,nc])
Br_clmaps_S0 = np.zeros([Nm,nc])
for i in range(nc):
#size of B is (nc, nx*ny*nz)
B = np.atleast_3d(betamask)[:,:,i].transpose()
alpha = (np.abs(B)**2).sum(axis=0)
varex[i] = (tsoc_B[:,i]**2).sum()/totvar*100.
varex_norm[i] = (WTS[:,i]**2).sum()/totvar_norm*100.
#S0 Model
coeffs_S0 = (B*X1).sum(axis=0)/(X1**2).sum(axis=0)
SSE_S0 = (B - X1*np.tile(coeffs_S0,(Ne,1)))**2
SSE_S0 = SSE_S0.sum(axis=0)
F_S0 = (alpha - SSE_S0)*2/(SSE_S0)
F_S0_maps[:,i] = F_S0
#R2 Model
coeffs_R2 = (B*X2).sum(axis=0)/(X2**2).sum(axis=0)
SSE_R2 = (B - X2*np.tile(coeffs_R2,(Ne,1)))**2
SSE_R2 = SSE_R2.sum(axis=0)
F_R2 = (alpha - SSE_R2)*2/(SSE_R2)
F_R2_maps[:,i] = F_R2
#Compute weights as Z-values
wtsZ=(WTS[:,i]-WTS[:,i].mean())/WTS[:,i].std()
wtsZ[np.abs(wtsZ)>Z_MAX]=(Z_MAX*(np.abs(wtsZ)/wtsZ))[np.abs(wtsZ)>Z_MAX]
Z_maps[:,i] = wtsZ
#Compute Kappa and Rho
F_S0[F_S0>F_MAX] = F_MAX
F_R2[F_R2>F_MAX] = F_MAX
Kappas[i] = np.average(F_R2,weights=np.abs(wtsZ)**2.)
Rhos[i] = np.average(F_S0,weights=np.abs(wtsZ)**2.)
#Tabulate component values
comptab_pre = np.vstack([np.arange(nc),Kappas,Rhos,varex,varex_norm]).T
if reindex:
#Re-index all components in Kappa order
comptab = comptab_pre[comptab_pre[:,1].argsort()[::-1],:]
Kappas = comptab[:,1]; Rhos = comptab[:,2]; varex = comptab[:,3]; varex_norm = comptab[:,4]
nnc = np.array(comptab[:,0],dtype=np.int)
mmix_new = mmix[:,nnc]
F_S0_maps = F_S0_maps[:,nnc]; F_R2_maps = F_R2_maps[:,nnc]; Z_maps = Z_maps[:,nnc]
WTS = WTS[:,nnc]; PSC=PSC[:,nnc]; tsoc_B=tsoc_B[:,nnc]; tsoc_Babs=tsoc_Babs[:,nnc]
comptab[:,0] = np.arange(comptab.shape[0])
else:
comptab = comptab_pre
mmix_new = mmix
#Full selection including clustering criteria
seldict=None
if full_sel:
for i in range(nc):
#Save out files
out = np.zeros((nx,ny,nz,4))
if fout!=None:
ccname = "cc%.3d.nii" % i
else: ccname = ".cc_temp.nii.gz"
out[:,:,:,0] = np.squeeze(unmask(PSC[:,i],mask))
out[:,:,:,1] = np.squeeze(unmask(F_R2_maps[:,i],mask))
out[:,:,:,2] = np.squeeze(unmask(F_S0_maps[:,i],mask))
out[:,:,:,3] = np.squeeze(unmask(Z_maps[:,i],mask))
niwrite(out,fout,ccname)
os.system('3drefit -sublabel 0 PSC -sublabel 1 F_R2 -sublabel 2 F_SO -sublabel 3 Z_sn %s 2> /dev/null > /dev/null'%ccname)
csize = np.max([int(Nm*0.0005)+5,20])
#Do simple clustering on F
os.system("3dcalc -overwrite -a %s[1..2] -expr 'a*step(a-%i)' -prefix .fcl_in.nii.gz -overwrite" % (ccname,fmin))
os.system('3dmerge -overwrite -dxyz=1 -1clust 1 %i -doall -prefix .fcl_out.nii.gz .fcl_in.nii.gz' % (csize))
sel = fmask(nib.load('.fcl_out.nii.gz').get_data(),mask)!=0
sel = np.array(sel,dtype=np.int)
F_R2_clmaps[:,i] = sel[:,0]
F_S0_clmaps[:,i] = sel[:,1]
#Do simple clustering on Z at p<0.05
sel = spatclust(None,mask,csize,1.95,head,aff,infile=ccname,dindex=3,tindex=3)
Z_clmaps[:,i] = sel
#Do simple clustering on ranked signal-change map
countsigFR2 = F_R2_clmaps[:,i].sum()
countsigFS0 = F_S0_clmaps[:,i].sum()
Br_clmaps_R2[:,i] = spatclust(rankvec(tsoc_Babs[:,i]),mask,csize,max(tsoc_Babs.shape)-countsigFR2,head,aff)
Br_clmaps_S0[:,i] = spatclust(rankvec(tsoc_Babs[:,i]),mask,csize,max(tsoc_Babs.shape)-countsigFS0,head,aff)
seldict = {}
selvars = ['Kappas','Rhos','WTS','varex','Z_maps','F_R2_maps','F_S0_maps',\
'Z_clmaps','F_R2_clmaps','F_S0_clmaps','tsoc_B','Br_clmaps_R2','Br_clmaps_S0']
for vv in selvars:
seldict[vv] = eval(vv)
if debugout or ('DEBUGOUT' in args):
#Package for debug
import cPickle as cP
import zlib
try: os.system('mkdir compsel.debug')
except: pass
selvars = ['Kappas','Rhos','WTS','varex','Z_maps','Z_clmaps','F_R2_clmaps','F_S0_clmaps','Br_clmaps_R2','Br_clmaps_S0']
for vv in selvars:
with open('compsel.debug/%s.pkl.gz' % vv,'wb') as ofh:
print "Writing debug output: compsel.debug/%s.pkl.gz" % vv
ofh.write(zlib.compress(cP.dumps(eval(vv))))
ofh.close()
return seldict,comptab,betas,mmix_new
def selcomps(seldict,debug=False,olevel=2,oversion=99,knobargs=''):
#import ipdb
#Dump dictionary into variable names
for key in seldict.keys(): exec("%s=seldict['%s']" % (key,key))
#List of components
midk = []
ign = []
nc = np.arange(len(Kappas))
ncl = np.arange(len(Kappas))
#If user has specified
try:
if options.manacc:
acc = sorted([int(vv) for vv in options.manacc.split(',')])
midk = []
rej = sorted(np.setdiff1d(ncl,acc))
return acc,rej,midk #Add string for ign
except:
pass
"""
Set knobs
"""
LOW_PERC=25
HIGH_PERC=90
EXTEND_FACTOR=2
try:
if nt<100: EXTEND_FACTOR=3
except: pass
RESTRICT_FACTOR=2
if knobargs!='':
for knobarg in ''.join(knobargs).split(','): exec(knobarg)
"""
Do some tallies for no. of significant voxels
"""
countsigZ = Z_clmaps.sum(0)
countsigFS0 = F_S0_clmaps.sum(0)
countsigFR2 = F_R2_clmaps.sum(0)
countnoise = np.zeros(len(nc))
"""
Make table of dice values
"""
dice_table = np.zeros([nc.shape[0],2])
csize = np.max([int(mask.sum()*0.0005)+5,20])
for ii in ncl:
dice_FR2 = dice(Br_clmaps_R2[:,ii],F_R2_clmaps[:,ii])
dice_FS0 = dice(Br_clmaps_S0[:,ii],F_S0_clmaps[:,ii])
dice_table[ii,:] = [dice_FR2,dice_FS0] #step 3a here and above
dice_table[np.isnan(dice_table)]=0
"""
Make table of noise gain
"""
tt_table = np.zeros([len(nc),4])
counts_FR2_Z = np.zeros([len(nc),2])
for ii in nc:
comp_noise_sel = andb([np.abs(Z_maps[:,ii])>1.95,Z_clmaps[:,ii]==0])==2
countnoise[ii] = np.array(comp_noise_sel,dtype=np.int).sum()
noise_FR2_Z = np.log10(np.unique(F_R2_maps[comp_noise_sel,ii]))
signal_FR2_Z = np.log10(np.unique(F_R2_maps[Z_clmaps[:,ii]==1,ii]))
counts_FR2_Z[ii,:] = [len(signal_FR2_Z),len(noise_FR2_Z)]
tt_table[ii,:2] = stats.ttest_ind(signal_FR2_Z,noise_FR2_Z,equal_var=False)
tt_table[np.isnan(tt_table)]=0
"""
Assemble decision table
"""
d_table_rank = np.vstack([len(nc)-rankvec(Kappas), len(nc)-rankvec(dice_table[:,0]), \
len(nc)-rankvec(tt_table[:,0]), rankvec(countnoise), len(nc)-rankvec(countsigFR2) ]).T
d_table_score = d_table_rank.sum(1)
"""
Step 1: Reject anything that's obviously an artifact
a. Estimate a null variance
"""
rej = ncl[andb([Rhos>Kappas,countsigFS0>countsigFR2])>0]
rej = np.union1d(rej,ncl[andb([dice_table[:,1]>dice_table[:,0],varex>np.median(varex)])==2])
rej = np.union1d(rej,ncl[andb([tt_table[ncl,0]<0,varex[ncl]>np.median(varex)])==2])
ncl = np.setdiff1d(ncl,rej)
varex_ub_p = np.median(varex[Kappas>Kappas[getelbow(Kappas)]])
"""
Step 2: Make a guess for what the good components are, in order to estimate good component properties
a. Not outlier variance
b. Kappa>kappa_elbow
c. Rho<Rho_elbow
d. High R2* dice compared to S0 dice
e. Gain of F_R2 in clusters vs noise
f. Estimate a low and high variance
"""
ncls = ncl.copy()
for nn in range(3): ncls = ncls[1:][(varex[ncls][1:]-varex[ncls][:-1])<varex_ub_p] #Step 2a
Kappas_lim = Kappas[Kappas<getfbounds(ne)[-1]]
Rhos_lim = np.array(sorted(Rhos[ncls])[::-1])
Rhos_sorted = np.array(sorted(Rhos)[::-1])
Kappas_elbow = min(Kappas_lim[getelbow(Kappas_lim)],Kappas[getelbow(Kappas)])
Rhos_elbow = np.mean([Rhos_lim[getelbow(Rhos_lim)] , Rhos_sorted[getelbow(Rhos_sorted)], getfbounds(ne)[0]])
good_guess = ncls[andb([Kappas[ncls]>=Kappas_elbow, Rhos[ncls]<Rhos_elbow])==2]
if debug:
import ipdb
ipdb.set_trace()
if len(good_guess)==0:
return [],sorted(rej),[],sorted(np.setdiff1d(nc,rej))
Kappa_rate = (max(Kappas[good_guess])-min(Kappas[good_guess]))/(max(varex[good_guess])-min(varex[good_guess]))
Kappa_ratios = Kappa_rate*varex/Kappas
varex_lb = scoreatpercentile(varex[good_guess],LOW_PERC )
varex_ub = scoreatpercentile(varex[good_guess],HIGH_PERC)
if debug:
import ipdb
ipdb.set_trace()
"""
Step 3: Get rid of midk components - those with higher than max decision score and high variance
"""
max_good_d_score = EXTEND_FACTOR*len(good_guess)*d_table_rank.shape[1]
midkadd = ncl[andb([d_table_score[ncl] > max_good_d_score, varex[ncl] > EXTEND_FACTOR*varex_ub])==2]
midk = np.union1d(midkadd, midk)
ncl = np.setdiff1d(ncl,midk)
"""
Step 4: Find components to ignore
"""
good_guess = np.setdiff1d(good_guess,midk)
loaded = np.union1d(good_guess, ncl[varex[ncl]>varex_lb])
igncand = np.setdiff1d(ncl,loaded)
igncand = np.setdiff1d(igncand, igncand[d_table_score[igncand]<max_good_d_score])
igncand = np.setdiff1d(igncand,igncand[Kappas[igncand]>Kappas_elbow])
ign = np.array(np.union1d(ign,igncand),dtype=np.int)
ncl = np.setdiff1d(ncl,ign)
if debug:
import ipdb
ipdb.set_trace()
"""
Step 5: Scrub the set
"""
if len(ncl)>len(good_guess):
#Recompute the midk steps on the limited set to clean up the tail
d_table_rank = np.vstack([len(ncl)-rankvec(Kappas[ncl]), len(ncl)-rankvec(dice_table[ncl,0]),len(ncl)-rankvec(tt_table[ncl,0]), rankvec(countnoise[ncl]), len(ncl)-rankvec(countsigFR2[ncl])]).T
d_table_score = d_table_rank.sum(1)
num_acc_guess = np.mean([np.sum(andb([Kappas[ncl]>Kappas_elbow,Rhos[ncl]<Rhos_elbow])==2), np.sum(Kappas[ncl]>Kappas_elbow)])
candartA = np.intersect1d(ncl[d_table_score>num_acc_guess*d_table_rank.shape[1]/RESTRICT_FACTOR],ncl[Kappa_ratios[ncl]>EXTEND_FACTOR*2])
midkadd = np.union1d(midkadd,np.intersect1d(candartA,candartA[varex[candartA]>varex_ub*EXTEND_FACTOR]))
candartB = ncl[d_table_score>num_acc_guess*d_table_rank.shape[1]*HIGH_PERC/100.]
midkadd = np.union1d(midkadd,np.intersect1d(candartB,candartB[varex[candartB]>varex_lb*EXTEND_FACTOR]))
midk = np.union1d(midk,midkadd)
#Find comps to ignore
new_varex_lb = scoreatpercentile(varex[ncl[:num_acc_guess]],LOW_PERC)
candart = np.setdiff1d(ncl[d_table_score>num_acc_guess*d_table_rank.shape[1]],midk)
ignadd = np.intersect1d(candart,candart[varex[candart]>new_varex_lb])
ignadd = np.union1d(ignadd,np.intersect1d(ncl[Kappas[ncl]<=Kappas_elbow],ncl[varex[ncl]>new_varex_lb]))
ign = np.setdiff1d(np.union1d(ign,ignadd),midk)
ncl = np.setdiff1d(ncl,np.union1d(midk,ign))
if debug:
import ipdb
ipdb.set_trace()
return list(sorted(ncl)),list(sorted(rej)),list(sorted(midk)),list(sorted(ign))
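# Illustrative sketch (not part of the original module): how the rank-based
# decision table built above combines several per-component metrics into one
# score.  scipy.stats.rankdata stands in for this module's rankvec() helper,
# which is assumed to return ranks of the same orientation; the numbers are
# made up for the example.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats import rankdata

    kappas = np.array([40., 25., 90., 10.])    # higher looks more BOLD-like
    dice_fr2 = np.array([0.6, 0.2, 0.8, 0.1])  # higher looks more BOLD-like
    countnoise = np.array([30, 80, 10, 120])   # lower looks more BOLD-like

    n = len(kappas)
    d_table_rank = np.vstack([n - rankdata(kappas),
                              n - rankdata(dice_fr2),
                              rankdata(countnoise)]).T
    d_table_score = d_table_rank.sum(1)
    print(d_table_score)  # the smallest score marks the most signal-like component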
|
ME-ICA/me-ica
|
meica.libs/select_model.py
|
Python
|
lgpl-2.1
| 12,834
|
"""Configuration and injectable fixtures for Pytest.
Supposed to replace the too-complex current UnitTest-based testing
framework.
DI and functions over complex inheritance hierarchies FTW!
"""
import os
import warnings
pytest_plugins = ["abilian.testing.fixtures"]
if os.environ.get("FAIL_ON_WARNINGS"):
# Don't remove !
# noinspection PyUnresolvedReferences
import pandas
warnings.simplefilter("error")
|
abilian/abilian-core
|
conftest.py
|
Python
|
lgpl-2.1
| 426
|
#
# -*- coding: utf-8 -*-
# Authors: Daniel P. Berrange <berrange@redhat.com>
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import urlparse
import importlib
import re
class Template(object):
def __init__(self,
source, protocol,
hostname, port,
username, password,
path, params):
"""
:param source: template source name
:param protocol: network transport protocol or None
:param hostname: registry hostname or None
:param port: registry port or None
:param username: username or None
:param password: password or None
:param path: template path identifier
:param params: template parameters
docker:///ubuntu
docker+https://index.docker.io/ubuntu?tag=latest
virt-builder:///fedora-20
"""
self.source = source
self.protocol = protocol
self.hostname = hostname
self.port = port
self.username = username
self.password = password
self.path = path
self.params = params
if self.params is None:
self.params = {}
@classmethod
def _get_source_impl(klass, source):
try:
p = re.compile("\W")
sourcemod = "".join(p.split(source))
sourcename = "".join([i.capitalize() for i in p.split(source)])
mod = importlib.import_module(
"libvirt_sandbox.image.sources." + sourcemod)
classname = sourcename + "Source"
classimpl = getattr(mod, classname)
return classimpl()
except Exception as e:
print e
raise Exception("Invalid source: '%s'" % source)
def get_source_impl(self):
if self.source == "":
raise Exception("Missing scheme in image URI")
return self._get_source_impl(self.source)
def __repr__(self):
if self.protocol is not None:
scheme = self.source + "+" + self.protocol
else:
scheme = self.source
if self.hostname:
if self.port:
netloc = "%s:%d" % (self.hostname, self.port)
else:
netloc = self.hostname
if self.username:
if self.password:
auth = self.username + ":" + self.password
else:
auth = self.username
netloc = auth + "@" + netloc
else:
netloc = None
query = "&".join([key + "=" + self.params[key] for key in self.params.keys()])
ret = urlparse.urlunparse((scheme, netloc, self.path, None, query, None))
return ret
@classmethod
def from_uri(klass, uri):
o = urlparse.urlparse(uri)
idx = o.scheme.find("+")
if idx == -1:
source = o.scheme
protocol = None
else:
source = o.scheme[0:idx]
protocol = o.scheme[idx + 1:]
query = {}
if o.query is not None and o.query != "":
for param in o.query.split("&"):
(key, val) = param.split("=")
query[key] = val
return klass(source, protocol,
o.hostname, o.port,
o.username, o.password,
o.path, query)
@classmethod
def get_all(klass, source, templatedir):
impl = klass._get_source_impl(source)
return impl.list_templates(templatedir)
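# Illustrative usage sketch (not part of the original module): parsing one of
# the URI forms listed in the Template docstring.  The URI below is only an
# example value.
if __name__ == "__main__":
    t = Template.from_uri("docker:///ubuntu?tag=latest")
    print(t.source)    # docker
    print(t.protocol)  # None
    print(t.path)      # /ubuntu
    print(t.params)    # {'tag': 'latest'}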
|
agx/libvirt-sandbox-debian
|
libvirt-sandbox/image/template.py
|
Python
|
lgpl-2.1
| 4,199
|
import os
import sys
def start():
    # enable profiling by adding "with_internal_profiling: True" to the local conf.yaml
# required: "pip install GreenletProfiler"
# Provides function stats in formats 'pstat', 'callgrind', 'ystat'
# stats are saved at "/var/lib/tendrl/profiling/$NS.publisher_id/last_run_func_stat.$stat_type"
# eg: tendrl-node-agent : /var/lib/tendrl/profiling/node_agent/last_run_func_stat.pstat
import atexit
import GreenletProfiler
GreenletProfiler.set_clock_type('cpu')
GreenletProfiler.start()
sys.stdout.write("\nStarted Tendrl profiling...")
@atexit.register
def finish():
GreenletProfiler.stop()
sys.stdout.write("\nStopped Tendrl profiling...")
stats = GreenletProfiler.get_func_stats()
_base_path = "/var/lib/tendrl/profiling/{0}/".format(NS.publisher_id)
if not os.path.exists(_base_path):
os.makedirs(_base_path)
for stat_type in ['pstat', 'callgrind', 'ystat']:
_stat_file = "last_run_func_stat.{0}".format(stat_type)
_stat_path = os.path.join(_base_path, _stat_file)
stats.save(_stat_path, type=stat_type)
sys.stdout.write("\nSaved Tendrl profiling stats at %s" % _base_path)
|
rishubhjain/commons
|
tendrl/commons/profiler.py
|
Python
|
lgpl-2.1
| 1,214
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
# Copyright (C) 2018 Ben McGinnes <ben@gnupg.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License and the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License and the GNU
# Lesser General Public along with this program; if not, see
# <https://www.gnu.org/licenses/>.
import gpg
import os.path
print("""
This script adds a new user ID to an existing key.
The gpg-agent and pinentry are invoked to enter the passphrase.
""")
c = gpg.Context()
homedir = input("Enter the GPG configuration directory path (optional): ")
fpr0 = input("Enter the fingerprint of the key to modify: ")
uid_name = input("Enter the name of the user ID: ")
uid_email = input("Enter the email address of the user ID: ")
uid_cmnt = input("Enter a comment to include (optional): ")
if homedir.startswith("~"):
if os.path.exists(os.path.expanduser(homedir)) is True:
c.home_dir = os.path.expanduser(homedir)
else:
pass
elif os.path.exists(homedir) is True:
c.home_dir = homedir
else:
pass
fpr = "".join(fpr0.split())
if uid_cmnt:
userid = "{0} ({1}) <{2}>".format(uid_name, uid_cmnt, uid_email)
else:
userid = "{0} <{2}>".format(uid_name, uid_email)
key = c.get_key(fpr, secret=True)
c.key_add_uid(key, userid)
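# Illustrative check (not part of the original script): the user ID strings the
# two branches above produce for sample, made-up inputs.
print("{0} ({1}) <{2}>".format("Alice", "work key", "alice@example.org"))
# -> Alice (work key) <alice@example.org>
print("{0} <{1}>".format("Alice", "alice@example.org"))
# -> Alice <alice@example.org>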
|
gpg/gpgme
|
lang/python/examples/howto/add-userid.py
|
Python
|
lgpl-2.1
| 2,100
|
#!/usr/bin/env python
"""Sinusoidal grating calculated in realtime."""
############################
# Import various modules #
############################
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import *
from VisionEgg.FlowControl import Presentation
from VisionEgg.Gratings import *
#####################################
# Initialize OpenGL window/screen #
#####################################
screen = get_default_screen()
######################################
# Create sinusoidal grating object #
######################################
stimulus = SinGrating2D(position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
anchor = 'center',
size = ( 300.0 , 300.0 ),
spatial_freq = 10.0 / screen.size[0], # units of cycles/pixel
temporal_freq_hz = 1.0,
orientation = 45.0 )
###############################################################
# Create viewport - intermediary between stimuli and screen #
###############################################################
viewport = Viewport( screen=screen, stimuli=[stimulus] )
########################################
# Create presentation object and go! #
########################################
p = Presentation(go_duration=(5.0,'seconds'),viewports=[viewport])
p.go()
|
visionegg/visionegg
|
demo/grating.py
|
Python
|
lgpl-2.1
| 1,456
|
# Copyright (c) 2012. Los Alamos National Security, LLC.
# This material was produced under U.S. Government contract DE-AC52-06NA25396
# for Los Alamos National Laboratory (LANL), which is operated by Los Alamos
# National Security, LLC for the U.S. Department of Energy. The U.S. Government
# has rights to use, reproduce, and distribute this software.
# NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY,
# EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.
# If software is modified to produce derivative works, such modified software should
# be clearly marked, so as not to confuse it with the version available from LANL.
# Additionally, this library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License v 2.1 as published by the
# Free Software Foundation. Accordingly, this library is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt for more details.
import sys
from cStringIO import StringIO
import core
# Define output streams
class OutputStream:
def __init__(self):
self.output_str = StringIO()
# define output stream writer method
# sim_obj should be derived from either PyEntity or PyService
def write(self,sim_obj,record_type,*message):
for token in message:
self.output_str.write(str(token));
self.output_str.write(" ");
core.output(sim_obj,record_type,self.output_str.getvalue())
self.output_str.truncate(0)
# create output stream
output = OutputStream()
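# Illustrative sketch (not part of the original module): the same "join tokens
# with spaces, then flush and truncate" pattern used by OutputStream.write(),
# shown without the simx core.output() call so it can run on its own.
def _format_record(*message):
    buf = StringIO()
    for token in message:
        buf.write(str(token))
        buf.write(" ")
    text = buf.getvalue()
    buf.truncate(0)
    return text

if __name__ == "__main__":
    print(_format_record("time", 42, "event", "arrival"))  # "time 42 event arrival "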
|
sim-x/simx
|
simx/core/OutputStream.py
|
Python
|
lgpl-2.1
| 1,732
|
#
# Authors: Robert Abram <robert.abram@entpack.com>
#
# Copyright (C) 2015-2017 EntPack
# see file 'LICENSE' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import argparse
import gettext
import logging
import multiprocessing
import operator
import os
import signal
import sys
import time
from multiprocessing import Manager
from silentdune_client import modules
from silentdune_client.utils.log import setup_logging
from silentdune_client.utils.misc import node_info_dump
from silentdune_client.utils.configuration import ClientConfiguration
from silentdune_client.utils.daemon import Daemon
_logger = logging.getLogger('sd-client')
def run():
class SDCDaemon(Daemon):
# Node configuration information
_args = None
_config = None
stopProcessing = False
reload = False
t_start = time.time()
t_mod_check = 0
def __init__(self, *args, **kwargs):
self._args = kwargs.pop('args', None)
super(SDCDaemon, self).__init__(*args, **kwargs)
def startup_modules(self):
# Get the path where this file is located.
app_path = os.path.split(os.path.realpath(__file__))[0]
# Get our package path and package name
base_path, package_name = os.path.split(app_path)
# Get loadable module list
mods = modules.__load_modules__(base_path=base_path)
active_mods = [] # List of modules marked as active.
running_mods = [] # List of modules that are really running.
# Set the configuration in each module.
for mod in mods:
mod.set_config(self._config) # Set the configuration information in the module.
# If the module is enabled, add it to the active_mods list.
if mod.module_enabled():
active_mods.append(mod)
else:
_logger.debug('Service: module {0} is disabled.'.format(mod.get_name()))
pmanager = Manager()
mqueue = pmanager.Queue()
# Keep the created child processes.
cprocs = dict() # Dictionary of module process handlers.
cqueues = dict() # Dictionary of module Queue objects.
mlist = list() # List of module names.
# Sort modules by the priority attribute so we can start them in the proper order.
sorted_mods = sorted(active_mods, key=operator.attrgetter('priority'))
for mod in sorted_mods:
_logger.debug('Service: starting module {0}: ({1})'.format(mod.get_name(), mod.priority))
if mod.service_startup() is False:
                    _logger.critical('Service: module ({0}) failed during startup.'.format(mod.get_name()))
# sys.exit(1)
continue
name = mod.get_name()
running_mods.append(mod)
mlist.append(name)
cprocs[name] = None # Add a place holder for the module process.
# Setup thread for modules wanting a processing thread.
if mod.wants_processing_thread:
# _logger.debug('Initializing thread for {0}.'.format(name))
cqueues[name] = multiprocessing.Queue()
cprocs[name] = multiprocessing.Process(
target=mod.process_handler, args=(cqueues[name], mqueue, mlist))
cprocs[name].start()
# Give the firewall manager time to setup the initial rules.
if name == 'SilentDuneClientFirewallModule':
time.sleep(2)
return running_mods, pmanager, mqueue, cprocs, cqueues, mlist
def check_module_state(self, mods, mqueue, cprocs, cqueues, mlist, force=False):
"""
Check each module that has a thread and make sure it is still alive.
            :param mods: list of running module instances to check.
            :return: the (possibly updated) mods, mqueue, cprocs and cqueues.
"""
# We only want to do a check once a minute.
time_t = int((time.time() - self.t_start))
if (time_t > self.t_mod_check and time_t % 60.0 == 0.0) or force:
self.t_mod_check = int((time.time() - self.t_start))
# Check to see that module process threads are still running.
_logger.debug('Service: checking module threads.')
for mod in mods:
name = mod.get_name()
_logger.debug('{0}: checking module thread...'.format(name))
if name in cprocs and cprocs[name]:
if cprocs[name].is_alive():
mod.restart_count = 0
else:
# See if we should attempt to restart this module
if mod.restart_count < 10:
_logger.critical('service: {0} module has unexpectedly stopped.'.format(name))
mod.restart_count += 1
_logger.info('service: attempting to restart module {0} (rc:{1})'.format(
name, mod.restart_count))
if mod.wants_processing_thread:
# _logger.debug('Initializing thread for {0}.'.format(name))
cqueues[name] = multiprocessing.Queue()
cprocs[name] = multiprocessing.Process(
target=mod.process_handler, args=(cqueues[name], mqueue, mlist))
cprocs[name].start()
else:
if mod.restart_count == 10:
_logger.warning(
'service: module restart limit exceeded for {0}, giving up.'.format(
name, mod.restart_count))
mod.restart_count += 1
if mod.restart_count % 60 == 0:
_logger.warning('service: module {0} is dead.'.format(name))
return mods, mqueue, cprocs, cqueues
def terminate_modules(self, mods, cprocs, cqueues):
"""
Shutdown modules.
"""
for mod in mods:
name = mod.get_name()
if cprocs[name] and cprocs[name].is_alive():
_logger.debug('Service: signalling {0} module to stop processing.'.format(name))
cqueues[name].put(modules.QueueTask(modules.TASK_STOP_PROCESSING))
cqueues[name].close()
cqueues[name].join_thread()
cprocs[name].join()
def run(self):
_logger.debug('Service: setting signal handlers.')
# Set SIGTERM signal Handler
signal.signal(signal.SIGTERM, signal_term_handler)
signal.signal(signal.SIGHUP, signal_hup_handler)
_logger.info('Starting Silent Dune firewall.')
# This loop allows for restarting and reloading the configuration after a SIGHUP signal has been received.
while True:
# Reset loop controllers
self.stopProcessing = False
self.reload = False
# Read the local configuration file.
self._config = ClientConfiguration(self._args.config).read_config()
mods, pmanager, mqueue, cprocs, cqueues, mlist = self.startup_modules()
                # Run main loop until we get an external signal.
_logger.debug('Service: starting main processing loop.')
while not self.stopProcessing:
mods, mqueue, cprocs, cqueues = self.check_module_state(mods, mqueue, cprocs, cqueues, mlist)
# Check manage queue for any QueueTask object.
try:
task = mqueue.get_nowait()
_logger.debug('Service: forwarding task ({0}) from {1} to {2}'.format(
task.get_task_id(), task.get_sender(), task.get_dest_name()))
if task:
# Find the destination module and send task to it.
if not task.get_dest_name() or not cqueues[task.get_dest_name()]:
_logger.error('Service: task from {0} has unknown destination.'.format(
task.get_sender()))
cqueues[task.get_dest_name()].put(task)
except:
pass
# Sleep.
time.sleep(0.25)
# Stop all module processing threads
_logger.debug('Service: terminating active modules...')
self.terminate_modules(mods, cprocs, cqueues)
# If we are not reloading, just shutdown.
if not self.reload:
break
_logger.debug('Service: reloading firewall.')
_logger.info('Firewall shutdown complete.')
# exit process
sys.exit(0)
def signal_term_handler(signal, frame):
if not _daemon.stopProcessing:
_logger.debug('Service: received SIGTERM signal.')
_daemon.stopProcessing = True
def signal_hup_handler(signal, frame):
if not _daemon.reload:
_logger.debug('Service: received SIGHUP signal.')
_daemon.reload = True
_daemon.stopProcessing = True
setup_logging(_logger, '--debug' in sys.argv)
os.environ['TMPDIR'] = '/var/run/silentdune'
if not os.path.isdir(os.environ['TMPDIR']):
os.makedirs(os.environ['TMPDIR'])
# Setup i18n - Good for 2.x and 3.x python.
kwargs = {}
if sys.version_info[0] < 3:
kwargs['unicode'] = True
gettext.install('sdc_service', **kwargs)
# Setup program arguments.
parser = argparse.ArgumentParser(prog='sdc-firewall')
parser.add_argument('-c', '--config', help=_('Use config file'), default=None, type=str) # noqa
parser.add_argument('--debug', help=_('Enable debug output'), default=False, action='store_true') # noqa
parser.add_argument('--nodaemon', help=_('Do not daemonize process'), default=False, action='store_true') # noqa
parser.add_argument('action', choices=('start', 'stop', 'restart'), default='')
args = parser.parse_args()
if os.getuid() != 0:
print('sdc-firewall: error: must be run as root')
sys.exit(4)
# --nodaemon only valid with start action
if args.nodaemon and args.action != 'start':
print('sdc-firewall: error: --nodaemon option not valid with stop or restart action')
sys.exit(1)
# Read the local configuration file.
config = ClientConfiguration(args.config).read_config()
# Dump debug information
if args.debug:
node_info_dump(args)
if not config:
_logger.error('Invalid configuration file information, aborting.')
sys.exit(1)
# Do not fork the daemon process, run in foreground. For systemd service or debugging.
if args.nodaemon:
_daemon = SDCDaemon(args=args)
_daemon.run()
else:
# Setup daemon object
_daemon = SDCDaemon(
os.path.split(config.get('settings', 'pidfile'))[0],
'0o700',
os.path.split(config.get('settings', 'pidfile'))[1],
'root',
'root',
'/dev/null',
'/dev/null',
'/dev/null',
args=args
)
if args.action == 'start':
_logger.debug('Starting daemon.')
_daemon.start()
elif args.action == 'stop':
_logger.debug('Stopping daemon.')
_daemon.stop()
elif args.action == 'restart':
_logger.debug('Restarting daemon.')
_daemon.restart()
return 0
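# Illustrative sketch (not part of the original source): the restart/back-off
# decision used by check_module_state() above, reduced to a standalone helper.
# The limit of 10 mirrors the hard-coded restart limit in the service loop.
def _should_restart(is_alive, restart_count, limit=10):
    """Return (restart_now, new_restart_count) for one module process."""
    if is_alive:
        return False, 0                  # healthy process: reset the counter
    if restart_count < limit:
        return True, restart_count + 1   # dead process: attempt another restart
    return False, restart_count + 1      # over the limit: give up, keep counting
# _should_restart(False, 3)  -> (True, 4)
# _should_restart(False, 10) -> (False, 11)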
# --- Main Program Call ---
if __name__ == '__main__':
sys.exit(run())
|
EntPack/SilentDune-Client
|
silentdune_client/sdc_firewall.py
|
Python
|
lgpl-3.0
| 12,943
|
import pytest
import time
from v8py import JavaScriptTerminated, current_context, new
def test_glob(context):
context.eval('foo = "bar"')
assert context.glob.foo == 'bar'
def test_getattr(context):
context.foo = 'bar'
assert context.foo == 'bar'
assert context.glob.foo == 'bar'
assert context.eval('foo') == 'bar'
def test_getitem(context):
context['foo'] = 'bar'
assert context['foo'] == 'bar'
assert context.glob['foo'] == 'bar'
assert context.eval('foo') == 'bar'
def test_timeout(context):
with pytest.raises(JavaScriptTerminated):
context.eval('for(;;) {}', timeout=0.1)
def test_timeout_property(context_with_timeout):
assert context_with_timeout.timeout == 0.1
start = time.time()
with pytest.raises(JavaScriptTerminated):
context_with_timeout.eval('for(;;) {}')
diff = time.time() - start
assert diff >= 0.1 and diff < 0.2
context_with_timeout.timeout = 0.25
assert context_with_timeout.timeout == 0.25
start = time.time()
with pytest.raises(JavaScriptTerminated):
context_with_timeout.eval('for(;;) {}')
diff = time.time() - start
assert diff >= 0.25 and diff < 0.3
def test_timeout_context_level(context_with_timeout):
with pytest.raises(JavaScriptTerminated):
context_with_timeout.eval('for(;;) {}')
def test_timeout_new(context_with_timeout):
context_with_timeout.eval('function Freeze() { while(true); }')
with pytest.raises(JavaScriptTerminated):
new(context_with_timeout.glob.Freeze)
def test_timeout_call(context_with_timeout):
context_with_timeout.eval('function freeze() { while(true); }')
with pytest.raises(JavaScriptTerminated):
context_with_timeout.glob.freeze()
def test_timeout_proxy(context_with_timeout):
context_with_timeout.eval("""
user = {};
user.testA = 0;
user.testC = 10;
proxy = new Proxy(user, {
get(target, prop) {
if (prop == "testA") while(true);
},
set(target, prop, value) {
if (prop == "testB") while(true);
return false;
},
deleteProperty(target, phrase) {
if (phrase == "testC") while(true);
return false;
}
});
""")
proxy = context_with_timeout.glob.proxy
with pytest.raises(JavaScriptTerminated):
testA = proxy.testA
with pytest.raises(JavaScriptTerminated):
proxy.testB = 5
with pytest.raises(JavaScriptTerminated):
del proxy.testC
def test_expose(context):
def f(): return 'f'
def g(): return 'g'
context.expose(f, g, h=f)
assert context.eval('f()') == 'f'
assert context.eval('g()') == 'g'
assert context.eval('h()') == 'f'
def f(): pass
def test_expose_module(context):
import test_context
context.expose_module(test_context)
assert context.eval('f()') is None
def test_current_context(context):
assert current_context() is None
def f():
assert current_context() is context
context.expose(f)
context.eval('f()')
|
tbodt/v8py
|
tests/test_context.py
|
Python
|
lgpl-3.0
| 3,108
|
################################################################################
"""
DESCRIPTION: Helpers for telecommand wrapper functions.
PACKAGE: spell.lang.helpers.tchelper
PROJECT: SPELL
Copyright (C) 2008, 2015 SES ENGINEERING, Luxembourg S.a.r.l.
This file is part of SPELL.
This library is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License and GNU General Public License (to which the GNU Lesser
General Public License refers) along with this library.
If not, see <http://www.gnu.org/licenses/>.
"""
###############################################################################
#===============================================================================
# SPELL imports
#===============================================================================
from spell.utils.log import *
from spell.lang.constants import *
from spell.lang.modifiers import *
from spell.lib.exception import *
from spell.lib.adapter.utctime import *
from spell.lang.functions import *
from spell.lib.adapter.constants.core import KEY_SEPARATOR
from spell.lib.adapter.tc_item import TcItemClass
from spell.lib.adapter.constants.notification import *
from spell.lib.registry import *
#===============================================================================
# Local imports
#===============================================================================
from basehelper import *
#===============================================================================
# System imports
#===============================================================================
import time,sys
################################################################################
class Send_Helper(WrapperHelper):
"""
DESCRIPTION:
Helper for the SendAndVerify wrapper.
"""
_isGroup = False
_isSequence = False
_cmdName = None
_cmdDef = None
_cmdArgs = None
# This flag is used in case of failures. The user may want to resend the
# command AND verify the parameters, or only repeat the parameters
# verification.
__doSendCommand = True
# This flag is used in case of failures. The user may want to resend the
# command and verify the parameters, but not re-adjust the limits
__doAdjustLimits = True
__doAdjustLimitsP = True
__doCheckTelemetry = True
# True if adjusting limits is feasible
__canAdjustLimits = False
# Holds the current stage of the function (TC,TM,LIM)
__section = None
__actionTaken = None
__verifyCondition = None
# Stores the original OnFailure config
__originalOnFailure = None
#===========================================================================
def __init__(self):
WrapperHelper.__init__(self,"TC")
self._opName = "Send"
self._reset()
#===========================================================================
def _initializeActionStrings(self):
WrapperHelper._initializeActionStrings(self)
self._setActionString( ACTION_REPEAT , "Repeat the whole Send() operation")
self._setActionString( ACTION_RECHECK, "Repeat the telemetry verification")
self._setActionString( ACTION_RESEND , "Send the command(s) again")
self._setActionString( ACTION_SKIP , "Skip the command injection and proceed with telemetry verification")
self._setActionString( ACTION_CANCEL , "Skip the whole operation and proceed with next SPELL instruction")
#===========================================================================
def _reset(self):
self._isGroup = False
self.__originalOnFailure = None
self._isSequence = False
self._cmdName = None
self._cmdDef = None
self._cmdArgs = None
self.__doSendCommand = True
self.__doAdjustLimits = False
self.__doAdjustLimitsP = False
self.__canAdjustLimits = False
self.__section = 'TC'
self.__actionTaken = None
self.__verifyCondition = None
self.__doCheckTelemetry = False
#===========================================================================
def _obtainVerificationDefinition(self,*args,**kargs):
# Obtain verification steps
if self._cmdArgs is not None and len(args)>=3:
self.__verifyCondition = args[3]
if type(self.__verifyCondition) != list:
raise SyntaxException("Expected a list of verification steps")
elif self._cmdArgs is None and len(args)>=2:
self.__verifyCondition = args[2]
if type(self.__verifyCondition) != list:
raise SyntaxException("Expected a list of verification steps")
elif kargs.has_key('verify'):
self.__verifyCondition = kargs.pop('verify')
else:
self.__verifyCondition = None
if self.__verifyCondition:
self.__doCheckTelemetry = True
#===========================================================================
def _obtainCommandDefinition(self, *args, **kargs):
LOG("Obtaining command definition", level = LOG_LANG)
if len(args) == 0:
LOG("No positional arguments", level = LOG_LANG)
# If no positional arguments are given, the command shall be
# given with these keywords
if not kargs.has_key('command') and\
not kargs.has_key('sequence') and\
not kargs.has_key('group'):
raise SyntaxException("Expected a command item or name")
else:
if kargs.has_key('command'):
LOG("Using keyword argument command", level = LOG_LANG)
self._isSequence = False
self._isGroup = False
self._cmdDef = kargs.pop('command')
if type(self._cmdDef)==list:
raise SyntaxException("Cannot accept list as single command")
elif kargs.has_key('group'):
LOG("Using keyword argument group", level = LOG_LANG)
self._isSequence = False
self._isGroup = True
self._cmdDef = kargs.pop('group')
if type(self._cmdDef)!=list:
raise SyntaxException("Shall provide a command list")
else:
LOG("Using keyword argument sequence", level = LOG_LANG)
self._isSequence = True
self._isGroup = False
self._cmdDef = kargs.pop('sequence')
if type(self._cmdDef)==list:
raise SyntaxException("Cannot accept command list as a sequence")
else:
raise SyntaxException("Expected keyword: command, group or sequence")
# Create the command item if necessary
if type(self._cmdDef)==str:
self._cmdDef = REGISTRY['TC'][self._cmdDef]
# Do it for each item in the list, if it is the case
elif type(self._cmdDef)==list:
cpy = []
for item in self._cmdDef:
if type(item)==str:
cpy += [REGISTRY['TC'][item]]
elif isinstance(item,TcItemClass):
cpy += [item]
else:
raise SyntaxException("Unexpected item in group: " + repr(item))
# Obtain the string representation of the entity being sent
if type(self._cmdDef)==list:
self._cmdName = []
for item in self._cmdDef:
if type(item)==str:
self._cmdName += [item]
# Must be tc item, the check was done already
else:
desc = item.desc()
if desc != "": desc = ": " + desc
self._cmdName += [item.name() + desc]
# The else case is already controlled
else:
desc = self._cmdDef.desc()
if desc != "": desc = ": " + desc
self._cmdName = self._cmdDef.name() + desc
LOG("Got command definition: " + str(self._cmdName), level = LOG_LANG)
LOG("Sequence flag: " + str(self._isSequence), level = LOG_LANG)
LOG("Group flag : " + str(self._isGroup), level = LOG_LANG)
# Copy the flags to config
self.addConfig(Sequence,self._isSequence)
#===========================================================================
def _checkCommandDefinition(self):
if not isinstance(self._cmdDef,TcItemClass) and\
not type(self._cmdDef) == str and\
not type(self._cmdDef) == list:
raise SyntaxException("Expected a TC name, TC item or TC list")
#===========================================================================
def _obtainCommandArguments(self, *args, **kargs):
# 3. Obtain the arguments
self._cmdArgs = None
if not self._isGroup:
LOG("Getting arguments for single command", level = LOG_LANG)
if kargs.has_key('args'):
LOG("Using keyword args", level = LOG_LANG)
self._cmdArgs = kargs.pop('args')
else:
LOG("No arguments found", level = LOG_LANG)
self._cmdArgs = None
        # Using a group together with the args keyword is not accepted (??)
else:
if kargs.has_key('args'):
raise SyntaxException("Cannot use args with TC lists")
#===========================================================================
def _parseCommandArguments(self):
# 6. Parse arguments if any
if self._cmdArgs is not None:
if len(self._cmdArgs)==0:
raise SyntaxException("Cannot accept empty argument list")
# Clear any previously existing argument
self._cmdDef.clear()
for argument in self._cmdArgs:
if type(argument)!=list:
raise SyntaxException("Malformed argument")
if len(argument)<1 or type(argument[0])!=str:
raise SyntaxException("Malformed argument")
argName = argument[0]
argument = argument[1:]
LOG("Set argument: " + str(argName) + "=" + repr(argument), level = LOG_LANG)
self._cmdDef[argName] = argument
#===========================================================================
def _checkCommandArguments(self):
if not self._cmdArgs is None and type(self._cmdArgs)!=list:
raise SyntaxException("Expected an argument list")
#===========================================================================
def _doPreOperation(self, *args, **kargs ):
#-----------------------------------------------------------------------
# Parse the command information
#-----------------------------------------------------------------------
# 1. Obtain the command/sequence
self._obtainCommandDefinition(*args,**kargs)
# 2. Check the command correctness
self._checkCommandDefinition()
# 3. Obtain tc arguments
self._obtainCommandArguments(*args,**kargs)
# 4. Check arguments correctness
self._checkCommandArguments()
# 5. Parse command arguments
self._parseCommandArguments()
# Some text messages, not needed if Confirm is activated as the confirmation
# mechanism already displays the command
if (not self.hasConfig(Confirm)) or (self.getConfig(Confirm)!=True):
if self._isSequence:
self._write("Sending sequence " + repr(self._cmdName))
elif self._isGroup:
self._write("Sending group of " + str(len(self._cmdDef)) + " element(s)")
for name in self._cmdName:
self._write(" - " + repr(name))
else:
self._write("Sending command " + repr(self._cmdName))
#-----------------------------------------------------------------------
# Parse the telemetry information
#-----------------------------------------------------------------------
self._obtainVerificationDefinition(*args,**kargs)
if type(self.__verifyCondition)==list:
if type(self.__verifyCondition[0])!=list:
self.__verifyCondition = [self.__verifyCondition]
#-----------------------------------------------------------------------
# Avoid alarms if the conditions are ok
#-----------------------------------------------------------------------
self.__doAdjustLimits = self.hasConfig(AdjLimits) and \
type(self.__verifyCondition)==list and \
self.getConfig(AdjLimits)==True
self.__doAdjustLimitsP = self.__doAdjustLimits
self.__canAdjustLimits = self.__doAdjustLimits
# Store information for possible failures
self.setFailureInfo("TM", self._cmdDef)
#==========================================================================
def _buildCommandDescription(self):
msg = "Please confirm execution of the following "
if self._isGroup:
msg += "command group:"
for cmd in self._cmdDef:
msg += "\n Command: " + cmd.name()
if (cmd.desc().strip() != ""): msg += " ('" + cmd.desc() + "')"
if len(cmd._getParams())>0:
msg += "\n Arguments:"
for param in cmd._getParams():
msg += "\n - " + repr(param.name) + " = " + str(param.value.get()) + " " + str(param.value.units())
elif self._isSequence:
msg += "sequence: " + self._cmdDef.name()
if (self._cmdDef.desc().strip() != ""): msg += " ('" + self._cmdDef.desc() + "')"
if len(self._cmdDef.getElements())>0:
msg += "\n Elements:"
for element in self._cmdDef.getElements():
msg += "\n - " + repr(element)
if len(self._cmdDef._getParams())>0:
msg += "\n Arguments:"
for param in self._cmdDef._getParams():
msg += "\n - " + repr(param.name) + " = " + str(param.value.get()) + " " + str(param.value.units())
else:
msg += "command: " + self._cmdDef.name()
if (self._cmdDef.desc().strip() != ""): msg += " ('" + self._cmdDef.desc() + "')"
if len(self._cmdDef._getParams())>0:
msg += "\n Arguments:"
for param in self._cmdDef._getParams():
msg += "\n - " + repr(param.name) + " = " + str(param.value.get()) + " " + str(param.value.units())
return msg
#===========================================================================
def _doOperation(self, *args, **kargs ):
repeat = False
self.__originalOnFailure = self.getConfig(OnFailure)
#-----------------------------------------------------------------------
# CONFIRM SECTION
#-----------------------------------------------------------------------
# Confirm execution if needed
confirm = REGISTRY['TC'].shouldForceTcConfirm()
confirm = confirm or self.hasConfig(Confirm) and self.getConfig(Confirm) == True
if confirm:
self.__section = 'CONFIRM'
msg = self._buildCommandDescription()
if not self._prompt(msg, [], {Type:OK_CANCEL}):
return [ False, False, NOTIF_STATUS_CL, "Cancelled by user" ]
#-----------------------------------------------------------------------
# LIMIT ADJUSTMENT SECTION
#-----------------------------------------------------------------------
if self.__canAdjustLimits and self.__doAdjustLimitsP:
self.__section = 'LIM1'
# We don't allow resend nor recheck, only repeat
self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RESEND))
self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RECHECK))
# Adapt the action messages
self._setActionString( ACTION_REPEAT , "Retry disabling limits")
self._setActionString( ACTION_SKIP , "Skip limits adjustment and command injection. Proceed with telemetry verification")
self._setActionString( ACTION_CANCEL , "Skip the whole Send() operation and return failure (False)")
# Store information for possible failures
self.setFailureInfo("TM", self.__verifyCondition)
# We need to enlarge the limit range to the maximum to
# avoid alarms (analog parameters) or to allow any
# status value (status parameters)
REGISTRY['CIF'].write("Avoiding alarms by adjusting limits before TC execution")
for condition in self.__verifyCondition:
paramName = condition[0]
paramValue = condition[2]
operator = condition[1]
# Do not adjust limits if the condition config dict says the contrary
if type(condition[-1])==dict:
itemCfg = condition[-1]
if itemCfg.has_key(AdjLimits) and itemCfg[AdjLimits] == False: continue
# Do not adjust limits if eq operator is not used
if operator != eq: continue
# Proceed with limit adjustment
if type(paramValue)==str: #Status parameters
# First get the currentValue
paramItem = REGISTRY['TM'][paramName]
paramItem.refresh( Wait = False )
currentValue = paramItem.eng( Wait = False )
# Build the expected value list
if (currentValue != paramValue):
expectedValues = currentValue + ", " + paramValue
else:
continue
limits = {}
limits[Expected] = expectedValues
# Adjust the limits accordingly
REGISTRY['CIF'].write(" - " + repr(paramName) + " adjusting to expected values: " + expectedValues)
else: #Analog parameters
# Set the limit to the maximum value
limits = {}
limits[LoRed] = -1.7e+308
limits[LoYel] = -1.7e+308
limits[HiRed] = 1.7e+308
limits[HiYel] = 1.7e+308
REGISTRY['CIF'].write(" - " + repr(paramName) + " enlarged analog limits to the maximum")
REGISTRY['TM'].setLimits( paramName, limits, config = self.getConfig() )
# Reset the OnFailure config
self.addConfig(OnFailure, self.__originalOnFailure)
#-----------------------------------------------------------------------
# COMMAND SECTION
#-----------------------------------------------------------------------
# If we are repeating the operation due to an user action, check
# the flag to see if we have to resend the command
if self.__doSendCommand:
self.__section = 'TC'
# Store information for possible failures
self.setFailureInfo("TC", self._cmdDef)
# We do not allow recheck or repeat yet, only resend
self.addConfig(OnFailure,self.getConfig(OnFailure) & (~REPEAT))
self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RECHECK))
# Adapt the action messages
if self._isGroup:
self._setActionString( ACTION_RESEND , "Send the whole command group again")
elif self._isSequence:
self._setActionString( ACTION_RESEND , "Send the command sequence again")
else:
self._setActionString( ACTION_RESEND , "Send the command again")
if self.__verifyCondition:
self._setActionString( ACTION_SKIP , "Skip the command injection. Proceed with telemetry verification")
else:
self._setActionString( ACTION_SKIP , "Skip the command injection and return success (True)")
self._setActionString( ACTION_CANCEL , "Skip the whole Send() operation and return failure (False)")
try:
# Actually send the command
tcIsSuccess = REGISTRY['TC'].send(self._cmdDef, config = self.getConfig() )
except DriverException,ex:
raise ex
if tcIsSuccess:
self._write("Execution success")
else:
self._write("Execution failed", {Severity:ERROR} )
raise DriverException("Command execution failed")
else:
tcIsSuccess = True
# Reset the OnFailure config
self.addConfig(OnFailure, self.__originalOnFailure)
#-----------------------------------------------------------------------
# TELEMETRY SECTION
#-----------------------------------------------------------------------
# If there are verification sets, verify them
if self.__doCheckTelemetry and self.__verifyCondition and tcIsSuccess:
self.__section = 'TM'
# Store information for possible failures
self.setFailureInfo("TM", self.__verifyCondition)
# Adapt the action messages
self._setActionString( ACTION_RECHECK, "Repeat the telemetry verification")
self._setActionString( ACTION_SKIP , "Skip the telemetry verification and return success (True)")
self._setActionString( ACTION_CANCEL , "Skip the telemetry verification and return failure (False)")
# Wait some time before verifying if requested
if self.hasConfig(Delay):
delay = self.getConfig(Delay)
if delay:
from spell.lang.functions import WaitFor
self._write("Waiting "+ str(delay) + " seconds before TM verification", {Severity:INFORMATION})
WaitFor(delay, Notify=False, Verbosity=999)
            # We don't allow repeat here but allow recheck at least
self.addConfig(OnFailure,self.getConfig(OnFailure) & (~REPEAT))
# Adapt the action messages
self._setActionString( ACTION_RECHECK, "Repeat the telemetry verification")
self._setActionString( ACTION_SKIP , "Skip the telemetry verification and return success (True)")
self._setActionString( ACTION_CANCEL , "Skip the telemetry verification and return failure (False)")
# Perform verification
tmIsSuccess = REGISTRY['TM'].verify(self.__verifyCondition, config=self.getConfig())
#repeat, tmIsSuccess = self._processActionOnResult(tmIsSuccess)
else:
tmIsSuccess = True
# Reset the OnFailure config
self.addConfig(OnFailure, self.__originalOnFailure)
#-----------------------------------------------------------------------
# ADJUST LIMITS SECTION
#-----------------------------------------------------------------------
if tmIsSuccess and self.__canAdjustLimits and self.__doAdjustLimits:
self.__section = "LIM2"
# Store information for possible failures
self.setFailureInfo("TM", self.__verifyCondition)
            # We don't allow recheck/resend for this, only repeat if the user wants
self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RESEND))
self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RECHECK))
# Adapt the action messages
self._setActionString( ACTION_REPEAT , "Repeat the final limit adjustment")
self._setActionString( ACTION_SKIP , "Skip the final limit adjustment and return success (True)")
self._setActionString( ACTION_CANCEL , "Skip the final limit adjustment and return failure (False)")
REGISTRY['CIF'].write("Adjusting limit definitions after TC execution")
for condition in self.__verifyCondition:
paramName = condition[0]
paramValue = condition[2]
operator = condition[1]
                # Do not adjust limits if the eq operator is not used
if operator != eq: continue
# Do not adjust limits if the condition config dict says the contrary
conditionTolerance = None
if type(condition[-1])==dict:
itemCfg = condition[-1]
conditionTolerance = itemCfg.get(Tolerance)
if itemCfg.has_key(AdjLimits) and itemCfg[AdjLimits] == False: continue
if type(paramValue)==str: #Status parameters
# Build the expected value list
limits = {}
limits[Expected] = paramValue
# Adjust the limits accordingly
REGISTRY['CIF'].write(" - " + repr(paramName) + " adjusting to expected value: " + paramValue)
else: #Analog parameters
# if the condition has its own tolerance, use it
if conditionTolerance:
tolerance = conditionTolerance
else:
tolerance = self.getConfig(Tolerance)
if tolerance is None: tolerance = 0.1
limits = {}
limits[LoRed] = paramValue - tolerance
limits[LoYel] = paramValue - tolerance
limits[HiRed] = paramValue + tolerance
limits[HiYel] = paramValue + tolerance
REGISTRY['CIF'].write(" - " + repr(paramName) + " limits set to ( " + str(limits[LoRed]) +
" , " + str(limits[LoYel]) + " | " + str(limits[HiYel]) + " , " + str(limits[HiRed]) + " )")
REGISTRY['CIF'].write(" Tolerance used: " + str(tolerance))
REGISTRY['TM'].setLimits( paramName, limits, config = self.getConfig() )
# Reset the OnFailure config
self.addConfig(OnFailure, self.__originalOnFailure)
# Depending on the result of both operations we decide to repeat the whole
# or part of the operation.
if self.__verifyCondition is None:
result = tcIsSuccess
else:
result = tcIsSuccess and tmIsSuccess
if self.__actionTaken in ["SKIP","CANCEL"]:
opStatus = NOTIF_STATUS_SP
elif result:
opStatus = NOTIF_STATUS_OK
else:
opStatus = NOTIF_STATUS_FL
return [ repeat, result, opStatus, "" ]
#===========================================================================
def _driverUpdateActionList(self, theOptions, exception = None):
if self.__section == "TC":
return REGISTRY['TC'].driverUpdateActionList( theOptions, exception )
return theOptions
#===========================================================================
def _driverPerformAction(self, code):
if self.__section == "TC":
return REGISTRY['TC'].driverPerformAction(code)
return None # [False,False]
#===========================================================================
def _driverBeforeAction(self, code):
if self.__section == "TC":
return REGISTRY['TC'].driverBeforeAction(code)
#===========================================================================
def _driverAfterAction(self, code):
if self.__section == "TC":
return REGISTRY['TC'].driverAfterAction(code)
#===========================================================================
def _getExceptionFlag(self, exception ):
# Special case for verify, OnFalse
if exception.reason.find("evaluated to False")>0:
return self.getConfig(PromptUser)
else:
return self.getConfig(PromptFailure)
#===========================================================================
def _getBehaviorOptions(self, exception):
# If the OnFailure parameter is not set, get the default behavior.
# This default behavior depends on the particular primitive being
# used, so it is implemented in child wrappers.
if self.getConfig(OnFailure) is None:
LOG("Using defaults")
self.setConfig({OnFailure:ABORT})
# Special case for verify, OnFalse
if exception and (exception.reason.find("evaluated to False")>0):
optionRef = self.getConfig(OnFalse)
else:
optionRef = self.getConfig(OnFailure)
# Get the desired behavior
theOptions = self._getActionList( optionRef, exception )
return theOptions
#===========================================================================
def _doSkip(self):
self.__actionTaken = "SKIP"
if self.getConfig(PromptUser)==True:
self._write("Operation skipped", {Severity:WARNING} )
# By skipping the operation, if we are in LIM1 or TC stages we still
# want to verify TM
if self.__section in ['LIM1','TC']:
self.__doAdjustLimitsP = False
self.__doAdjustLimits = False
self.__doSendCommand = False
self.__doCheckTelemetry = True
return [True,False]
elif self.__section == 'TM':
self.__doAdjustLimitsP = False
self.__doAdjustLimits = False
self.__doSendCommand = False
self.__doCheckTelemetry = False
return [True,False]
else:
return [False,True]
#===========================================================================
def _doCancel(self):
self._write("Operation cancelled", {Severity:WARNING} )
self.__actionTaken = "CANCEL"
return [False,False]
#===========================================================================
def _doResend(self):
self.__actionTaken = "RESEND"
if self._isSequence:
self._write("Retrying sequence execution", {Severity:WARNING} )
elif self._isGroup:
self._write("Retrying group execution", {Severity:WARNING} )
else:
self._write("Retrying command execution", {Severity:WARNING} )
self.__doSendCommand = True
self.__doAdjustLimitsP = False
self.__doCheckTelemetry = True
return [True,False]
#===========================================================================
def _doRepeat(self):
self.__actionTaken = "CANCEL"
self._write("Retry whole operation", {Severity:WARNING} )
self.__doAdjustLimits = True
self.__doAdjustLimitsP = True
self.__doSendCommand = True
self.__doCheckTelemetry = True
return [True,False]
#===========================================================================
def _doRecheck(self):
self.__actionTaken = "RECHECK"
self._write("Retry verification block", {Severity:WARNING} )
self.__doSendCommand = False
self.__doAdjustLimitsP = False
self.__doAdjustLimits = True
self.__doCheckTelemetry = True
return [True,False]
################################################################################
class BuildTC_Helper(WrapperHelper):
"""
DESCRIPTION:
Helper for the Build TC wrapper.
"""
_tcName = None
_tcArguments = []
_tcItem = None
_isSequence = False
#===========================================================================
def __init__(self):
WrapperHelper.__init__(self, "TC")
self._tcName = None
self._tcArguments = []
self._opName = "TC build"
self._tcItem = None
self._isSequence = False
#===========================================================================
def _obtainCommandName(self, *args, **kargs ):
if len(args)==1:
if type(args[0])!=str:
raise SyntaxException("Expected a command name")
self._tcName = args[0]
elif len(args)==0:
if kargs.has_key('command'):
self._tcName = kargs.get('command')
elif kargs.has_key('sequence'):
self._tcName = kargs.get('sequence')
self._isSequence = True
else:
raise SyntaxException("Expected a command or sequence")
else:
raise SyntaxException("Expected a command name")
#===========================================================================
def _obtainCommandArguments(self, *args, **kargs ):
if len(args)<=1:
if kargs.has_key('args'):
self._tcArguments = kargs.get('args')
else:
if type(args[1])!=list:
raise SyntaxException("Expected a list of arguments")
self._tcArguments = args[1]
#===========================================================================
def _doPreOperation(self, *args, **kargs ):
self._obtainCommandName(*args,**kargs)
self._obtainCommandArguments(*args,**kargs)
# Store information for possible failures
self.setFailureInfo("TC", self._tcName)
#===========================================================================
def _doOperation(self, *args, **kargs ):
self._setActionString( ACTION_SKIP , "Skip the command construction and return None")
self._setActionString( ACTION_REPEAT , "Repeat the command construction")
if self._isSequence:
self._write("Building sequence " + repr(self._tcName))
else:
self._write("Building command " + repr(self._tcName))
# Create the item
LOG("Obtaining TC entity: " + repr(self._tcName), level = LOG_LANG)
self._tcItem = REGISTRY['TC'][self._tcName]
self._tcItem.clear()
self._tcItem.configure(self.getConfig())
if self._isSequence:
self._tcItem.addConfig(Sequence,True)
# Assign the arguments
for tcArg in self._tcArguments:
LOG("Parsed TC argument: " + repr(tcArg[0]), level = LOG_LANG)
LOG("Argument config : " + repr(tcArg[1:]), level = LOG_LANG)
self._tcItem[ tcArg[0] ] = tcArg[1:]
self._write(" - Argument " + repr(tcArg[0]) + " value " + repr(tcArg[1:]))
return [False,self._tcItem,NOTIF_STATUS_OK,""]
#===========================================================================
def _doSkip(self):
self._write("Skipping command construction", {Severity:WARNING} )
self._write("CAUTION: procedure logic may become invalid!", {Severity:WARNING} )
self._tcItem = None
return [False,None]
#===========================================================================
def _doRepeat(self):
self._write("Repeat command construction", {Severity:WARNING} )
return [True,False]
################################################################################
class BuildMemoryLoad_Helper(BuildTC_Helper):
"""
DESCRIPTION:
Helper for the BuildMemoryLoad wrapper.
"""
#===========================================================================
def __init__(self):
BuildTC_Helper.__init__(self)
#===========================================================================
def _doOperation(self, *args, **kargs ):
self._setActionString( ACTION_SKIP , "Skip the memory load construction and return None")
self._setActionString( ACTION_REPEAT , "Repeat the memory load construction")
        repeat, tcItem, status, msg = super(BuildMemoryLoad_Helper, self)._doOperation(*args, **kargs)
tcItem.addConfig('MemoryLoad',True)
return [repeat,tcItem,status,msg]
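# Illustrative sketch (assumption, not part of the original module): how the
# "getConfig(OnFailure) & (~RESEND)" masking used throughout Send_Helper works.
# The numeric flag values below are made up for the example; in SPELL they come
# from spell.lang.modifiers.
if __name__ == "__main__":
    _RESEND, _RECHECK, _REPEAT, _ABORT = 1, 2, 4, 8
    on_failure = _RESEND | _RECHECK | _REPEAT | _ABORT
    # Before the limit-adjustment stage the helper strips RESEND and RECHECK,
    # leaving REPEAT (and ABORT) as the only offered recovery actions:
    on_failure = on_failure & (~_RESEND)
    on_failure = on_failure & (~_RECHECK)
    print(bool(on_failure & _REPEAT))  # True
    print(bool(on_failure & _RESEND))  # False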
|
Spacecraft-Code/SPELL
|
src/spell/spell/lang/helpers/tchelper.py
|
Python
|
lgpl-3.0
| 37,359
|
import sys
sys.path.insert(0, "..")
import logging
import time
try:
from IPython import embed
except ImportError:
import code
def embed():
vars = globals()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
from opcua import Client
from opcua import ua
class SubHandler(object):
"""
    Subscription handler. To receive events from the server for a subscription,
    the data_change and event methods are called directly from the receiving
    thread. Do not do expensive, slow or network operations there; create
    another thread if you need to do such things.
"""
def datachange_notification(self, node, val, data):
print("Python: New data change event", node, val)
def event_notification(self, event):
print("Python: New event", event)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARN)
#logger = logging.getLogger("KeepAlive")
#logger.setLevel(logging.DEBUG)
client = Client("opc.tcp://localhost:4840/freeopcua/server/")
# client = Client("opc.tcp://admin@localhost:4840/freeopcua/server/") #connect using a user
try:
client.connect()
client.load_type_definitions() # load definition of server specific structures/extension objects
# Client has a few methods to get proxy to UA nodes that should always be in address space such as Root or Objects
root = client.get_root_node()
print("Root node is: ", root)
objects = client.get_objects_node()
print("Objects node is: ", objects)
# Node objects have methods to read and write node attributes as well as browse or populate address space
print("Children of root are: ", root.get_children())
# get a specific node knowing its node id
#var = client.get_node(ua.NodeId(1002, 2))
#var = client.get_node("ns=3;i=2002")
#print(var)
#var.get_data_value() # get value of node as a DataValue object
#var.get_value() # get value of node as a python builtin
#var.set_value(ua.Variant([23], ua.VariantType.Int64)) #set node value using explicit data type
#var.set_value(3.9) # set node value using implicit data type
# Now getting a variable node using its browse path
myvar = root.get_child(["0:Objects", "2:MyObject", "2:MyVariable"])
obj = root.get_child(["0:Objects", "2:MyObject"])
print("myvar is: ", myvar)
# subscribing to a variable node
handler = SubHandler()
sub = client.create_subscription(500, handler)
handle = sub.subscribe_data_change(myvar)
time.sleep(0.1)
# we can also subscribe to events from server
sub.subscribe_events()
# sub.unsubscribe(handle)
# sub.delete()
# calling a method on server
res = obj.call_method("2:multiply", 3, "klk")
print("method result is: ", res)
embed()
finally:
client.disconnect()
|
iirob/python-opcua
|
examples/client-example.py
|
Python
|
lgpl-3.0
| 3,005
|
import abc
import math
import numpy
from scipy.io import loadmat
from Configs import Configs
from Paths import Paths
from datasets.LupusFilter import VisitsFilter, \
NullFIlter, TemporalSpanFilter
from infos.Info import Info, NullInfo
from infos.InfoElement import SimpleDescription, PrintableInfoElement
from infos.InfoGroup import InfoGroup
from infos.InfoList import InfoList
from datasets.Batch import Batch
from datasets.Dataset import Dataset
import os
# TO CHECK normalization, selected features
class BuildBatchStrategy(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def build_batch(self, patience):
"""build up a batch according to the strategy"""
@abc.abstractmethod
def keys(self) -> list:
"""return the keys of the sets to be used with this strategy"""
@abc.abstractmethod
def n_in(self, num_pat_feats):
"""returns the number of features each batch is composed of given the number of features
of each visits of a patience"""
class PerVisitTargets(BuildBatchStrategy):
def n_in(self, num_pat_feats):
return num_pat_feats
def __init__(self):
pass
def keys(self) -> list:
return ['early_pos', 'late_pos', 'neg']
def build_batch(self, patience):
feats = patience['features']
targets = patience['targets']
n_visits = len(targets)
mask = numpy.ones_like(feats)
return feats[0:n_visits - 1, :], targets[1:n_visits, :], mask
class PerPatienceTargets(BuildBatchStrategy):
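    """Builds one supervised step per patient: the inputs are the visits that
    precede the first positive one (or all but the last visit for patients who
    never turn positive), and only the last of those visits is unmasked,
    labelled 1 if the patient eventually becomes positive and 0 otherwise."""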
def n_in(self, num_pat_feats):
return num_pat_feats
def keys(self) -> list:
return ['neg', 'late_pos']
def build_batch(self, patience):
feats = patience['features']
targets = patience['targets']
non_zero_indexes = numpy.where(targets > 0)[0]
if len(non_zero_indexes) > 0:
first_positive_idx = numpy.min(non_zero_indexes)
assert (first_positive_idx > 0)
outputs = numpy.zeros(shape=(first_positive_idx, 1), dtype=Configs.floatType)
outputs[-1] = 1
inputs = feats[0:first_positive_idx, :]
else:
inputs = feats[0:-1, :]
outputs = targets[0:-1, :]
mask = numpy.zeros_like(outputs)
mask[-1, :] = 1
return inputs, outputs, mask
class TemporalDifferenceTargets(BuildBatchStrategy):
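    """Reduces each patient to a single example whose input is the difference
    between the features of the last visit before the first positive one (or
    of the second-to-last visit for never-positive patients) and those of the
    first visit, with a single binary target."""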
def n_in(self, num_pat_feats):
return num_pat_feats
def keys(self) -> list:
return ['neg', 'late_pos']
def build_batch(self, patience):
feats = patience['features']
targets = patience['targets']
outputs = numpy.zeros(shape=(1, 1), dtype=Configs.floatType)
inputs = numpy.zeros(shape=(1, feats.shape[1]), dtype=Configs.floatType)
non_zero_indexes = numpy.where(targets > 0)[0]
if len(non_zero_indexes) > 0:
first_positive_idx = numpy.min(non_zero_indexes)
assert (first_positive_idx > 0)
outputs[0, :] = 1
inputs[0, :] = feats[first_positive_idx - 1, :] - feats[0, :]
else:
inputs[0, :] = feats[-2, :] - feats[0, :]
outputs[0, :] = targets[-2, :]
mask = numpy.zeros_like(outputs)
mask[0, :] = 1
return inputs, outputs, mask
class LastAndFirstVisitsTargets(BuildBatchStrategy):
def n_in(self, num_pat_feats):
return num_pat_feats * 2
def keys(self) -> list:
return ['neg', 'late_pos']
def build_batch(self, patience):
feats = patience['features']
targets = patience['targets']
outputs = numpy.zeros(shape=(1, 1), dtype=Configs.floatType)
inputs = numpy.zeros(shape=(1, feats.shape[1] * 2), dtype=Configs.floatType)
non_zero_indexes = numpy.where(targets > 0)[0]
if len(non_zero_indexes) > 0:
first_positive_idx = numpy.min(non_zero_indexes)
assert (first_positive_idx > 0)
outputs[0, :] = 1
inputs[0, :] = numpy.concatenate((feats[first_positive_idx - 1, :], feats[0, :]), axis=0)
else:
inputs[0, :] = numpy.concatenate((feats[-2, :], feats[0, :]), axis=0)
outputs[0, :] = targets[-2, :]
mask = numpy.zeros_like(outputs)
mask[0, :] = 1
return inputs, outputs, mask
class LupusDataset(Dataset):
@staticmethod
def parse_mat(mat_file: str, feature_names:list=None):
mat_obj = loadmat(mat_file)
positive_patients = mat_obj['pazientiPositivi']
negative_patients = mat_obj['pazientiNegativi']
features_struct = mat_obj['selectedFeatures']
# features_struct = mat_obj['featuresVip7']
features_names = LupusDataset.__find_features_names(features_struct) if feature_names is None else feature_names
# features_names = ['DNA', 'arthritis', 'c3level', 'c4level', 'hematological', 'skinrash', 'sledai2kInferred']
#
# features_names = ['APS', 'DNA', 'FM', 'Hashimoto', 'MyasteniaGravis', 'SdS',
# 'arterialthrombosis', 'arthritis', 'c3level', 'c4level', 'dislipidemia', 'hcv',
# 'hematological', 'hypertension', 'hypothyroidism', 'kidney', 'mthfr', 'npsle',
# 'pregnancypathology', 'serositis', 'sex', 'skinrash', 'sledai2kInferred',
# 'venousthrombosis']
# first 10 in ranking
# features_names = ["c4level", "c3level", "arthritis", "arterialthrombosis", "SdS", "MyasteniaGravis", "Hashimoto", "FM", "DNA", "APS"]
# last 10 in ranking
# features_names = ["venousthrombosis", "sledai2kInferred", "skinrash", "sex", "serositis", "pregnancypathology",
# "npsle", "mthfr", "kidney", "hypothyroidism"]
# last 10 without sledai
# features_names = ["venousthrombosis", "serositis", "pregnancypathology", "mthfr", "kidney", "hypothyroidism"]
# in-between
# features_names = ["hypertension", "hematological", "hcv", "dislipidemia"]
# features_names = ['APS' 'DNA' 'FM' 'Hashimoto' 'MyasteniaGravis' 'SdS' 'age'
# 'arterialthrombosis' 'arthritis' 'c3level' 'c4level' 'dislipidemia' 'hcv'
# 'hematological' 'hypertension' 'hypothyroidism' 'kidney' 'mthfr' 'npsle'
# 'pregnancypathology' 'serositis' 'sex' 'skinrash' 'sledai2kInferred'
# 'venousthrombosis' 'yearOfDisease']
# features_names = ['age', 'MyasteniaGravis', 'arthritis', 'c3level', 'c4level', 'hematological', 'skinrash', 'sledai2kInferred']
return positive_patients, negative_patients, features_names
@staticmethod
def load_mat(mat_file: str, visit_selector: VisitsFilter = NullFIlter(), seed=Configs.seed, features_names:list=None):
positive_patients, negative_patients, features_names = LupusDataset.parse_mat(mat_file=mat_file, feature_names=features_names)
data = numpy.concatenate((positive_patients, negative_patients), axis=0)
features_normalizations = LupusDataset.__find_normalization_factors(features_names, data)
result = LupusDataset.__process_patients(data, features_names, features_normalizations,
visit_selector=visit_selector)
####################
# folder = '/home/giulio'
# os.makedirs(folder, exist_ok=True)
# prefix = folder + '/'
# file = open(prefix + "visits_pos.txt", "w")
# exs = result["late_pos"]
# for i in range(len(exs)):
# pat = LupusDataset.__get_patience_descr(exs[i]) + "\n"
# file.write(pat)
# file.close()
####################
early_positives = result["early_pos"]
late_positives = result["late_pos"]
negatives = result["neg"]
rng = numpy.random.RandomState(seed)
rng.shuffle(early_positives)
rng.shuffle(late_positives)
rng.shuffle(negatives)
infos = InfoGroup('Lupus Dataset', InfoList(PrintableInfoElement('features', '', features_names),
PrintableInfoElement('normalizations', '', features_normalizations),
PrintableInfoElement('early positives', ':d',
len(early_positives)),
PrintableInfoElement('late positives', ':d', len(late_positives)),
PrintableInfoElement('negatives', ':d', len(negatives)),
visit_selector.infos))
return early_positives, late_positives, negatives, result["max_visits_late_pos"], result[
"max_visits_neg"], features_names, infos
@staticmethod
def __split_set(set, i, k):
n = len(set)
m = int(float(n) / k)
start = m * i
end = int(start + m if i < k - 1 else n)
return set[0:start] + set[end:], set[start:end]
@staticmethod
    def no_test_dataset(mat_file: str, strategy: BuildBatchStrategy = PerVisitTargets(), seed: int = Configs.seed,
visit_selector: VisitsFilter = NullFIlter(), feats: list = None):
early_positives, late_positives, negatives, max_visits_pos, max_visits_neg, features_names, infos = LupusDataset.load_mat(
mat_file, visit_selector=visit_selector, features_names=feats)
train_set = dict(early_pos=early_positives, late_pos=late_positives, neg=negatives, max_pos=max_visits_pos,
max_neg=max_visits_neg)
data_dict = dict(train=train_set, test=train_set, features=features_names)
return LupusDataset(data=data_dict, infos=infos, seed=seed, strategy=strategy, feats=feats)
@staticmethod
def k_fold_test_datasets(mat_file: str, k: int = 1, strategy: BuildBatchStrategy = PerVisitTargets(),
seed: int = Configs.seed, visit_selector: VisitsFilter = NullFIlter(), feats: list = None):
early_positives, late_positives, negatives, max_visits_pos, max_visits_neg, features_names, infos = LupusDataset.load_mat(
mat_file, visit_selector=visit_selector, features_names=feats)
for i in range(k):
eptr, epts = LupusDataset.__split_set(early_positives, i=i, k=k)
lptr, lpts = LupusDataset.__split_set(late_positives, i=i, k=k)
ntr, nts = LupusDataset.__split_set(negatives, i=i, k=k)
train_set = dict(early_pos=eptr, late_pos=lptr, neg=ntr, max_pos=max_visits_pos,
max_neg=max_visits_neg)
test_set = dict(early_pos=epts, late_pos=lpts, neg=nts, max_pos=max_visits_pos,
max_neg=max_visits_neg)
data_dict = dict(train=train_set, test=test_set, features=features_names)
yield LupusDataset(data=data_dict, infos=infos, seed=seed, strategy=strategy, feats=feats)
@staticmethod
def get_set_info(set):
return InfoList(PrintableInfoElement('early_pos', ':d', len(set['early_pos'])),
PrintableInfoElement('late_pos', ':d', len(set['late_pos'])),
PrintableInfoElement('neg', ':d', len(set['neg'])))
def __init__(self, data: dict, infos: Info = NullInfo(), strategy: BuildBatchStrategy = PerVisitTargets(),
seed: int = Configs.seed, feats: list = None):
self.__train = data['train']
self.__test = data['test']
self.__features = data['features'] if feats is None else feats
self.__rng = numpy.random.RandomState(seed)
self.__n_in = strategy.n_in(len(self.__features))
self.__n_out = 1
self.__build_batch_strategy = strategy # TODO add infos
split_info = InfoGroup('split', InfoList(InfoGroup('train', InfoList(LupusDataset.get_set_info(self.__train))),
InfoGroup('test', InfoList(LupusDataset.get_set_info(self.__test)))))
self.__infos = InfoList(infos, split_info)
def format_row(self, row):
s = str(row).replace("[", '').replace("]", "")
return ' '.join(s.split()) + "\n"
def write_to_file(self, batch: Batch, folder: str, name: str):
os.makedirs(folder, exist_ok=True)
prefix = folder + '/' + name
feats_file = open(prefix + "_features.txt", "w")
        labels_file = open(prefix + "_labels.txt", "w")
for i in range(batch.inputs.shape[2]):
example = batch.inputs[0, :, i]
feats_file.write(self.format_row(example))
labels_file.write(self.format_row(batch.outputs[0, :, i]))
feats_file.close()
labels_file.close()
@staticmethod
def __find_features_names(features):
names = []
if isinstance(features, numpy.ndarray) or isinstance(features, numpy.void):
for obj in features:
names.extend(LupusDataset.__find_features_names(obj))
return numpy.unique(names)
elif isinstance(features, numpy.str_):
return [str(features)]
else:
raise TypeError('got type: {}, expected type is "numpy.str_"', type(features))
@staticmethod
    def __find_normalization_factors(features_names, data):
        vals = dict()
        for f in features_names:
data_f = data[f]
vals[f] = (dict(min=min(data_f).item().item(), max=max(data_f).item().item()))
return vals
#
# @staticmethod
# def __split_positive(positives):
#
# early_positives = []
# late_positives = []
#
# for p in positives:
# targets = p['targets']
# assert (sum(targets) > 0)
# if targets[0] > 0:
# early_positives.append(p)
# else:
# late_positives.append(p)
#
# return early_positives, late_positives
@staticmethod
def __process_patients(mat_data, features_names, features_normalizations, visit_selector) -> dict:
result = dict(early_pos=[], late_pos=[], neg=[], max_visits_late_pos=0, max_visits_neg=0)
n_features = len(features_names)
patients_ids = numpy.unique(mat_data['PazienteId'])
for id in patients_ids:
visits_indexes = mat_data['PazienteId'] == id.item()
visits = mat_data[visits_indexes]
visits = sorted(visits, key=lambda visit: visit['numberVisit'].item())
visits = visit_selector.select_visits(visits)
n_visits = len(visits)
if n_visits > 0:
pat_matrix = numpy.zeros(shape=(n_visits, n_features))
target_vec = numpy.zeros(shape=(n_visits, 1))
for j in range(n_visits):
                    target_vec[j] = 1 if visits[j]['sdi'] > 0 else 0  # sdi is greater than zero for positive patients
for k in range(n_features):
f_name = features_names[k]
a = features_normalizations[f_name]['min']
b = features_normalizations[f_name]['max']
                        pat_matrix[j, k] = (visits[j][f_name].item() - a) / (b - a)  # normalization
                        # pat_matrix[j, k] = visits[j][f_name].item()  # normalization
d = dict(features=pat_matrix, targets=target_vec)
if visits[0]['sdi'] > 0:
result["early_pos"].append(d)
elif visits[-1]['sdi'] > 0:
result["late_pos"].append(d)
result["max_visits_late_pos"] = max(result["max_visits_late_pos"], n_visits)
else:
result["neg"].append(d)
result["max_visits_neg"] = max(result["max_visits_neg"], n_visits)
return result
@staticmethod
def __get_patience_descr(pat_dict):
features = pat_dict['features']
targets = pat_dict['targets']
n_visits = len(targets)
assert (n_visits == features.shape[0])
visits_descr = []
for i in range(n_visits):
visits_descr.append('Visit {}:\n features: {}\t targets(sdi): {}\n'.format(i, features[i, :], targets[i]))
return ''.join(visits_descr)
@staticmethod
def print_results(patient_number, batch, y):
n_visits = int(sum(sum(batch.mask[:, :, patient_number])).item())
print('Patient {}: number of visits: {}'.format(patient_number, n_visits))
print('Net output,\tLabel')
for i in range(n_visits):
print('\t{:01.2f},\t {:01.0f}'.format(y[i, :, patient_number].item(),
batch.outputs[i, :, patient_number].item()))
def __build_batch(self, indexes, sets, max_length) -> Batch:
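        # Batches are laid out as (time, feature, example): axis 0 indexes the
        # visit, axis 1 the input/output dimension and axis 2 the patient;
        # the mask marks which (visit, patient) entries carry a valid target.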
max_length -= 1
n_sets = len(sets)
n_batch_examples = 0
for i in indexes:
n_batch_examples += len(i)
inputs = numpy.zeros(shape=(max_length, self.__n_in, n_batch_examples))
outputs = numpy.zeros(shape=(max_length, self.__n_out, n_batch_examples))
mask = numpy.zeros_like(outputs)
partial_idx = 0
for i in range(n_sets):
bs = len(indexes[i])
for j in range(bs):
idx = indexes[i][j]
pat = sets[i][idx]
feats, targets, pat_mask = self.__build_batch_strategy.build_batch(pat)
n = feats.shape[0]
assert (n == targets.shape[0])
index = partial_idx + j
inputs[0:n, :, index] = feats
outputs[0:n, :, index] = targets
mask[0:n, :, index] = pat_mask
partial_idx += bs
return Batch(inputs, outputs, mask)
def __sets_from_keys(self, data):
sets = []
for key in self.__build_batch_strategy.keys():
sets.append(data[key])
return sets
# def get_train_batch(self, batch_size:int):
# exs = self.__sets_from_keys(self.__train)
# pool = []
# for e in exs:
# pool += e
#
# indexes = self.__rng.randint(size=(batch_size, 1), low=0, high=len(pool))
# max_length = len(pool[max(indexes, key=lambda i: len(pool[i]['targets']))]['targets'])
# return self.__build_batch([indexes], [pool], max_length)
def get_train_batch(self, batch_size: int) -> Batch:
"""return a 'Batch' of size 'batch_size'"""
exs = self.__sets_from_keys(self.__train)
bs = int(math.ceil(float(batch_size) / len(exs)))
indexes = []
max_length = 0
for e in exs:
e_indexes = self.__rng.randint(size=(bs, 1), low=0, high=len(e))
indexes.append(e_indexes)
max_length = max(len(e[max(e_indexes, key=lambda i: len(e[i]['targets']))]['targets']), max_length)
return self.__build_batch(indexes, exs, max_length)
@property
def n_in(self):
return self.__n_in
@property
def n_out(self):
return self.__n_out
def __get_batch_from_whole_sets(self, sets: list, max_length: int) -> Batch:
indexes = []
for e in sets:
indexes.append(range(len(e)))
return self.__build_batch(indexes, sets, max_length)
def __get_set(self, set, mode):
max_length = max(set['max_pos'], set['max_neg'])
if mode == 'whole':
sets = self.__sets_from_keys(set)
return [self.__get_batch_from_whole_sets(sets, max_length)]
elif mode == 'split':
keys = self.__build_batch_strategy.keys()
splits = self.__sets_from_keys(set)
assert (len(keys) == len(splits))
d = dict()
for i in range(len(keys)):
d[keys[i]] = self.__get_batch_from_whole_sets([splits[i]], max_length=max_length)
return d
else:
raise ValueError('unsupported value') # TODO
@property
def test_set(self):
return self.__get_set(self.__test, mode='whole')
@property
def train_set(self):
return self.__get_set(self.__train, mode='whole')
@property
def split_train(self):
return self.__get_set(self.__train, mode='split')
@property
def split_test(self):
return self.__get_set(self.__test, mode='split')
@staticmethod
def correct_prediction(y):
"""correct the prediction in such a way that the probabilities are monotonic non decreasing"""
max_val = 0.
result_y = y
for i in range(y.shape[0]):
i_val = result_y[i]
result_y[i] = max(i_val, max_val)
max_val = max(max_val, i_val)
return result_y
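    # Example of the running-maximum correction above: a per-visit prediction
    # such as [0.3, 0.1, 0.6, 0.4] becomes [0.3, 0.3, 0.6, 0.6], so the
    # predicted probability never decreases from one visit to the next.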
@staticmethod
def get_scores_visits(y, t, mask):
n_examples = y.shape[2]
n_visits_max = n_examples * y.shape[0]
reduced_mask = numpy.sum(mask, axis=1)
scores = numpy.zeros(shape=(n_visits_max, 1), dtype=Configs.floatType)
labels = numpy.zeros_like(scores)
visit_count = 0
for i in range(n_examples):
n_visits = sum(reduced_mask[:, i])
y_filtered = y[0:n_visits, :, i]
t_filtered = t[0:n_visits, :, i]
scores[visit_count:visit_count + n_visits] = y_filtered
labels[visit_count:visit_count + n_visits] = t_filtered
visit_count += n_visits
return scores[0:visit_count], labels[0:visit_count]
@staticmethod
def get_scores_patients(y, t, mask):
if numpy.sum(numpy.sum(numpy.sum(t))) <= 0:
print('t', t)
print('t_size ', t.shape)
assert (numpy.sum(numpy.sum(numpy.sum(t))) > 0)
n_examples = y.shape[2]
reduced_mask = numpy.sum(mask, axis=1)
scores = numpy.zeros(shape=(n_examples, 1), dtype=Configs.floatType)
labels = numpy.zeros_like(scores)
for i in range(n_examples):
non_zero_indexes = numpy.where(reduced_mask[:, i] > 0)[0]
idx = numpy.min(non_zero_indexes)
scores[i] = y[idx, :, i]
labels[i] = t[idx, :, i]
assert (numpy.sum(scores) > 0 and numpy.sum(scores) != len(scores))
return scores, labels
@staticmethod
def get_scores(y, t, mask):
n_examples = y.shape[2]
reduced_mask = numpy.sum(mask, axis=1)
scores = numpy.zeros(shape=(n_examples, 1), dtype=Configs.floatType)
labels = numpy.zeros_like(scores)
for i in range(n_examples):
n_visits = sum(reduced_mask[:, i])
y_filtered = y[0:n_visits, :, i]
t_filtered = t[0:n_visits, :, i]
non_zero_indexes = numpy.nonzero(t_filtered)[0]
zero_indexes = numpy.nonzero(t_filtered < 1)[0]
n_non_zero = non_zero_indexes.shape[0]
n_zero = zero_indexes.shape[0]
assert (n_zero + n_non_zero == n_visits)
if n_non_zero > 0 and n_zero > 0 and numpy.min(y_filtered[non_zero_indexes]) < numpy.max(
y_filtered[zero_indexes]):
                # in this case the prediction is inconsistent whatever the threshold is
scores[i] = -1.
labels[i] = 1
else:
                # in this case the probabilities are consistent, hence we choose as score (and label)
                # for the patient that of the visit whose score is farthest from its label
to_compare_index = []
to_compare_values = []
if n_non_zero > 0:
                    p1_index = non_zero_indexes[numpy.argmin(y_filtered[non_zero_indexes])]  # index into y_filtered
p1 = 1. - y_filtered[p1_index]
to_compare_index.append(p1_index)
to_compare_values.append(p1)
if n_zero > 0:
                    p2_index = zero_indexes[numpy.argmax(y_filtered[zero_indexes])]  # index into y_filtered
p2 = y_filtered[p2_index]
to_compare_index.append(p2_index)
to_compare_values.append(p2)
j = to_compare_index[numpy.argmin(to_compare_values).item()]
scores[i] = y_filtered[j].item()
labels[i] = t_filtered[j].item()
return scores, labels
@property
def infos(self):
return self.__infos
if __name__ == '__main__':
filter = TemporalSpanFilter(min_age_span_upper=2, min_age_span_lower=2, min_visits_neg=4)
dataset = LupusDataset.no_test_dataset(Paths.lupus_path, seed=13, strategy=PerPatienceTargets(),
visit_selector=filter)
print(dataset.infos)
batch = dataset.get_train_batch(batch_size=3)
print(str(batch))
# XXX REMOVEME
strategy = TemporalDifferenceTargets()
id = 0
for dataset in LupusDataset.k_fold_test_datasets(Paths.lupus_path, k=8, strategy=strategy,
visit_selector=TemporalSpanFilter(
min_age_span_upper=2,
min_age_span_lower=2, min_visits_neg=5,
min_visits_pos=1)):
dataset.write_to_file(dataset.train_set[0], '/home/giulio', 'train_' + str(id))
dataset.write_to_file(dataset.test_set[0], '/home/giulio', 'test_' + str(id))
id += 1
a = dataset.train_set[0].inputs
b = dataset.test_set[0].inputs
c = numpy.concatenate((a, b), axis=2)
stats = numpy.zeros(shape=(c.shape[1],))
for i in range(c.shape[2]):
for j in range(c.shape[1]):
a = c[:, j, i]
if len(set(a)) > 1:
stats[j] += 1
print('stats', stats)
|
GiulioGx/RNNs
|
sources/datasets/LupusDataset.py
|
Python
|
lgpl-3.0
| 25,918
|
import codecs
import os
import re
import json
from . import WIKI_DIR
from collections import defaultdict
def _get_filename(slug):
return os.path.join(WIKI_DIR, '%s.md' % (slug,))
class Index(object):
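    """Small in-memory inverted index: finvindex maps each indexed word to a
    set of (doc_id, position) pairs, which supports both plain term search and
    positional phrase search."""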
def __init__(self):
self.texts, self.words = {}, set()
self.finvindex = defaultdict(set)
def update_index(self, doc_id, words):
for w in words:
self.finvindex[w].add((doc_id, self.texts[doc_id].index(w)))
def put(self, doc_id, content):
self.remove(doc_id)
txt = filter(None, map(lambda x: re.sub('[^a-z0-9]', '', x.lower()), filter(lambda w: len(w) > 3, content.split())))
self.texts[doc_id] = txt
self.update_index(doc_id, set(txt))
def remove(self, doc_id):
for k, v in self.finvindex.items():
to_delete = []
for w in v:
if w[0] == doc_id:
to_delete.append(w)
for t in to_delete:
v.remove(t)
def term_search(self, terms):
if not set(terms).issubset(set(self.finvindex.keys())):
return set()
return reduce(set.intersection,
(set(x[0] for x in txtindx)
for term, txtindx in self.finvindex.items()
if term in terms),
set(self.texts.keys()))
def search(self, phrase):
import difflib
wordsinphrase = phrase.strip().split()
tmp = []
for w in wordsinphrase:
r = difflib.get_close_matches(w, self.finvindex.keys(), cutoff=0.8)
if r:
tmp.append(r[0])
else:
tmp.append(w)
wordsinphrase = tmp
if not set(wordsinphrase).issubset(set(self.finvindex.keys())):
return set()
        firstword, otherwords = wordsinphrase[0], wordsinphrase[1:]
found = []
for txt in self.term_search(wordsinphrase):
for firstindx in (indx for t,indx in self.finvindex[firstword] if t == txt):
if all((txt, firstindx+1 + otherindx) in self.finvindex[otherword]
for otherindx, otherword in enumerate(otherwords)):
found.append(txt)
return found
class Post(object):
def __init__(self, title, body, created=None, modified=None, tags=None, **kwargs):
self.title = str(title).strip()
self.body = str(body.strip()) if body else None
self.slug = str(Post.build_slug(self.title))
self.tags = filter(None, tags.split(',') if isinstance(tags, basestring) else tags if tags else [])
self.created = str(created) if created else None
self.modified = str(modified) if modified else None
def __cmp__(self, other):
if not other:
return -1
return (int(self.created > other.created) or -1) if self.created != other.created else 0
def serialize(self):
buf = ['<!---']
for k, v in self.__dict__.items():
if k not in ['body', 'slug', 'tags']:
buf.append('='.join((str(k), str(v))))
elif k == 'tags':
buf.append('%s=%s' % (k, ','.join(self.tags)))
buf.append('--->')
buf.append(self.body)
return '\n'.join(buf)
@staticmethod
def build_slug(title):
return re.sub(r'[\.!,;/\?#\ ]+', '-', title).strip().lower()
@staticmethod
def build(data, title=None):
tmp = {}
body = []
header = False
for line in data.split('\n'):
if line == '<!---':
header = True
elif line == '--->':
header = False
elif header:
(k, v) = [v.strip() for v in line.split('=')]
tmp[k] = v
body.append(line)
tmp['body'] = '\n'.join(body)
if not tmp.get('title'):
tmp['title'] = ' '.join(title.replace('.md', '').split('-'))
return Post(**tmp)
class PostProxy(object):
def __init__(self, slug):
self.slug = slug
self.post = None
def __getattr__(self, name):
if not self.post:
with codecs.open(_get_filename(self.slug), 'r', 'utf8') as f:
self.post = Post.build(f.read())
if name == 'body' and not getattr(self.post, 'body', None):
with codecs.open(os.path.join(WIKI_DIR, '%s.md' % (self.slug,)), 'r', 'utf8') as f:
self.post.body = f.read()
return getattr(self.post, name)
class Wiki(object):
def add_post(self, post):
self._save_post(post)
def del_post(self, post):
os.remove(_get_filename(post.slug))
def get_post(self, slug):
if os.path.exists(_get_filename(slug)):
with codecs.open(_get_filename(slug), 'r', 'utf8') as f:
return Post.build(f.read())
def find_all(self):
return [PostProxy(f.replace('.md', '')) for f in os.listdir(WIKI_DIR)]
def _save_post(self, post):
with codecs.open(_get_filename(post.slug), 'w', 'utf8') as f:
            tmp = dict(post.__dict__)
            tmp.pop('body', '')
f.write('<!---\n%s\n--->\n' % '\n'.join(['%s = %s' % (k, v) for k,v in tmp.items()]))
f.write(post.body)
|
apruden/genwiki
|
genwiki/model.py
|
Python
|
lgpl-3.0
| 5,404
|
# -*- coding: utf-8 -*-
"""This module defines some special functions
(originally defined in SpecialFunctions.h)."""
# Copyright (C) 2008-2014 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Johan Hake 2008-2009
# Modified by Garth N. Wells 2010
# Modified by Martin S. Alnaes 2013-2014
__all__ = ["MeshCoordinates", "FacetArea", "FacetNormal", "CellSize", "CellVolume",
'SpatialCoordinate', 'CellNormal', 'Circumradius', 'MinFacetEdgeLength', 'MaxFacetEdgeLength']
# Import UFL and SWIG-generated extension module (DOLFIN C++)
import ufl
import dolfin.cpp as cpp
# Local imports
from dolfin.functions.expression import Expression
def _mesh2domain(mesh):
"Deprecation mechanism for symbolic geometry."
if isinstance(mesh, ufl.Cell):
cpp.deprecation("Constructing geometry from a Cell", "1.4", "1.5",
"Pass mesh instead, for example use FacetNormal(mesh) instead of FacetNormal(triangle) or triangle.n")
return ufl.as_domain(mesh)
class MeshCoordinates(Expression, ufl.Coefficient, cpp.MeshCoordinates):
def __init__(self, mesh):
"Create function that evaluates to the mesh coordinates at each vertex."
cpp.MeshCoordinates.__init__(self, mesh)
self._ufl_element = ufl.VectorElement("Lagrange", mesh.ufl_domain(), 1)
ufl.Coefficient.__init__(self, self._ufl_element, count=self.id())
MeshCoordinates.__doc__ = cpp.MeshCoordinates.__doc__
class FacetArea(Expression, ufl.Coefficient, cpp.FacetArea):
def __init__(self, mesh):
"""
Create function that evaluates to the facet area/length on each facet.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
fa = FacetArea(mesh)
"""
cpp.FacetArea.__init__(self, mesh)
self._ufl_element = ufl.FiniteElement("Discontinuous Lagrange", mesh.ufl_domain(), 0)
ufl.Coefficient.__init__(self, self._ufl_element, count=self.id())
FacetArea.__doc__ = cpp.FacetArea.__doc__
# Simple definition of FacetNormal via UFL
def FacetNormal(mesh):
"""
Return symbolic facet normal for given mesh.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
n = FacetNormal(mesh)
"""
return ufl.FacetNormal(_mesh2domain(mesh))
# Simple definition of CellSize via UFL
def CellSize(mesh):
"""
Return function cell size for given mesh.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
h = CellSize(mesh)
"""
return 2.0*ufl.Circumradius(_mesh2domain(mesh))
# Simple definition of CellVolume via UFL
def CellVolume(mesh):
"""
Return symbolic cell volume for given mesh.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
vol = CellVolume(mesh)
"""
return ufl.CellVolume(_mesh2domain(mesh))
# Simple definition of SpatialCoordinate via UFL
def SpatialCoordinate(mesh):
"""
Return symbolic physical coordinates for given mesh.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
            x = SpatialCoordinate(mesh)
"""
return ufl.SpatialCoordinate(_mesh2domain(mesh))
# Simple definition of CellNormal via UFL
def CellNormal(mesh):
"""
Return symbolic cell normal for given manifold mesh.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
            n = CellNormal(mesh)
"""
return ufl.CellNormal(_mesh2domain(mesh))
# Simple definition of Circumradius via UFL
def Circumradius(mesh):
"""
Return symbolic cell circumradius for given mesh.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
            r = Circumradius(mesh)
"""
return ufl.Circumradius(_mesh2domain(mesh))
# Simple definition of MinFacetEdgeLength via UFL
def MinFacetEdgeLength(mesh):
"""
Return symbolic minimum facet edge length of a cell for given mesh.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
            h_min = MinFacetEdgeLength(mesh)
"""
return ufl.MinFacetEdgeLength(_mesh2domain(mesh))
# Simple definition of MaxFacetEdgeLength via UFL
def MaxFacetEdgeLength(mesh):
"""
Return symbolic maximum facet edge length of a cell for given mesh.
*Arguments*
mesh
a :py:class:`Mesh <dolfin.cpp.Mesh>`.
*Example of usage*
.. code-block:: python
mesh = UnitSquare(4,4)
            h_max = MaxFacetEdgeLength(mesh)
"""
return ufl.MaxFacetEdgeLength(_mesh2domain(mesh))
|
akshmakov/Dolfin-Fijee-Fork
|
site-packages/dolfin/functions/specialfunctions.py
|
Python
|
lgpl-3.0
| 6,001
|
{
'name': "Reminders and Agenda for Phonecalls",
'version': '1.0.0',
'author': 'IT-Projects LLC, Ivan Yelizariev',
'category': 'Reminders and Agenda',
'website': 'https://yelizariev.github.io',
'price': 6.00,
'currency': 'EUR',
'depends': ['reminder_base', 'crm'],
'data': [
'views.xml',
],
'installable': True,
}
|
raycarnes/addons-yelizariev
|
reminder_phonecall/__openerp__.py
|
Python
|
lgpl-3.0
| 370
|
from django.core.management.base import NoArgsCommand, CommandError
import sys
from xadrpy.core.workers.daemon import DaemonHandler
class Command(NoArgsCommand):
def handle(self, **options):
daemon_handler = DaemonHandler("daemon.pid", "daemon.sock")
try:
daemon_handler.stop()
sys.stdout.write("Worker is stopped.\n")
except Exception, e:
raise CommandError(e)
|
palankai/xadrpy
|
src/xadrpy/core/workers/management/commands/worker_stop.py
|
Python
|
lgpl-3.0
| 441
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on Oct 16, 2015
# @author: Bo Zhao
# @email: bo_zhao@hks.harvard.edu
# @website: http://yenching.org
# @organization: Harvard Kennedy School
import urllib2
import json
import sys
from settings import BAIDU_AK
from log import *
reload(sys)
sys.setdefaultencoding('utf-8')
def geocode(loc):
lat, lng = -1, -1
url = 'http://api.map.baidu.com/geocoder/v2/?address=%s&output=json&ak=%s' % (loc, BAIDU_AK)
others = [u'其他', u'美国', u'英国', u'澳大利亚', u'伊朗', u'台湾', u'沙特阿拉伯',
u'爱尔兰', u'印度', u'印尼', u'奥地利', u'挪威', u'乌克兰', u'瑞士',
u'西班牙', u'古巴', u'挪威', u'德国', u'埃及', u'巴西', u'比利时']
if loc in others:
pass
else:
try:
response = urllib2.urlopen(url.replace(' ', '%20'))
except urllib2.HTTPError, e:
log(WARNING, e, 'geocode')
try:
loc_json = json.loads(response.read())
lat = loc_json[u'result'][u'location'][u'lat']
lng = loc_json[u'result'][u'location'][u'lng']
except ValueError:
log(ERROR, "No JSON object was decoded", 'geocode')
except KeyError, e:
log(ERROR, e.message, 'geocode')
return [lat, lng]
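# Note: geocode() returns [lat, lng] from the Baidu geocoder on success and
# falls back to [-1, -1] when the place is in the skip list above or the
# lookup/parsing fails.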
# Estimate where a post was sent out based on the semantics of the user's name,
# verified information, and/or other contextual information.
def geocode_by_semantics(project, address, port):
from pymongo import MongoClient
client = MongoClient(address, port)
db = client[project]
search_json = {'$or': [{'latlng': [0, 0]}, {'latlng': [-1, -1]}], 'verified': True}
users = db.users.find(search_json)
count = db.users.find(search_json).count()
print count
i = 0
for user in users:
i += 1
verified_info = user['verified_info']
username = user['username']
verified_info = verified_info.replace(u'主持人', '').replace(u'职员', '').replace(u'院长', '').replace(u'经理', '')
verified_info = verified_info.split(u' ')[0]
if verified_info == u'前' or u'www' in verified_info or u'律师' in verified_info or u'学者' in verified_info or u'作家' in verified_info or u'媒体人' in verified_info or u'诗人' in verified_info:
verified_info = ''
locational_info = verified_info
if locational_info == '':
locational_info = username
if verified_info != '':
latlng = geocode(verified_info)
else:
continue
log(NOTICE, '#%d geocode the user by its semantic info %s. %d posts remain. latlng: %s ' % (i, verified_info.encode('gbk', 'ignore'), count - i, str(latlng)))
if latlng[0] != -1 and latlng[0] != 0:
db.users.update({'userid': user['userid']}, {'$set': {'latlng': latlng}})
log(NOTICE, "mission compeletes.")
def geocode_locational_info(project, address, port):
from pymongo import MongoClient
client = MongoClient(address, port)
db = client[project]
search_json = {'$or': [{'latlng': [0, 0]}, {'latlng': [-1, -1]}], 'location': {'$ne': ''}}
users = db.users.find(search_json)
count = users.count()
print count
i = 0
for user in users:
i += 1
if 'location' in user.keys():
latlng = geocode(user['location'])
log(NOTICE, '#%d geocode the user by its locational info %s. %d posts remain. latlng: %s ' % (i, user['location'].encode('gbk', 'ignore'), count - i, str(latlng)))
if latlng[0] != -1 and latlng[0] != 0:
db.users.update({'userid': user['userid']}, {'$set': {'latlng': latlng}})
else:
continue
log(NOTICE, "mission compeletes.")
# Estimate where a post was sent out based on the path of its author.
def estimate_location_by_path(user):
est_latlng = [-1, -1]
path = user['path']
latlng = user['latlng']
if user['path'] != [] and user['path'][0][0] != 0:
if latlng != [0, 0] and latlng != [-1, -1]:
path.append(latlng)
avg_lat = 0
avg_lng = 0
for latlng in path:
avg_lat += latlng[0]
avg_lng += latlng[1]
avg_lat /= float(len(path))
avg_lng /= float(len(path))
distances = []
for latlng in path:
distances.append(abs(latlng[0] - avg_lat) + abs(latlng[1] - avg_lng))
est_latlng = path[distances.index(min(distances))][0:2]
elif user['path'] == [] and latlng != [0, 0]:
est_latlng = latlng
else:
pass
return est_latlng
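# Worked example: for a user whose 'path' is [[30.0, 120.0], [32.0, 121.0],
# [39.0, 116.0]] and whose 'latlng' is unset, the centroid is roughly
# (33.7, 119.0); the path point with the smallest Manhattan distance to it,
# [32.0, 121.0], is returned as the estimated location.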
# Estimate where a post was sent out by the locational information of its author.
def georeference(project, address, port):
from pymongo import MongoClient
client = MongoClient(address, port)
db = client[project]
search_json = {'$or': [{'latlng': [0, 0]}, {'latlng': [-1, -1]}]}
posts = db.posts.find(search_json)
count = db.posts.find(search_json).count()
i = 0
for post in posts:
# userid = post['user']['userid']
username = post['user']['username']
user = db.users.find_one({'username': username})
i += 1
try:
if abs(user['latlng'][0] - 0) < 0.001:
pass
elif abs(user['latlng'][0] + 1) < 0.001:
pass
else:
try:
db.posts.update_many({'mid': post['mid']}, {'$set': {
'latlng': user['latlng']
}
})
log(NOTICE, 'georeferencing #%d, %d posts remain. latlng: %s ' % (i, count - i, str(user['latlng'])))
except:
log(NOTICE, 'the user latlng does not exit')
except:
print "user has been mistakenly deleted"
log(NOTICE, "mission compeletes.")
|
jakobzhao/ashcrawler
|
core/geo.py
|
Python
|
lgpl-3.0
| 5,940
|
# -*- coding: utf-8 -*-
import os
import KBEngine
from KBEDebug import *
def onBaseAppReady(isBootstrap):
"""
KBEngine method.
	The baseapp is now ready.
	@param isBootstrap: whether this is the first baseapp to start
@type isBootstrap: BOOL
"""
INFO_MSG('onBaseAppReady: isBootstrap=%s, appID=%s, bootstrapGroupIndex=%s, bootstrapGlobalIndex=%s' % \
(isBootstrap, os.getenv("KBE_COMPONENTID"), os.getenv("KBE_BOOTIDX_GROUP"), os.getenv("KBE_BOOTIDX_GLOBAL")))
def onReadyForLogin(isBootstrap):
"""
KBEngine method.
	If the return value is greater than or equal to 1.0, initialization is fully complete; otherwise return a readiness progress value between 0.0 and 1.0.
	This makes it possible to open logins only after the script layer has finished initializing.
	@param isBootstrap: whether this is the first baseapp to start
@type isBootstrap: BOOL
"""
return 1.0
def onReadyForShutDown():
"""
KBEngine method.
	The process asks the script layer: I am about to shut down, is the script ready?
	If True is returned, the process enters the shutdown sequence; any other value makes the process ask again after a while.
	Users can perform script-layer data cleanup when this message is received, so that the script layer's work is not lost because of the shutdown.
"""
INFO_MSG('onReadyForShutDown()')
return True
def onBaseAppShutDown(state):
"""
KBEngine method.
这个baseapp被关闭前的回调函数
@param state: 0 : 在断开所有客户端之前
1 : 在将所有entity写入数据库之前
2 : 所有entity被写入数据库之后
@type state: int
"""
INFO_MSG('onBaseAppShutDown: state=%i' % state)
def onInit(isReload):
"""
KBEngine method.
当引擎启动后初始化完所有的脚本后这个接口被调用
@param isReload: 是否是被重写加载脚本后触发的
@type isReload: bool
"""
INFO_MSG('onInit::isReload:%s' % isReload)
def onFini():
"""
KBEngine method.
引擎正式关闭
"""
INFO_MSG('onFini()')
def onCellAppDeath(addr):
"""
KBEngine method.
某个cellapp死亡
"""
WARNING_MSG('onCellAppDeath: %s' % (str(addr)))
def onGlobalData(key, value):
"""
KBEngine method.
globalData有改变
"""
DEBUG_MSG('onGlobalData: %s' % key)
def onGlobalDataDel(key):
"""
KBEngine method.
globalData有删除
"""
DEBUG_MSG('onDelGlobalData: %s' % key)
def onGlobalBases(key, value):
"""
KBEngine method.
globalBases有改变
"""
DEBUG_MSG('onGlobalBases: %s' % key)
def onGlobalBasesDel(key):
"""
KBEngine method.
globalBases有删除
"""
DEBUG_MSG('onGlobalBasesDel: %s' % key)
def onLoseChargeCB(ordersID, dbid, success, datas):
"""
KBEngine method.
	An unknown order was processed; the record may have been cleared by billing
	because of a timeout, while a processing callback from the third-party charge was still received.
"""
DEBUG_MSG('onLoseChargeCB: ordersID=%s, dbid=%i, success=%i, datas=%s' % \
(ordersID, dbid, success, datas))
|
dreamsxin/kbengine
|
assets/scripts/base/kbemain.py
|
Python
|
lgpl-3.0
| 2,796
|
#!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributors.
from fenrirscreenreader.core import debug
settingsData = {
'sound': {
'enabled': True,
'driver': 'genericDriver',
'theme': 'default',
'volume': 1.0,
'genericPlayFileCommand': 'play -q -v fenrirVolume fenrirSoundFile',
'genericFrequencyCommand': 'play -q -v fenrirVolume -n -c1 synth fenrirDuration sine fenrirFrequence'
},
'speech':{
'enabled': True,
'driver': 'genericDriver',
'serverPath': '',
'rate': 0.75,
'pitch': 0.5,
'capitalPitch':0.8,
'volume': 1.0,
'module': '',
'voice': 'en-us',
'language': '',
'autoReadIncoming': True,
'genericSpeechCommand':'espeak -a fenrirVolume -s fenrirRate -p fenrirPitch -v fenrirVoice "fenrirText"',
'fenrirMinVolume':0,
'fenrirMaxVolume':200,
'fenrirMinPitch':0,
'fenrirMaxPitch':99,
'fenrirMinRate':80,
'fenrirMaxRate':450,
},
'braille':{
'enabled': False,
'driver':'brlapiDriver',
'layout': 'en',
'flushMode': 'word', #NONE,FIX,CHAR,WORD
'flushTimeout': 3,
'cursorFocusMode':'page', # page,fixCell
'fixCursorOnCell': -1,
'cursorFollowMode': 'review', # none, review, last, text
'panSizeHorizontal': 0 # 0 = display size
},
'screen':{
'driver': 'vcsaDriver',
'encoding': 'auto',
'screenUpdateDelay': 0.1,
'suspendingScreen': '',
'autodetectSuspendingScreen': False,
},
'general':{
'debugLevel': debug.debugLevel.DEACTIVE,
'debugMode': 'FILE',
'debugFile': '',
'punctuationProfile':'default',
'punctuationLevel': 'some',
'respectPunctuationPause':True,
'newLinePause':True,
'numberOfClipboards': 10,
'emoticons': True,
'fenrirKeys': 'KEY_KP0,KEY_META',
'scriptKeys': 'KEY_COMPOSE',
'timeFormat': '%I:%M%P',
'dateFormat': '%A, %B %d, %Y',
'autoSpellCheck': False,
'spellCheckLanguage': 'en_US',
'scriptPath': '/usr/share/fenrirscreenreader/scripts',
'commandPath': '/usr/share/fenrirscreenreader/commands',
'attributeFormatString': 'Background fenrirBGColor,Foreground fenrirFGColor,fenrirUnderline,fenrirBold,fenrirBlink, Font fenrirFont,Fontsize fenrirFontSize',
'autoPresentIndent': False,
'autoPresentIndentMode': 1,
'hasAttributes': True,
'shell': '',
},
'focus':{
'cursor': True,
'highlight': False,
},
'remote':{
'enabled': True,
'driver': 'unixDriver',
'port': 22447,
'socketFile':'',
'enableSettingsRemote': True,
'enableCommandRemote': True,
},
'barrier':{
'enabled': True,
'leftBarriers': '│└┌─',
'rightBarriers': '│┘┐─',
},
'review':{
'lineBreak': True,
'endOfScreen': True,
'leaveReviewOnCursorChange': True,
'leaveReviewOnScreenChange': True,
},
'menu':{
'vmenuPath': '',
'quickMenu': 'speech#rate;speech#pitch;speech#volume',
},
'promote':{
'enabled': True,
'inactiveTimeoutSec': 120,
'list': '',
},
'time':{
'enabled': False,
'presentTime': True,
'presentDate': True,
'delaySec': 0,
'onMinutes': '00,30',
'announce': True,
'interrupt': False,
},
'keyboard':{
'driver': 'evdev',
'device': 'all',
'grabDevices': True,
'ignoreShortcuts': False,
'keyboardLayout': "desktop",
'charEchoMode': 2, # while capslock
'charDeleteEcho': True,
'wordEcho': True,
'interruptOnKeyPress': True,
'interruptOnKeyPressFilter': '',
'doubleTapTimeout': 0.2,
}
}
|
chrys87/fenrir
|
src/fenrirscreenreader/core/settingsData.py
|
Python
|
lgpl-3.0
| 3,330
|
# -*- coding: utf-8 -*-
# Copyright(C) 2019 Budget Insight
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module, BackendConfig
from weboob.capabilities.base import find_object
from weboob.capabilities.bill import CapDocument, Document, SubscriptionNotFound, Subscription, DocumentNotFound
from weboob.tools.value import Value, ValueBackendPassword
from .browser import BouyguesBrowser
__all__ = ['BouyguesModule']
class BouyguesModule(Module, CapDocument):
NAME = 'bouygues'
DESCRIPTION = 'Bouygues Télécom'
MAINTAINER = 'Florian Duguet'
EMAIL = 'florian.duguet@budget-insight.com'
LICENSE = 'LGPLv3+'
VERSION = '2.1'
CONFIG = BackendConfig(Value('login', label='Numéro de mobile, de clé/tablette ou e-mail en @bbox.fr'),
ValueBackendPassword('password', label='Mot de passe'),
ValueBackendPassword('lastname', label='Nom de famille', default=''))
BROWSER = BouyguesBrowser
def create_default_browser(self):
return self.create_browser(self.config['login'].get(), self.config['password'].get(), self.config['lastname'].get())
def iter_subscription(self):
return self.browser.iter_subscriptions()
def get_subscription(self, _id):
return find_object(self.iter_subscription(), id=_id, error=SubscriptionNotFound)
def iter_documents(self, subscription):
if not isinstance(subscription, Subscription):
subscription = self.get_subscription(subscription)
return self.browser.iter_documents(subscription)
def get_document(self, _id):
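        # document ids are expected to embed the owning subscription id before
        # the last underscore, so recover the subscription first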
subid = _id.rsplit('_', 1)[0]
subscription = self.get_subscription(subid)
return find_object(self.iter_documents(subscription), id=_id, error=DocumentNotFound)
def download_document(self, document):
if not isinstance(document, Document):
document = self.get_document(document)
return self.browser.download_document(document)
|
laurentb/weboob
|
modules/bouygues/module.py
|
Python
|
lgpl-3.0
| 2,730
|
import pytest
from eos_data_distribution import DirTools
from gi.repository import GLib
ITER_COUNT = 10
class TestClass:
@pytest.mark.timeout(timeout=3, method='thread')
def test_0(self, tmpdir):
loop = GLib.MainLoop()
self.__called = 0
def cb_changed(M, p, m, f, o, evt, d=None, e=None):
print('signal', e, p, f, o, evt, d)
assert e == 'created'
self.__called += 1
d = tmpdir.mkdir("ndn")
m = DirTools.Monitor(str(d))
[m.connect(s, cb_changed, s) for s in ['created']]
[d.mkdir(str(i)) for i in range(ITER_COUNT)]
GLib.timeout_add_seconds(2, lambda: loop.quit())
loop.run()
assert self.__called == ITER_COUNT
|
endlessm/endless-ndn
|
eos_data_distribution/DirTools/test_class.py
|
Python
|
lgpl-3.0
| 742
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import math
import os
from datetime import date
# yapf: disable
class InPsight:
# POV-Ray defines
defines = {}
defines['Shadows'] = 'false'
defines['Background_Color'] = '<0.6,0.6,0.6>'
defines['Output_File_Type'] = 'N'
defines['Output_Alpha'] = 'true'
defines['Light_Color'] = '<1,1,1>'
defines['Filename'] = 'inpsight'
defines['Filepath'] = os.getcwd()
defines['Antialias'] = 'true'
defines['Antialias_Threshold'] = '0.1'
# Molecule geometry
atoms = [] # (Z,x,y,z,R,r,g,b,t) in bohr
bonds = [] # (x1,y1,z1,R1,x2,y2,z2,R2,r,g,b,t)
# Molecular geometry defines
colors = []
radii = []
radial_scale = 0.25
bond_width = 0.2 # bohr
bohr_per_ang = 1.8897161646320724
bonding_alpha = 0.65 # Used to select/reject bonds via sum of vDW radii
# View defines (high-level)
azimuth = 0.0
elevation = 0.0
zoom = 0.5
height = 900
width = 1200
# Camera positions (low-level)
location = [1.0,0.0,0.0]
up = [0.0,0.75,0.0]
right = [1.0,0.0,0.0]
sky = [0.0,-1.0,0.0]
look_at = [0.0,0.0,0.0]
light = [1.0,0.0,0.0]
light_color = [0.6,0.6,0.6]
# Standard Jmol colors, 256-based
colors.append([0,0,0])
colors.append([255,255,255])
colors.append([217,255,255])
colors.append([204,128,255])
colors.append([194,255,0])
colors.append([255,181,181])
colors.append([144,144,144])
colors.append([48,80,248])
colors.append([255,13,13])
colors.append([144,224,80])
colors.append([179,227,245])
colors.append([171,92,242])
colors.append([138,255,0])
colors.append([191,166,166])
colors.append([240,200,160])
colors.append([255,128,0])
colors.append([255,255,48])
colors.append([31,240,31])
colors.append([128,209,227])
colors.append([143,64,212])
colors.append([61,255,0])
colors.append([230,230,230])
colors.append([191,194,199])
colors.append([166,166,171])
colors.append([138,153,199])
colors.append([156,122,199])
colors.append([224,102,51])
colors.append([240,144,160])
colors.append([80,208,80])
colors.append([200,128,51])
colors.append([125,128,176])
colors.append([194,143,143])
colors.append([102,143,143])
colors.append([189,128,227])
colors.append([255,161,0])
colors.append([166,41,41])
colors.append([92,184,209])
colors.append([112,46,176])
colors.append([0,255,0])
colors.append([148,255,255])
colors.append([148,224,224])
colors.append([115,194,201])
colors.append([84,181,181])
colors.append([59,158,158])
colors.append([36,143,143])
colors.append([10,125,140])
colors.append([0,105,133])
colors.append([192,192,192])
colors.append([255,217,143])
colors.append([166,117,115])
colors.append([102,128,128])
colors.append([158,99,181])
colors.append([212,122,0])
colors.append([148,0,148])
colors.append([66,158,176])
colors.append([87,23,143])
colors.append([0,201,0])
colors.append([112,212,255])
colors.append([255,255,199])
colors.append([217,255,199])
colors.append([199,255,199])
colors.append([163,255,199])
colors.append([143,255,199])
colors.append([97,255,199])
colors.append([69,255,199])
colors.append([48,255,199])
colors.append([31,255,199])
colors.append([0,255,156])
colors.append([0,230,117])
colors.append([0,212,82])
colors.append([0,191,56])
colors.append([0,171,36])
colors.append([77,194,255])
colors.append([77,166,255])
colors.append([33,148,214])
colors.append([38,125,171])
colors.append([38,102,150])
colors.append([23,84,135])
colors.append([208,208,224])
colors.append([255,209,35])
colors.append([184,184,208])
colors.append([166,84,77])
colors.append([87,89,97])
colors.append([158,79,181])
colors.append([171,92,0])
colors.append([117,79,69])
colors.append([66,130,150])
colors.append([66,0,102])
colors.append([0,125,0])
colors.append([112,171,250])
colors.append([0,186,255])
colors.append([0,161,255])
colors.append([0,143,255])
colors.append([0,128,255])
colors.append([0,107,255])
colors.append([84,92,242])
colors.append([120,92,227])
colors.append([138,79,227])
colors.append([161,54,212])
colors.append([179,31,212])
colors.append([179,31,186])
colors.append([179,13,166])
colors.append([189,13,135])
colors.append([199,0,102])
colors.append([204,0,89])
colors.append([209,0,79])
colors.append([217,0,69])
colors.append([224,0,56])
colors.append([230,0,46])
colors.append([235,0,38])
# Approximate vDW radii in angstrom
radii.append(2.0)
radii.append(1.001)
radii.append(1.012)
radii.append(0.825)
radii.append(1.408)
radii.append(1.485)
radii.append(1.452)
radii.append(1.397)
radii.append(1.342)
radii.append(1.287)
radii.append(1.243)
radii.append(1.144)
radii.append(1.364)
radii.append(1.639)
radii.append(1.716)
radii.append(1.705)
radii.append(1.683)
radii.append(1.639)
radii.append(1.595)
radii.append(1.485)
radii.append(1.474)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.650)
radii.append(1.727)
radii.append(1.760)
radii.append(1.771)
radii.append(1.749)
radii.append(1.727)
radii.append(1.628)
radii.append(1.606)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.672)
radii.append(1.804)
radii.append(1.881)
radii.append(1.892)
radii.append(1.892)
radii.append(1.881)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
def __init__(self,molecule):
self.molecule = molecule
self.molecule.update_geometry()
self.update_geometry()
def update_geometry(self):
# Atoms
natom = self.molecule.natom()
self.atoms = []
for k in range(0,natom):
x = self.molecule.x(k)
y = self.molecule.y(k)
z = self.molecule.z(k)
Z = self.molecule.Z(k)
atom = Z, x, y, z, self.radial_scale * self.bohr_per_ang * self.radii[Z], self.colors[Z][0] / 256.0, \
self.colors[Z][1] / 256.0, self.colors[Z][2] / 256.0, 0.0
self.atoms.append(atom)
# Bonds
self.bonds = []
for k in range(1,natom):
for l in range (0, k):
Z1 = self.atoms[k][0]
Z2 = self.atoms[l][0]
R1 = self.bohr_per_ang*self.radii[Z1]
R2 = self.bohr_per_ang*self.radii[Z2]
x1 = self.atoms[k][1]
y1 = self.atoms[k][2]
z1 = self.atoms[k][3]
x2 = self.atoms[l][1]
y2 = self.atoms[l][2]
z2 = self.atoms[l][3]
r1 = self.atoms[k][5]
g1 = self.atoms[k][6]
b1 = self.atoms[k][7]
t1 = self.atoms[k][8]
r2 = self.atoms[l][5]
g2 = self.atoms[l][6]
b2 = self.atoms[l][7]
t2 = self.atoms[l][8]
R = math.sqrt((x1-x2)*(x1-x2) +
(y1-y2)*(y1-y2) +
(z1-z2)*(z1-z2))
if (R < self.bonding_alpha*(R1 + R2)):
omega = R2 / (R1 + R2)
xc = omega * (x1 - x2) + x2
yc = omega * (y1 - y2) + y2
zc = omega * (z1 - z2) + z2
bond1 = x1,y1,z1,self.bond_width, xc,yc,zc,self.bond_width,r1,g1,b1,t1
bond2 = x2,y2,z2,self.bond_width, xc,yc,zc,self.bond_width,r2,g2,b2,t2
self.bonds.append(bond1)
self.bonds.append(bond2)
def set_define(self, key, value):
self.defines[key] = value
def set_color(self, Z, color):
self.colors[Z] = color
def set_radius(self, Z, radius):
self.radii[Z] = radius
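    # position_camera() puts the camera on a sphere of radius Rmax/zoom around
    # the molecule's center of mass, converting the azimuth and elevation
    # angles to a Cartesian offset; the light source is placed at a further
    # 30 degree offset in both angles.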
def position_camera(self):
xc = self.molecule.center_of_mass()
self.look_at = [xc[0], xc[1], xc[2]]
Rmax = 0.0
natom = self.molecule.natom()
for k in range(0,natom):
x = [self.molecule.x(k), self.molecule.y(k), self.molecule.z(k)]
R = math.sqrt((x[0] - xc[0])*(x[0] - xc[0]) +
(x[1] - xc[1])*(x[1] - xc[1]) +
(x[2] - xc[2])*(x[2] - xc[2]))
if R > Rmax:
Rmax = R
Rmax = Rmax / self.zoom
if (self.width < self.height):
self.right = [Rmax, 0.0, 0.0]
self.up = [0.0, self.right[0]*self.height/self.width, 0.0]
else:
self.up = [0.0, Rmax, 0.0]
self.right = [self.up[1]*self.width/self.height, 0.0, 0.0]
phi = math.pi*(-self.azimuth)/180.0
theta = math.pi*(90.0 - self.elevation)/180.0
delta = [Rmax*math.cos(phi)*math.sin(theta), Rmax*math.sin(phi)*math.sin(theta), Rmax*math.cos(theta)]
self.location = [xc[0] + delta[0], xc[1] + delta[1], xc[2] + delta[2]]
phi = math.pi*(-(self.azimuth + 30.0))/180.0
theta = math.pi*(90.0 - (self.elevation + 30.0))/180.0
delta = [Rmax*math.cos(phi)*math.sin(theta), Rmax*math.sin(phi)*math.sin(theta), Rmax*math.cos(theta)]
self.light = [xc[0] + delta[0], xc[1] + delta[1], xc[2] + delta[2]]
def set_view(self,azimuth, elevation, zoom = 0.7):
self.azimuth = azimuth
self.elevation = elevation
self.zoom = zoom
self.position_camera()
def set_size(self, width,height):
self.width = width
self.height = height
def set_camera(self, location, sky, up, right, look_at, light, light_color):
self.location = location
self.sky = sky
self.up = up
self.right = right
self.look_at = look_at
self.light = light
self.light_color = light_color
def save_molecule(self, filename):
if (filename != ''):
self.defines['Filename'] = filename
ini_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.pov.ini'
pov_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.pov'
png_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.png'
pov_file = self.defines['Filename'] + '.pov'
png_file = self.defines['Filename'] + '.png'
# Write the pov.ini file
fh = open(ini_filename,'w')
fh.write('; InPsight: visualization in Psi4\n')
fh.write('; by Rob Parrish\n')
fh.write('; .pov.ini file\n')
fh.write('; Created %s\n' % str(date.today()))
fh.write('\n')
fh.write('Input_File_Name=%s\n' % pov_file)
fh.write('Output_to_File=true\n')
fh.write('Output_File_Type=%s\n' % self.defines['Output_File_Type'])
fh.write('Output_File_Name=%s\n' % png_file)
fh.write('Height=%s\n' % str(self.height))
fh.write('Width=%s\n' % str(self.width))
fh.write('Output_Alpha=%s\n' % self.defines['Output_Alpha'])
fh.write('Antialias=%s\n' % self.defines['Antialias'])
fh.write('Antialias_Threshold=%s\n' % self.defines['Antialias_Threshold'])
fh.write('Display=true\n')
fh.write('Warning_Level=5\n')
fh.write('Verbose=false\n')
fh.close()
# Write the pov file
fh = open(pov_filename, 'w')
fh.write('// InPsight: visualization in Psi4\n')
fh.write('// by Rob Parrish\n')
fh.write('// .pov file (adopted from Jmol)\n')
fh.write('// Created %s\n' % str(date.today()))
fh.write('#declare Width = %s;\n' % str(self.width))
fh.write('#declare Height = %s;\n' % str(self.height))
fh.write('#declare Shadows = %s; \n' % self.defines['Shadows'])
fh.write('\n')
fh.write('camera{\n')
fh.write(' orthographic\n')
fh.write(' location < %s, %s, %s>\n' % (str(self.location[0]),str(self.location[1]),str(self.location[2]) ))
fh.write(' sky < %s, %s, %s>\n' % (str(self.sky[0]), str(self.sky[1]), str(self.sky[2]) ))
fh.write(' up < %s, %s, %s>\n' % (str(self.up[0]), str(self.up[1]), str(self.up[2]) ))
fh.write(' right < %s, %s, %s>\n' % (str(self.right[0]), str(self.right[1]), str(self.right[2]) ))
fh.write(' look_at < %s, %s, %s>\n' % (str(self.look_at[0]), str(self.look_at[1]), str(self.look_at[2]) ))
fh.write('}\n')
fh.write('\n')
fh.write('background { color rgb %s }\n' % self.defines['Background_Color'])
fh.write('light_source { <%s,%s,%s> rgb <%s,%s,%s> }\n'
% (str(self.light[0]),str(self.light[1]),str(self.light[2]),
str(self.light_color[0]),str(self.light_color[1]),str(self.light_color[2])))
fh.write('\n')
fh.write('// ***********************************************\n')
fh.write('// macros for atom/bond shapes\n')
fh.write('// ***********************************************\n')
fh.write('#macro check_shadow()\n')
fh.write(' #if (!Shadows)\n')
fh.write(' no_shadow \n')
fh.write(' #end\n')
fh.write('#end\n')
fh.write('\n')
fh.write('#macro translucentFinish(T)\n')
fh.write(' #local shineFactor = T;\n')
fh.write(' #if (T <= 0.25)\n')
fh.write(' #declare shineFactor = (1.0-4*T);\n')
fh.write(' #end\n')
fh.write(' #if (T > 0.25)\n')
fh.write(' #declare shineFactor = 0;\n')
fh.write(' #end\n')
fh.write(' finish {\n')
fh.write(' ambient 0.45\n')
fh.write(' diffuse 0.84\n')
fh.write(' specular 0.22\n')
fh.write(' roughness .00001\n')
fh.write(' metallic shineFactor\n')
fh.write(' phong 0.9*shineFactor\n')
fh.write(' phong_size 120*shineFactor\n')
fh.write('}#end\n')
fh.write('\n')
fh.write('#macro a(X,Y,Z,RADIUS,R,G,B,T)\n')
fh.write(' sphere{<X,Y,Z>,RADIUS\n')
fh.write(' pigment{rgbt<R,G,B,T>}\n')
fh.write(' translucentFinish(T)\n')
fh.write(' check_shadow()}\n')
fh.write('#end\n')
fh.write('\n')
fh.write('#macro b(X1,Y1,Z1,RADIUS1,X2,Y2,Z2,RADIUS2,R,G,B,T)\n')
fh.write(' cone{<X1,Y1,Z1>,RADIUS1,<X2,Y2,Z2>,RADIUS2\n')
fh.write(' pigment{rgbt<R,G,B,T>}\n')
fh.write(' translucentFinish(T)\n')
fh.write(' check_shadow()}\n')
fh.write('#end \n')
for bond in self.bonds:
fh.write('b(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\n' %
(str(bond[0]),str(bond[1]),str(bond[2]),str(bond[3]),
str(bond[4]),str(bond[5]),str(bond[6]),str(bond[7]),
str(bond[8]),str(bond[9]),str(bond[10]),str(bond[11])))
for atom in self.atoms:
fh.write('a(%s,%s,%s,%s,%s,%s,%s,%s)\n' %
(str(atom[1]),str(atom[2]),str(atom[3]),str(atom[4]),
str(atom[5]),str(atom[6]),str(atom[7]),str(atom[8])))
fh.close()
def save_density(self,filename='rho',overlap = 2.0,n = [40,40,40],caxis = [0.0,1.0]):
if (filename != ''):
self.defines['Filename'] = filename
# grid = GridProp()
# GridProp seems to have been retired
grid = None
# GridProp has been retired, so the grid calls below would fail on None;
# raise a descriptive error instead of an obscure AttributeError.
raise NotImplementedError("save_density requires the retired GridProp facility")
grid.set_n(n[0],n[1],n[2])
grid.set_caxis(caxis[0],caxis[1])
grid.set_filename(self.defines['Filename'])
grid.add('RHO')
grid.compute()
df3_file = filename + '.RHO.df3'
l = [grid.get_l(0),grid.get_l(1),grid.get_l(2)]
o = [grid.get_o(0),grid.get_o(1),grid.get_o(2)]
ini_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.pov.ini'
pov_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.pov'
png_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.png'
pov_file = self.defines['Filename'] + '.pov'
png_file = self.defines['Filename'] + '.png'
# Write the pov.ini file
fh = open(ini_filename,'w')
fh.write('; InPsight: visualization in Psi4\n')
fh.write('; by Rob Parrish\n')
fh.write('; .pov.ini file\n')
fh.write('; Created %s\n' % str(date.today()))
fh.write('\n')
fh.write('Input_File_Name=%s\n' % pov_file)
fh.write('Output_to_File=true\n')
fh.write('Output_File_Type=%s\n' % self.defines['Output_File_Type'])
fh.write('Output_File_Name=%s\n' % png_file)
fh.write('Height=%s\n' % str(self.height))
fh.write('Width=%s\n' % str(self.width))
fh.write('Output_Alpha=%s\n' % self.defines['Output_Alpha'])
fh.write('Antialias=%s\n' % self.defines['Antialias'])
fh.write('Antialias_Threshold=%s\n' % self.defines['Antialias_Threshold'])
fh.write('Display=true\n')
fh.write('Warning_Level=5\n')
fh.write('Verbose=false\n')
fh.close()
# Write the pov file
fh = open(pov_filename, 'w')
fh.write('// InPsight: visualization in Psi4\n')
fh.write('// by Rob Parrish\n')
fh.write('// .pov file (adopted from Jmol)\n')
fh.write('// Created %s\n' % str(date.today()))
fh.write('#declare Shadows = %s; \n' % self.defines['Shadows'])
fh.write('\n')
fh.write('camera{\n')
fh.write(' orthographic\n')
fh.write(' location < %s, %s, %s>\n' % (str(self.location[0]),str(self.location[1]),str(self.location[2]) ))
fh.write(' sky < %s, %s, %s>\n' % (str(self.sky[0]), str(self.sky[1]), str(self.sky[2]) ))
fh.write(' up < %s, %s, %s>\n' % (str(self.up[0]), str(self.up[1]), str(self.up[2]) ))
fh.write(' right < %s, %s, %s>\n' % (str(self.right[0]), str(self.right[1]), str(self.right[2]) ))
fh.write(' look_at < %s, %s, %s>\n' % (str(self.look_at[0]), str(self.look_at[1]), str(self.look_at[2]) ))
fh.write('}\n')
fh.write('\n')
fh.write('background { color rgb %s }\n' % self.defines['Background_Color'])
fh.write('light_source { <%s,%s,%s> rgb <%s,%s,%s> }\n'
% (str(self.light[0]),str(self.light[1]),str(self.light[2]),
str(self.light_color[0]),str(self.light_color[1]),str(self.light_color[2])))
fh.write('\n')
fh.write('// ***********************************************\n')
fh.write('// macros for atom/bond shapes\n')
fh.write('// ***********************************************\n')
fh.write('#macro check_shadow()\n')
fh.write(' #if (!Shadows)\n')
fh.write(' no_shadow \n')
fh.write(' #end\n')
fh.write('#end\n')
fh.write('\n')
fh.write('#macro translucentFinish(T)\n')
fh.write(' #local shineFactor = T;\n')
fh.write(' #if (T <= 0.25)\n')
fh.write(' #declare shineFactor = (1.0-4*T);\n')
fh.write(' #end\n')
fh.write(' #if (T > 0.25)\n')
fh.write(' #declare shineFactor = 0;\n')
fh.write(' #end\n')
fh.write(' finish {\n')
fh.write(' ambient 0.45\n')
fh.write(' diffuse 0.84\n')
fh.write(' specular 0.22\n')
fh.write(' roughness .00001\n')
fh.write(' metallic shineFactor\n')
fh.write(' phong 0.9*shineFactor\n')
fh.write(' phong_size 120*shineFactor\n')
fh.write('}#end\n')
fh.write('\n')
fh.write('#macro a(X,Y,Z,RADIUS,R,G,B,T)\n')
fh.write(' sphere{<X,Y,Z>,RADIUS\n')
fh.write(' pigment{rgbt<R,G,B,T>}\n')
fh.write(' translucentFinish(T)\n')
fh.write(' check_shadow()}\n')
fh.write('#end\n')
fh.write('\n')
fh.write('#macro b(X1,Y1,Z1,RADIUS1,X2,Y2,Z2,RADIUS2,R,G,B,T)\n')
fh.write(' cone{<X1,Y1,Z1>,RADIUS1,<X2,Y2,Z2>,RADIUS2\n')
fh.write(' pigment{rgbt<R,G,B,T>}\n')
fh.write(' translucentFinish(T)\n')
fh.write(' check_shadow()}\n')
fh.write('#end \n')
for bond in self.bonds:
fh.write('b(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\n' %
(str(bond[0]),str(bond[1]),str(bond[2]),str(bond[3]),
str(bond[4]),str(bond[5]),str(bond[6]),str(bond[7]),
str(bond[8]),str(bond[9]),str(bond[10]),str(bond[11])))
for atom in self.atoms:
fh.write('a(%s,%s,%s,%s,%s,%s,%s,%s)\n' %
(str(atom[1]),str(atom[2]),str(atom[3]),str(atom[4]),
str(atom[5]),str(atom[6]),str(atom[7]),str(atom[8])))
fh.close()
# yapf: enable
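# ------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The constructor
# call is an assumption based on the self.molecule attribute used above; the
# remaining calls use only methods defined in this class.
#
#   ip = InPsight(mol)                    # hypothetical: wrap a psi4 molecule
#   ip.set_size(1200, 900)                # output image size in pixels
#   ip.set_view(azimuth=30.0, elevation=15.0, zoom=0.8)
#   ip.save_molecule('benzene')           # writes benzene.pov/.pov.ini under the configured Filepath
# ------------------------------------------------------------------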
|
psi4/psi4
|
psi4/driver/p4util/inpsight.py
|
Python
|
lgpl-3.0
| 23,279
|
import re
import Queue
class AbsAnalyst(object):
"""docstring for AbsAnalyst"""
LOGTIME_REGEXP = re.compile("(?P<log_time>\w{4}-\w{2}-\w{2} \w{2}:\w{2}:\w{2})")
def __init__(self):
raise NotImplemented
def isMatch(self, line):
raise NotImplemented
def doStatistic(self):
raise NotImplemented
def doAnalyse(self):
raise NotImplemented
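# Illustrative concrete analyst (not part of the original file); it relies only
# on the interface defined above, and the "ERROR" keyword is an arbitrary example.
class ErrorCountAnalyst(AbsAnalyst):
    """Count lines containing 'ERROR' and remember their timestamps."""
    def __init__(self):
        self.error_times = []
    def isMatch(self, line):
        if "ERROR" not in line:
            return False
        match = self.LOGTIME_REGEXP.search(line)
        if match:
            self.error_times.append(match.group("log_time"))
        return True
    def doStatistic(self):
        # Number of matched error lines.
        return len(self.error_times)
    def doAnalyse(self):
        # Timestamps of the matched error lines, in file order.
        return self.error_times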
|
DrZhang/LogAnalyst
|
Analysts/AbsAnalyst.py
|
Python
|
lgpl-3.0
| 353
|
from pycp2k.inputsection import InputSection
class _each126(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
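# Illustrative usage sketch (not part of the generated class); the values are
# placeholders and the EACH-section semantics (print frequency per iteration
# level) are an assumption about CP2K, not taken from this file.
#   each = _each126()
#   each.Md = 10        # e.g. print every 10th MD step
#   each.Qs_scf = 0     # e.g. never print during QS SCF iterations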
|
SINGROUP/pycp2k
|
pycp2k/classes/_each126.py
|
Python
|
lgpl-3.0
| 1,114
|
import sys
if __name__ == "__main__":
print("Genoa python script")
sys.exit(0)
|
sfu-ireceptor/gateway
|
resources/agave_apps/genoa/genoa.py
|
Python
|
lgpl-3.0
| 88
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pythymio
import random
from gardenworld import *
init('info2_1')
with pythymio.thymio(["acc"],[]) as Thym:
state = dict([])
state["time"] = 0
state["delay"] = 10
def dispatch(evtid, evt_name, evt_args):
# https://www.thymio.org/en:thymioapi prox freq is 16Hz
if evt_name == "fwd.acc": # every 0.0625 sec
state["time"] += 0.0625
state["delay"] -= 1
if state["delay"] < 0:
if 7 < evt_args[1] < 14:
if evt_args[0] > 10:
state["delay"] = 20
tg()
elif evt_args[0] < -10:
state["delay"] = 20
td()
elif evt_args[1] > 20 and abs(evt_args[0]) < 8:
state["delay"] = 10
av()
elif evt_args[1] < 5:
if evt_args[0] > 10:
state["delay"] = 20
dp()
elif evt_args[0] < -10:
state["delay"] = 20
ra()
else: # Wat?
print evt_name
# Now lets start the loopy thing
Thym.loop(dispatch)
print "state is %s" % state
print "Sayonara"
|
pierreboudes/pyThymio
|
garden_real.py
|
Python
|
lgpl-3.0
| 1,328
|
from __future__ import print_function
import sys
import os
# from setuptools import Distribution
from pkg_resources import get_distribution, working_set, VersionConflict
def samefile(path, other):
"""
Workaround for missing ``os.path.samefile`` in Windows Python 2.7.
"""
return os.path.normcase(os.path.normpath(os.path.realpath(path))) \
== os.path.normcase(os.path.normpath(os.path.realpath(other)))
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
try:
import zetup
except VersionConflict:
egg_info = 'zetup.egg-info'
dist = get_distribution('zetup')
if samefile(
dist.location, os.path.dirname(os.path.realpath(__file__))
) and os.path.exists(egg_info):
print("zetup: Removing possibly outdated %s/" % egg_info,
# don't pollute stdout
file=sys.stderr)
for fname in os.listdir(egg_info):
os.remove(os.path.join(egg_info, fname))
os.rmdir(egg_info)
# when run via pip, the egg-info is still referenced by setuptools,
# which would try to read the contents
for keys in working_set.entry_keys.values():
if 'zetup' in keys:
keys.remove('zetup')
del working_set.by_key['zetup']
from zetup import Zetup, DistributionNotFound, VersionConflict
try:
from zetup.commands import make, pytest, tox, conda
except (ImportError, DistributionNotFound, VersionConflict):
# ==> no zetup commands available
# standard setup commands work anyway
pass
# setup_req = 'setuptools >= 15.0'
# try:
# get_distribution(setup_req)
# except VersionConflict:
# for mod in ['setuptools', 'pkg_resources']:
# for name, _ in list(sys.modules.items()):
# if name == mod or name.startswith(mod + '.'):
# del sys.modules[name]
# sys.path.insert(0, Distribution().fetch_build_egg(setup_req))
zfg = Zetup()
zetup.requires.Requirements('setuptools >= 36.2', zfg=zfg).check()
setup = zfg.setup
setup['package_data']['zetup.commands.make'] = [
'templates/*.jinja',
'templates/package/*.jinja',
]
setup()
|
userzimmermann/zetup.py
|
setup.py
|
Python
|
lgpl-3.0
| 2,147
|
# -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2016 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
from bliss.controllers.tango_attr_as_counter import TangoAttrCounter
class tango_fe(TangoAttrCounter):
def __init__(self, name, config):
TangoAttrCounter.__init__(self, name, config)
|
tiagocoutinho/bliss
|
bliss/controllers/tango_fe.py
|
Python
|
lgpl-3.0
| 379
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017
#
# Author(s):
#
# Lars Ørum Rasmussen <ras@dmi.dk>
# Janne Kotro <janne.kotro@fmi.fi>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Notes:
# - This is probably also the place to add possible alarm-related plugins (e.g. Nagios).
# - Timer reset from: http://code.activestate.com/recipes/577407-resettable-timer-class-a-little-enhancement-from-p/
import threading
import logging
import time
LOGGER = logging.getLogger(__name__)
# Seconds between heartbeats. A default value could be calculated after a few heartbeats.
# Newer versions of posttroll send heartbeats that include `min_interval`.
DEFAULT_MIN_INTERVAL = 30
class Monitor(threading.Thread):
"""Will monitor heartbeats.
Will set alarm event if no heartbeat received in specified time interval.
Will do nothing if no time interval scale defined.
"""
def __init__(self, alarm_event, **kwargs):
"""Will set `alarm_event` if no heartbeat in time interval `heartbeat_alarm_scale` times
heartbeat time interval.
"""
self._alarm_scale = float(kwargs.get("heartbeat_alarm_scale", 0))
self._alarm_event = alarm_event
self._interval = self._alarm_scale * DEFAULT_MIN_INTERVAL
self._finished = threading.Event()
self._resetted = False
threading.Thread.__init__(self)
def __call__(self, msg=None):
"""Receive a heartbeat (or not) to reset the timer.
TODO: If possibility for blocking, add a queue.
"""
if self._alarm_scale:
if msg and msg.type == "beat":
try:
self._interval = self._alarm_scale * float(msg.data["min_interval"])
except (KeyError, AttributeError, TypeError, ValueError):
pass
LOGGER.debug("Resetting heartbeat alarm timer to %.1f sec", self._interval)
self._resetted = True
self._finished.set()
self._finished.clear()
def start(self):
if self._alarm_scale:
threading.Thread.start(self)
return self
def stop(self):
self._finished.set()
#
# Context interface.
#
def __enter__(self):
return self.start()
def __exit__(self, *exc):
return self.stop()
#
# Running in the thread.
#
def run(self):
LOGGER.debug("Starting heartbeat monitor with alarm scale %.2f", self._alarm_scale)
while not self._finished.is_set():
self._resetted = True
while self._resetted:
self._resetted = False
self._finished.wait(self._interval)
# prevent a race condition between a finished set / clear (?)
time.sleep(0.05)
if not self._finished.is_set():
self._set_alarm()
LOGGER.debug("Stopping heartbeat monitor")
def _set_alarm(self):
if self._alarm_event:
LOGGER.debug("Missing heartbeat alarm!")
self._alarm_event.set()
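# ------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the message
# source is an assumption modelled on posttroll-style "beat" messages.
#
#   alarm = threading.Event()
#   with Monitor(alarm, heartbeat_alarm_scale=3.0) as monitor:
#       for msg in subscriber.recv():   # hypothetical message iterator
#           monitor(msg)                # "beat" messages reset the timer
#           if alarm.is_set():
#               LOGGER.error("No heartbeat received, triggering failover")
#               break
# ------------------------------------------------------------------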
|
pytroll/pytroll-file-utils
|
trollmoves/heartbeat_monitor.py
|
Python
|
lgpl-3.0
| 3,623
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from logging import getLogger
from eos import __version__ as eos_version
from eos.eve_obj_builder import EveObjBuilder
from eos.util.repr import make_repr_str
from .exception import ExistingSourceError
from .exception import UnknownSourceError
from .source import Source
logger = getLogger(__name__)
class SourceManager:
"""Manages data sources.
Handle and access different sources in an easy way. Useful for cases when
you want to work with, for example, Tranquility and Singularity data at the
same time.
"""
# Format: {literal alias: Source}
_sources = {}
# Default source, will be used implicitly when instantiating fit
default = None
@classmethod
def add(cls, alias, data_handler, cache_handler, make_default=False):
"""Add source to source manager.
Adding a source initializes all facilities hidden behind the name 'source'.
After a source has been added, it is accessible via its alias.
Args:
alias: Alias under which source will be accessible.
data_handler: Data handler instance.
cache_handler: Cache handler instance.
make_default (optional): Whether to mark the passed source as the
default. The default source is used when instantiating new fits,
if no other source is specified.
"""
logger.info('adding source with alias "{}"'.format(alias))
if alias in cls._sources:
raise ExistingSourceError(alias)
# Compare fingerprints from data and cache
cache_fp = cache_handler.get_fingerprint()
data_version = data_handler.get_version()
current_fp = cls.__format_fingerprint(data_version)
# If the data version is missing or the fingerprints mismatch, update cache
if data_version is None or cache_fp != current_fp:
if data_version is None:
logger.info('data version is None, updating cache')
else:
msg = (
'fingerprint mismatch: cache "{}", data "{}", '
'updating cache'
).format(cache_fp, current_fp)
logger.info(msg)
# Generate eve objects and cache them, as generation takes
# a significant amount of time
eve_objects = EveObjBuilder.run(data_handler)
cache_handler.update_cache(eve_objects, current_fp)
# Finally, add record to list of sources
source = Source(alias=alias, cache_handler=cache_handler)
cls._sources[alias] = source
if make_default is True:
cls.default = source
@classmethod
def get(cls, alias):
"""Using source alias, return source.
Args:
alias: Alias of source to return.
Returns:
Source instance.
"""
try:
return cls._sources[alias]
except KeyError:
raise UnknownSourceError(alias)
@classmethod
def remove(cls, alias):
"""Remove source by alias.
Args:
alias: Alias of source to remove.
"""
logger.info('removing source with alias "{}"'.format(alias))
try:
del cls._sources[alias]
except KeyError:
raise UnknownSourceError(alias)
@classmethod
def list(cls):
return list(cls._sources.keys())
@staticmethod
def __format_fingerprint(data_version):
return '{}_{}'.format(data_version, eos_version)
@classmethod
def __repr__(cls):
spec = [['sources', '_sources']]
return make_repr_str(cls, spec)
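# ------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); data_handler and
# cache_handler stand in for real eos handler instances (assumptions).
#
#   SourceManager.add('tranquility', data_handler, cache_handler,
#                     make_default=True)
#   src = SourceManager.get('tranquility')   # raises UnknownSourceError if absent
#   SourceManager.list()                     # -> ['tranquility']
#   SourceManager.remove('tranquility')
# ------------------------------------------------------------------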
|
pyfa-org/eos
|
eos/source/manager.py
|
Python
|
lgpl-3.0
| 4,535
|
from django.db import models
from datetime import date
class Restaurant(models.Model):
name = models.CharField(max_length=200)
transportation = models.BooleanField(default=False)
weatherSensetion = models.BooleanField(default=False)
status = models.BooleanField(default=True)
totalDay = models.IntegerField(default=0)
counter = models.IntegerField(default=0)
def __str__(self):
return self.name
def deleteRest(self, deleteId):
# The manager is reachable from the model class, not from instances.
Restaurant.objects.filter(id=deleteId).delete()
def updateStatus(self, newStatus, updateId):
# .update() is a QuerySet method, so filter() is used instead of get().
Restaurant.objects.filter(id=updateId).update(status=newStatus)
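# Illustrative usage sketch (not part of the original file); assumes a
# configured Django project with migrations applied.
#   r = Restaurant.objects.create(name="Cafeteria A", transportation=True)
#   r.updateStatus(False, r.id)   # mark the restaurant as closed
#   r.deleteRest(r.id)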
|
itucsProject2/Proje1
|
restaurant/models.py
|
Python
|
unlicense
| 639
|
#! /usr/bin/python
from base64 import b64encode, b64decode
from Crypto import Random
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Random import random
from Crypto.Signature import PKCS1_v1_5
privateKeyStr = """-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAws5ZlcsFQv8oh+f5YDE/Dqro+tyQxcRpw8Ykjo/Vxq/x7rFg
CZch7IUWfImTXEiYbePVApgcCFS/yMBJpaG9mWYbYDmpQEMrYEAdo7dB0A6NS/DF
vdlTmhUxe2YBqeP7U+s5pZ1nekhVD1vCkJroP8Z8pwOZ4kDo1pWDcguL8j0c0a5J
eO24sBtBxak3lDOlTdIrc6ulJ/cNrhzIhbmuQUTwImsmOH/SYHHKhMctPAU26CRa
i8NmhIucNx+0LYhikaJXgfdyHD/a7RdSqMHyQWqRjvEyk7DJOEojSEF8OlES24qo
yMTNRUIndrQc2u96oQToQh9sjg6S0g8TlWc0BwIDAQABAoIBAAYE7D21pTb5vV3d
rBXtz537Z/GAaTTDKeVztON2zEs0WoxejLIfKlwtjJFSiuwaDiOvG4DWBF+5hqeE
UYI9qicYQZZKkdE7ghaaRJAvdGgiWvlSujlwgqXLK9k9QKXoNnbUWNamM3FS1NYB
ptRjBPQbhPSAJvwXt1oSCpq2gp98eBYOIFSXu2CAU9RzMcmf6fnC1ddqR+ZIr7Hy
J8ud/VByVyW4qth+sUAKNMQaoDGA2laP7LyzHOhGL9B7j/+hhrdkf25onrEdBMf6
4B1wRZ9Ljfa0UZek874XcaQgX3dBBSjujeECU99iVXKMRQXbP/W2wgrEL+Rt/Lq9
DzzXVNkCgYEAz4e36LGCKIbGCsc4egCFf7kFcS4UM4Yi5EVDXjUKpJC5JlNMkP5x
YCmGRrZWrm1texpAj/xjRDkLxusOQrxA/TpEcC3VQMv4iYip6RR1EMKKlYtK71o0
VQdKFu4Zpe6bvULwmKzGEnzjSKABUbX203ORz06qAbsVyJvY2k+3xiMCgYEA8E3a
xRC8WWRog+Kf/dIrWlbgwI+oeIOKVdRTMLJOlpevXRSMHf4QwQIE0seQMGkSYXzy
q8LY6h2Y2mVN7/bWleWP3JhEcI6j3tstz6/Pl1eJOk50Sg5O5NNtDk2DCYr7sqWo
wGTJZTujqgKaEcwQURcb5hBhO7fOi6stQDzj7s0CgYAJxA03egLx/UWchDt4IkYl
+IL1DIIO2qURsOC2sjBKBjVwREM6H38eU1XqRfJWSvf37whSvkG9mCGhvrXxFgLI
59EIpl9+qRqM/k1gloHbxAvZdbCVCt2jkrA92/6A/HV/toKz7I11mULoy/7D5Zgz
4yBdbQo7Ap7Hze2qeE3hmQKBgQCpAVh3eIm/JQsMlnqHfSTYaHmxdFe8SmckbiwD
96Ol7hB7mlPhVCpcVBsoUwppwbwXrFTP0D3WifJSxyTFPAk3azh+oOihcPB3cBcM
Fk4d1obA5ySKYP3DmZ79jC44q/LRdgj1Ju0tsYAttZ6+HZLsNSB98c8wvNkbCczN
fQWhkQKBgFzyQSZzKHKlu3Zfeyg+doPXlXlQVpRHqYUoOkzY83DE5ZujXE5H081P
bui0fXQMsIFBt+/ItCwZsp9rBXPW0uWtA0jDIEkk5Ro4FuRyx/59/Zllxo3vpZyA
v7lalsxJIywVo2Ni393HP/63bDKjb99mVzeEfpy7Q89Qh+upP8c6
-----END RSA PRIVATE KEY-----
"""
publicKeyStr = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAws5ZlcsFQv8oh+f5YDE/
Dqro+tyQxcRpw8Ykjo/Vxq/x7rFgCZch7IUWfImTXEiYbePVApgcCFS/yMBJpaG9
mWYbYDmpQEMrYEAdo7dB0A6NS/DFvdlTmhUxe2YBqeP7U+s5pZ1nekhVD1vCkJro
P8Z8pwOZ4kDo1pWDcguL8j0c0a5JeO24sBtBxak3lDOlTdIrc6ulJ/cNrhzIhbmu
QUTwImsmOH/SYHHKhMctPAU26CRai8NmhIucNx+0LYhikaJXgfdyHD/a7RdSqMHy
QWqRjvEyk7DJOEojSEF8OlES24qoyMTNRUIndrQc2u96oQToQh9sjg6S0g8TlWc0
BwIDAQAB
-----END PUBLIC KEY-----"""
license = """{
"some_data": "and its value",
"more": "and more",
"offline": true,
"partner": "bob"
}"""
def base64_encode(data):
""" Base64 encodes the given data using Pulselocker's URL-safe character set. """
return b64encode(data).replace('+', '-').replace('/', '_').replace('\n', '')
def base64_decode(data):
""" Base64 decodes the given data using Pulselocker's URL-safe character set. """
return b64decode(data.replace('-', '+').replace('_', '/'))
def encrypt_large_payload(data, public_key, padding="1234567890abcdefghijklmnopqrstuvwxyz"):
"""
Performs an alternate encryption scheme. This is best for large messages that are bigger than the
key size. First, a random 256-bit key is generated. That key is then used to encrypt the data
using AES 256 CBC. The public key is then used to encrypt the key using PKCS1 OAEP. The two are
appended together and Base64-encoded for safe transportation.
Required Parameters:
data The payload to be encrypted.
public_key An RSA public key capable of encrypting 48 bytes (384 bits) of data.
Optional Parameters:
padding Characters to choose from when padding the message. Defaults to 0-9 and a-z.
Returns Base64-encoded, AES-encrypted message with a PKCS1 OAEP encrypted key.
"""
# Generate the key and IV.
random_device = Random.new()
aes_key = random_device.read(32) # 256-bit random key
aes_iv = random_device.read(16) # 128-bit random IV
# Encrypt the key and IV together as the start of the message.
rsa_cipher = PKCS1_OAEP.new(RSA.importKey(public_key))
encrypted_key_iv = rsa_cipher.encrypt(aes_key + aes_iv)
# Encrypt the data using AES CBC. CBC requires the data to be an exact multiple of the AES block
# size (16 bytes). To satisfy this we will left-pad the payload with a randomly chosen character
# from the `padding` parameter to bring its size to an exact multiple of 16 bytes.
pad_char = random.choice(padding)
padding_size = (16 - len(data) % 16) - 1 # Minus 1 byte for hex-encoded padding size
padded_data = "%x%s" % (padding_size, data.rjust(len(data) + padding_size, pad_char))
aes_cipher = AES.new(aes_key, AES.MODE_CBC, aes_iv)
encrypted_data = aes_cipher.encrypt(padded_data)
# Return the whole encrypted key, iv, and data all joined together and Base64 encoded.
return base64_encode(encrypted_key_iv + encrypted_data)
def decrypt_large_payload(data, private_key):
"""
Reverses the alternate encryption scheme. This method extracts a PKCS1 OAEP encrypted key and IV
from the data, then uses that key and IV to perform AES 256 CBC decryption on the rest of the
data. From there, it extracts the size of the payload padding, then chops that off and returns
the decrypted message with no padding.
"""
# Load the key and find out how big it is.
rsa_key = RSA.importKey(private_key)
key_size = (rsa_key.size() + 1) / 8
# Decode the data and separate the key and payload.
data = base64_decode(data)
encrypted_key = data[:key_size]
encrypted_payload = data[key_size:]
# Decrypt the key, then separate the AES key and IV.
rsa_cipher = PKCS1_OAEP.new(rsa_key)
decrypted_key = rsa_cipher.decrypt(encrypted_key)
aes_key = decrypted_key[:32]
aes_iv = decrypted_key[32:]
# Decrypt the payload and remove the padding.
aes_cipher = AES.new(aes_key, AES.MODE_CBC, aes_iv)
decrypted_payload = aes_cipher.decrypt(encrypted_payload)
padding_size = int(decrypted_payload[0], 16) + 1 # Plus 1 for the padding number itself.
payload = decrypted_payload[padding_size:]
return payload
def sign(data, private_key):
"""
Apply SHA-256 hash to (base64 encoded) data and sign it using private_key.
Return base64 encoded signature using PKCS#1 v1.5.
"""
rsa_key = RSA.importKey(private_key)
signer = PKCS1_v1_5.new(rsa_key)
digest = SHA256.new()
digest.update(base64_decode(data)) # assume data was encrypted w/ library.
signature = signer.sign(digest)
return base64_encode(signature)
def verify(data, signature, public_key):
"""
Apply SHA-256 hash to (base64 encoded) data and validate it against PKCS#1 v1.5 signature and
public_key.
Returns Boolean (True if validation passed).
"""
rsa_key = RSA.importKey(public_key)
signer = PKCS1_v1_5.new(rsa_key)
digest = SHA256.new()
digest.update(base64_decode(data)) # assume data was encrypted w/ library.
return signer.verify(digest, base64_decode(signature))
encrypted = encrypt_large_payload(license, publicKeyStr)
print "Encrypted payload:", encrypted
print "Decrypted payload:", decrypt_large_payload(encrypted, privateKeyStr)
signature = sign(encrypted, privateKeyStr)
print "Signature:", signature
print "Valid:", verify(encrypted, signature, publicKeyStr)
|
NatalieWolfe/scratch-space
|
crypt.py
|
Python
|
unlicense
| 7,350
|
# -*- coding: cp1252 -*-
from compiler import *
####################################################################################################################
# Each scene prop record contains the following fields:
# 1) Scene prop id: used for referencing scene props in other files. The prefix spr_ is automatically added before each scene prop id.
# 2) Scene prop flags. See header_scene_props.py for a list of available flags
# 3) Mesh name: Name of the mesh.
# 4) Physics object name: Name of the collision (physics) mesh; "0" if the prop has no collision body.
# 5) Triggers: Simple triggers that are associated with the scene prop
####################################################################################################################
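# Illustrative example record (not part of the original list), following the
# field layout described above; every name below is a placeholder:
#   ("my_crate", 0, "crate_mesh", "bo_crate_mesh", []),
# It would be referenced elsewhere as "spr_my_crate": a static prop using mesh
# "crate_mesh", collision body "bo_crate_mesh", and no triggers.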
check_item_use_trigger = (ti_on_scene_prop_use,
[
(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":instance_id"),
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_use_item", ":instance_id", ":agent_id"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_use_item, ":instance_id", ":agent_id"),
(try_end),
])
check_sally_door_use_trigger_double = (ti_on_scene_prop_use,
[
(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":instance_id"),
(agent_get_position, pos1, ":agent_id"),
(prop_instance_get_starting_position, pos2, ":instance_id"),
(scene_prop_get_slot, ":opened_or_closed", ":instance_id", scene_prop_open_or_close_slot),
(try_begin),
#Outer doors like the castle sally door can be opened only from inside (when the door position is behind the agent), but they can be closed from either side.
(prop_instance_get_scene_prop_kind, ":scene_prop_id", ":instance_id"),
(assign, ":can_open_door", 0),
(try_begin),
(neg|eq, ":scene_prop_id", "spr_viking_keep_destroy_sally_door_right"),
(neg|eq, ":scene_prop_id", "spr_viking_keep_destroy_sally_door_left"),
(neg|eq, ":scene_prop_id", "spr_earth_sally_gate_right"),
(neg|eq, ":scene_prop_id", "spr_earth_sally_gate_left"),
(position_is_behind_position, pos1, pos2),
(assign, ":can_open_door", 1),
(else_try),
(this_or_next|eq, ":scene_prop_id", "spr_viking_keep_destroy_sally_door_right"),
(this_or_next|eq, ":scene_prop_id", "spr_viking_keep_destroy_sally_door_left"),
(this_or_next|eq, ":scene_prop_id", "spr_earth_sally_gate_right"),
(eq, ":scene_prop_id", "spr_earth_sally_gate_left"),
(neg|position_is_behind_position, pos1, pos2),
(assign, ":can_open_door", 1),
(try_end),
(this_or_next|eq, ":can_open_door", 1),
(eq, ":opened_or_closed", 1),
(try_begin),
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_use_item", ":instance_id", ":agent_id"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_use_item, ":instance_id", ":agent_id"),
(try_end),
(try_end),
(try_end),
])
check_sally_door_use_trigger = (ti_on_scene_prop_use,
[
(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":instance_id"),
(agent_get_position, pos1, ":agent_id"),
(prop_instance_get_starting_position, pos2, ":instance_id"),
(scene_prop_get_slot, ":opened_or_closed", ":instance_id", scene_prop_open_or_close_slot),
(try_begin),
#Outer doors like the castle sally door can be opened only from inside (when the door position is behind the agent), but they can be closed from either side.
(this_or_next|position_is_behind_position, pos1, pos2),
(eq, ":opened_or_closed", 1),
(try_begin),
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_use_item", ":instance_id", ":agent_id"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_use_item, ":instance_id", ":agent_id"),
(try_end),
(try_end),
(try_end),
])
check_castle_door_use_trigger = (ti_on_scene_prop_use,
[
(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":instance_id"),
(agent_get_position, pos1, ":agent_id"),
(prop_instance_get_starting_position, pos2, ":instance_id"),
(scene_prop_get_slot, ":opened_or_closed", ":instance_id", scene_prop_open_or_close_slot),
(try_begin),
(ge, ":agent_id", 0),
(agent_get_team, ":agent_team", ":agent_id"),
#Inner doors like castle room doors can only be opened by defenders (team 0), from either side; anyone can close them once they are open.
(this_or_next|eq, ":agent_team", 0),
(eq, ":opened_or_closed", 1),
(try_begin),
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_use_item", ":instance_id", ":agent_id"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_use_item, ":instance_id", ":agent_id"),
(try_end),
(try_end),
(try_end),
])
check_ladder_animate_trigger = (ti_on_scene_prop_is_animating,
[
(store_trigger_param_1, ":instance_id"),
(store_trigger_param_2, ":remaining_time"),
(call_script, "script_check_creating_ladder_dust_effect", ":instance_id", ":remaining_time"),
])
check_ladder_animation_finish_trigger = (ti_on_scene_prop_animation_finished,
[
(store_trigger_param_1, ":instance_id"),
(prop_instance_enable_physics, ":instance_id", 1),
])
scene_props = [
("invalid_object",0,"question_mark","0", []),
("inventory",sokf_type_container|sokf_place_at_origin,"package","bobaggage", []),
("empty", 0, "0", "0", []),
("chest_a",sokf_type_container,"chest_gothic","bochest_gothic", []),
("container_small_chest",sokf_type_container,"package","bobaggage", []),
("container_chest_b",sokf_type_container,"chest_b","bo_chest_b", []),
("container_chest_c",sokf_type_container,"chest_c","bo_chest_c", []),
("player_chest",sokf_type_container,"player_chest","bo_player_chest", []),
("locked_player_chest",0,"player_chest","bo_player_chest", []),
("light_sun",sokf_invisible,"light_sphere","0", [
(ti_on_init_scene_prop,
[
(neg|is_currently_night),
(store_trigger_param_1, ":prop_instance_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_scale, pos5, ":prop_instance_no"),
(position_get_scale_x, ":scale", pos5),
(store_time_of_day,reg(12)),
(try_begin),
(is_between,reg(12),5,20),
(store_mul, ":red", 5 * 200, ":scale"),
(store_mul, ":green", 5 * 193, ":scale"),
(store_mul, ":blue", 5 * 180, ":scale"),
(else_try),
(store_mul, ":red", 5 * 90, ":scale"),
(store_mul, ":green", 5 * 115, ":scale"),
(store_mul, ":blue", 5 * 150, ":scale"),
(try_end),
(val_div, ":red", 100),
(val_div, ":green", 100),
(val_div, ":blue", 100),
(set_current_color,":red", ":green", ":blue"),
(set_position_delta,0,0,0),
(add_point_light_to_entity, 0, 0),
]),
]),
("light",sokf_invisible,"light_sphere","0", [
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":prop_instance_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_scale, pos5, ":prop_instance_no"),
(position_get_scale_x, ":scale", pos5),
(store_mul, ":red", 3 * 200, ":scale"),
(store_mul, ":green", 3 * 145, ":scale"),
(store_mul, ":blue", 3 * 45, ":scale"),
(val_div, ":red", 100),
(val_div, ":green", 100),
(val_div, ":blue", 100),
(set_current_color,":red", ":green", ":blue"),
(set_position_delta,0,0,0),
(add_point_light_to_entity, 10, 30),
]),
]),
("light_red",sokf_invisible,"light_sphere","0", [
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":prop_instance_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_scale, pos5, ":prop_instance_no"),
(position_get_scale_x, ":scale", pos5),
(store_mul, ":red", 2 * 170, ":scale"),
(store_mul, ":green", 2 * 100, ":scale"),
(store_mul, ":blue", 2 * 30, ":scale"),
(val_div, ":red", 100),
(val_div, ":green", 100),
(val_div, ":blue", 100),
(set_current_color,":red", ":green", ":blue"),
(set_position_delta,0,0,0),
(add_point_light_to_entity, 20, 30),
]),
]),
("light_night",sokf_invisible,"light_sphere","0", [
(ti_on_init_scene_prop,
[
# (store_time_of_day,reg(12)),
# (neg|is_between,reg(12),5,20),
(is_currently_night, 0),
(store_trigger_param_1, ":prop_instance_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_scale, pos5, ":prop_instance_no"),
(position_get_scale_x, ":scale", pos5),
(store_mul, ":red", 3 * 160, ":scale"),
(store_mul, ":green", 3 * 145, ":scale"),
(store_mul, ":blue", 3 * 100, ":scale"),
(val_div, ":red", 100),
(val_div, ":green", 100),
(val_div, ":blue", 100),
(set_current_color,":red", ":green", ":blue"),
(set_position_delta,0,0,0),
(add_point_light_to_entity, 10, 30),
]),
]),
("torch",0,"torch_a","0",
[
(ti_on_init_scene_prop,
[
(set_position_delta,0,-35,48),
(particle_system_add_new, "psys_torch_fire"),
(particle_system_add_new, "psys_torch_smoke"),
(particle_system_add_new, "psys_torch_fire_sparks"),
(play_sound, "snd_torch_loop", 0),
(set_position_delta,0,-35,56),
(particle_system_add_new, "psys_fire_glow_1"),
# (particle_system_emit, "psys_fire_glow_1",9000000),
#second method
(get_trigger_object_position, pos2),
(set_position_delta,0,0,0),
(position_move_y, pos2, -35),
(position_move_z, pos2, 55),
(particle_system_burst, "psys_fire_glow_fixed", pos2, 1),
]),
]),
("torch_night",0,"torch_a","0",
[
(ti_on_init_scene_prop,
[
# (store_time_of_day,reg(12)),
# (neg|is_between,reg(12),5,20),
(is_currently_night, 0),
(set_position_delta,0,-35,48),
(particle_system_add_new, "psys_torch_fire"),
(particle_system_add_new, "psys_torch_smoke"),
(particle_system_add_new, "psys_torch_fire_sparks"),
(set_position_delta,0,-35,56),
(particle_system_add_new, "psys_fire_glow_1"),
(particle_system_emit, "psys_fire_glow_1",9000000),
(play_sound, "snd_torch_loop", 0),
]),
]),
# ("Baggage",sokf_place_at_origin|sokf_entity_body,"package","bobaggage"),
("barrier_20m",sokf_invisible|sokf_type_barrier,"barrier_20m","bo_barrier_20m", []),
("barrier_16m",sokf_invisible|sokf_type_barrier,"barrier_16m","bo_barrier_16m", []),
("barrier_8m" ,sokf_invisible|sokf_type_barrier,"barrier_8m" ,"bo_barrier_8m" , []),
("barrier_4m" ,sokf_invisible|sokf_type_barrier,"barrier_4m" ,"bo_barrier_4m" , []),
("barrier_2m" ,sokf_invisible|sokf_type_barrier,"barrier_2m" ,"bo_barrier_2m" , []),
("exit_4m" ,sokf_invisible|sokf_type_barrier_leave,"barrier_4m" ,"bo_barrier_4m" , []),
("exit_8m" ,sokf_invisible|sokf_type_barrier_leave,"barrier_8m" ,"bo_barrier_8m" , []),
("exit_16m" ,sokf_invisible|sokf_type_barrier_leave,"barrier_16m" ,"bo_barrier_16m" , []),
("ai_limiter_2m" ,sokf_invisible|sokf_type_ai_limiter,"barrier_2m" ,"bo_barrier_2m" , []),
("ai_limiter_4m" ,sokf_invisible|sokf_type_ai_limiter,"barrier_4m" ,"bo_barrier_4m" , []),
("ai_limiter_8m" ,sokf_invisible|sokf_type_ai_limiter,"barrier_8m" ,"bo_barrier_8m" , []),
("ai_limiter_16m",sokf_invisible|sokf_type_ai_limiter,"barrier_16m","bo_barrier_16m", []),
("Shield",sokf_dynamic,"0","boshield", []),
("shelves",0,"shelves","boshelves", []),
("table_tavern",0,"table_tavern","botable_tavern", []),
("table_castle_a",0,"table_castle_a","bo_table_castle_a", []),
("chair_castle_a",0,"chair_castle_a","bo_chair_castle_a", []),
("pillow_a",0,"pillow_a","bo_pillow", []),
("pillow_b",0,"pillow_b","bo_pillow", []),
("pillow_c",0,"pillow_c","0", []),
("interior_castle_g_square_keep_b",0,"interior_castle_g_square_keep_b","bo_interior_castle_g_square_keep_b", []),
("carpet_with_pillows_a",0,"carpet_with_pillows_a","bo_carpet_with_pillows", []),
("carpet_with_pillows_b",0,"carpet_with_pillows_b","bo_carpet_with_pillows", []),
("table_round_a",0,"table_round_a","bo_table_round_a", []),
("table_round_b",0,"table_round_b","bo_table_round_b", []),
("fireplace_b",0,"fireplace_b","bo_fireplace_b", []),
("fireplace_c",0,"fireplace_c","bo_fireplace_c", []),
("sofa_a",0,"sofa_a","bo_sofa", []),
("sofa_b",0,"sofa_b","bo_sofa", []),
("ewer_a",0,"ewer_a","bo_ewer_a", []),
("end_table_a",0,"end_table_a","bo_end_table_a", []),
("fake_houses_steppe_a",0,"fake_houses_steppe_a","0", []),
("fake_houses_steppe_b",0,"fake_houses_steppe_b","0", []),
("fake_houses_steppe_c",0,"fake_houses_steppe_c","0", []),
("boat_destroy",0,"boat_destroy","bo_boat_destroy", []),
("destroy_house_a",0,"destroy_house_a","bo_destroy_house_a", []),
("destroy_house_b",0,"destroy_house_b","bo_destroy_house_b", []),
("destroy_house_c",0,"destroy_house_c","bo_destroy_house_c", []),
("destroy_heap",0,"destroy_heap","bo_destroy_heap", []),
("destroy_castle_a",0,"destroy_castle_a","bo_destroy_castle_a", []),
("destroy_castle_b",0,"destroy_castle_b","bo_destroy_castle_b", []),
("destroy_castle_c",0,"destroy_castle_c","bo_destroy_castle_c", []),
("destroy_castle_d",0,"destroy_castle_d","bo_destroy_castle_d", []),
("destroy_windmill",0,"destroy_windmill","bo_destroy_windmill", []),
("destroy_tree_a",0,"destroy_tree_a","bo_destroy_tree_a", []),
("destroy_tree_b",0,"destroy_tree_b","bo_destroy_tree_b", []),
("destroy_bridge_a",0,"destroy_bridge_a","bo_destroy_bridge_a", []),
("destroy_bridge_b",0,"destroy_bridge_b","bo_destroy_bridge_b", []),
("catapult",0,"Catapult","bo_Catapult", []),
("catapult_destructible",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible,"Catapult","bo_Catapult", [
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 1600),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(prop_instance_get_position, pos1, ":instance_no"),
(particle_system_burst, "psys_dummy_smoke_big", pos1, 100),
(particle_system_burst, "psys_dummy_straw_big", pos1, 100),
(position_move_z, pos1, -500),
(position_rotate_x, pos1, 90),
(prop_instance_animate_to_position, ":instance_no", pos1, 300), #animate to 5 meters below over 3 seconds
(try_begin),
(eq, "$g_round_ended", 0),
(scene_prop_get_team, ":scene_prop_team_no", ":instance_no"),
(try_begin),
(eq, ":scene_prop_team_no", 0),
(assign, ":scene_prop_team_no_multiplier", -1),
(else_try),
(assign, ":scene_prop_team_no_multiplier", 1),
(try_end),
(try_begin),
(eq, "$g_number_of_targets_destroyed", 0),
(store_mul, ":target_no_mul_scene_prop_team", ":scene_prop_team_no_multiplier", 1), #1 means destroyed object is a catapult
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_show_multiplayer_message", multiplayer_message_type_target_destroyed, ":target_no_mul_scene_prop_team"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"),
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_show_multiplayer_message, multiplayer_message_type_target_destroyed, ":target_no_mul_scene_prop_team"),
(try_end),
(val_add, "$g_number_of_targets_destroyed", 1),
(else_try),
(store_mul, ":target_no_mul_scene_prop_team", ":scene_prop_team_no_multiplier", 9), #9 means attackers destroyed all targets
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_show_multiplayer_message", multiplayer_message_type_target_destroyed, ":target_no_mul_scene_prop_team"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"),
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_show_multiplayer_message, multiplayer_message_type_target_destroyed, ":target_no_mul_scene_prop_team"),
(try_end),
(val_add, "$g_number_of_targets_destroyed", 1),
(try_end),
(try_end),
#giving gold for destroying target (for catapult)
#step-1 calculating total damage given to that scene prop
(assign, ":total_damage_given", 0),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(try_begin),
(eq, "spr_catapult_destructible", "$g_destructible_target_1"),
(player_get_slot, ":damage_given", ":player_no", slot_player_damage_given_to_target_1),
(else_try),
(player_get_slot, ":damage_given", ":player_no", slot_player_damage_given_to_target_2),
(try_end),
(val_add, ":total_damage_given", ":damage_given"),
(try_end),
#step-2 share 1000 gold (or 50 * number of active players, if fewer than 20 are active) among the players that dealt damage, in proportion to the damage each dealt.
(assign, ":destroy_money_addition", 0),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(val_add, ":destroy_money_addition", 50),
(try_end),
(try_begin),
(ge, ":destroy_money_addition", multi_destroy_target_money_add),
(assign, ":destroy_money_addition", multi_destroy_target_money_add),
(try_end),
(val_mul, ":destroy_money_addition", "$g_multiplayer_battle_earnings_multiplier"),
(val_div, ":destroy_money_addition", 100),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(try_begin),
(eq, "spr_catapult_destructible", "$g_destructible_target_1"),
(player_get_slot, ":damage_given", ":player_no", slot_player_damage_given_to_target_1),
(else_try),
(player_get_slot, ":damage_given", ":player_no", slot_player_damage_given_to_target_2),
(try_end),
(player_get_gold, ":player_gold", ":player_no"), #reward each player in proportion to the damage dealt to the destroyed target
(val_mul, ":damage_given", ":destroy_money_addition"),
(try_begin),
(ge, ":total_damage_given", ":damage_given"),
(gt, ":damage_given", 0),
(store_div, ":gold_earned", ":damage_given", ":total_damage_given"),
(else_try),
(assign, ":gold_earned", 0),
(try_end),
(val_add, ":player_gold", ":gold_earned"),
(player_set_gold, ":player_no", ":player_gold", multi_max_gold_that_can_be_stored),
(try_end),
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(set_fixed_point_multiplier, 1),
(position_get_x, ":attacker_agent_id", pos2),
(try_begin),
(ge, ":attacker_agent_id", 0),
(agent_is_alive, ":attacker_agent_id"),
(agent_is_human, ":attacker_agent_id"),
(neg|agent_is_non_player, ":attacker_agent_id"),
(agent_get_player_id, ":attacker_player_id", ":attacker_agent_id"),
(ge, ":attacker_player_id", 0),
(player_is_active, ":attacker_player_id"),
(try_begin),
(eq, "spr_catapult_destructible", "$g_destructible_target_1"),
(player_get_slot, ":damage_given", ":attacker_player_id", slot_player_damage_given_to_target_1),
(val_add, ":damage_given", ":damage"),
(player_set_slot, ":attacker_player_id", slot_player_damage_given_to_target_1, ":damage_given"),
(else_try),
(player_get_slot, ":damage_given", ":attacker_player_id", slot_player_damage_given_to_target_2),
(val_add, ":damage_given", ":damage"),
(player_set_slot, ":attacker_player_id", slot_player_damage_given_to_target_2, ":damage_given"),
(try_end),
(try_end),
(try_end),
]),
]),
("broom",0,"broom","0", []),
("garlic",0,"garlic","0", []),
("garlic_b",0,"garlic_b","0", []),
("destroy_a",0,"destroy_a","0", []),
("destroy_b",0,"destroy_b","0", []),
("bridge_wooden",0,"bridge_wooden","bo_bridge_wooden", []),
("bridge_wooden_snowy",0,"bridge_wooden_snowy","bo_bridge_wooden", []),
("grave_a",0,"grave_a","bo_grave_a", []),
("village_house_e",0,"village_house_e","bo_village_house_e", []),
("village_house_f",0,"village_house_f","bo_village_house_f", []),
("village_house_g",0,"village_house_g","bo_village_house_g", []),
("village_house_h",0,"village_house_h","bo_village_house_h", []),
("village_house_i",0,"village_house_i","bo_village_house_i", []),
("village_house_j",0,"village_house_j","bo_village_house_j", []),
("village_wall_a",0,"village_wall_a","bo_village_wall_a", []),
("village_wall_b",0,"village_wall_b","bo_village_wall_b", []),
("village_snowy_house_a",0,"village_snowy_house_a","bo_village_snowy_house_a", []),
("village_snowy_house_b",0,"village_snowy_house_b","bo_village_snowy_house_b", []),
("village_snowy_house_c",0,"village_snowy_house_c","bo_village_snowy_house_c", []),
("village_snowy_house_d",0,"village_snowy_house_d","bo_village_snowy_house_d", []),
("village_snowy_house_e",0,"village_snowy_house_e","bo_village_snowy_house_e", []),
("village_snowy_house_f",0,"village_snowy_house_f","bo_village_snowy_house_f", []),
("town_house_steppe_a",0,"town_house_steppe_a","bo_town_house_steppe_a", []),
("town_house_steppe_b",0,"town_house_steppe_b","bo_town_house_steppe_b", []),
("town_house_steppe_c",0,"town_house_steppe_c","bo_town_house_steppe_c", []),
("town_house_steppe_d",0,"town_house_steppe_d","bo_town_house_steppe_d", []),
("town_house_steppe_e",0,"town_house_steppe_e","bo_town_house_steppe_e", []),
("town_house_steppe_f",0,"town_house_steppe_f","bo_town_house_steppe_f", []),
("town_house_steppe_g",0,"town_house_steppe_g","bo_town_house_steppe_g", []),
("town_house_steppe_h",0,"town_house_steppe_h","bo_town_house_steppe_h", []),
("town_house_steppe_i",0,"town_house_steppe_i","bo_town_house_steppe_i", []),
("carpet_a",0,"carpet_a","0", []),
("carpet_b",0,"carpet_b","0", []),
("carpet_c",0,"carpet_c","0", []),
("carpet_d",0,"carpet_d","0", []),
("carpet_e",0,"carpet_e","0", []),
("carpet_f",0,"carpet_f","0", []),
("awning_a",0,"awning_a","bo_awning", []),
("awning_b",0,"awning_b","bo_awning", []),
("awning_c",0,"awning_c","bo_awning", []),
("awning_long",0,"awning_long","bo_awning_long", []),
("awning_long_b",0,"awning_long_b","bo_awning_long", []),
("awning_d",0,"awning_d","bo_awning_d", []),
("ship",0,"ship","bo_ship", []),
("ship_b",0,"ship_b","bo_ship_b", []),
("ship_c",0,"ship_c","bo_ship_c", []),
("ship_d",0,"ship_d","bo_ship_d", []),
("snowy_barrel_a",0,"snowy_barrel_a","bo_snowy_barrel_a", []),
("snowy_fence",0,"snowy_fence","bo_snowy_fence", []),
("snowy_wood_heap",0,"snowy_wood_heap","bo_snowy_wood_heap", []),
("village_snowy_stable_a",0,"village_snowy_stable_a","bo_village_snowy_stable_a", []),
("village_straw_house_a",0,"village_straw_house_a","bo_village_straw_house_a", []),
("village_stable_a",0,"village_stable_a","bo_village_stable_a", []),
("village_shed_a",0,"village_shed_a","bo_village_shed_a", []),
("village_shed_b",0,"village_shed_b","bo_village_shed_b", []),
("dungeon_door_cell_a",0,"dungeon_door_cell_a","bo_dungeon_door_cell_a", []),
("dungeon_door_cell_b",0,"dungeon_door_cell_b","bo_dungeon_door_cell_b", []),
("dungeon_door_entry_a",0,"dungeon_door_entry_a","bo_dungeon_door_entry_a", []),
("dungeon_door_entry_b",0,"dungeon_door_entry_b","bo_dungeon_door_entry_a", []),
("dungeon_door_entry_c",0,"dungeon_door_entry_c","bo_dungeon_door_entry_a", []),
("dungeon_door_direction_a",0,"dungeon_door_direction_a","bo_dungeon_door_direction_a", []),
("dungeon_door_direction_b",0,"dungeon_door_direction_b","bo_dungeon_door_direction_a", []),
("dungeon_door_stairs_a",0,"dungeon_door_stairs_a","bo_dungeon_door_stairs_a", []),
("dungeon_door_stairs_b",0,"dungeon_door_stairs_b","bo_dungeon_door_stairs_a", []),
("dungeon_bed_a",0,"dungeon_bed_a","0", []),
("dungeon_bed_b",0,"dungeon_bed_b","bo_dungeon_bed_b", []),
("torture_tool_a",0,"torture_tool_a","bo_torture_tool_a", []),
("torture_tool_b",0,"torture_tool_b","0", []),
("torture_tool_c",0,"torture_tool_c","bo_torture_tool_c", []),
("skeleton_head",0,"skeleton_head","0", []),
("skeleton_bone",0,"skeleton_bone","0", []),
("skeleton_a",0,"skeleton_a","bo_skeleton_a", []),
("dungeon_stairs_a",sokf_type_ladder,"dungeon_stairs_a","bo_dungeon_stairs_a", []),
("dungeon_stairs_b",sokf_type_ladder,"dungeon_stairs_b","bo_dungeon_stairs_a", []),
("dungeon_torture_room_a",0,"dungeon_torture_room_a","bo_dungeon_torture_room_a", []),
("dungeon_entry_a",0,"dungeon_entry_a","bo_dungeon_entry_a", []),
("dungeon_entry_b",0,"dungeon_entry_b","bo_dungeon_entry_b", []),
("dungeon_entry_c",0,"dungeon_entry_c","bo_dungeon_entry_c", []),
("dungeon_cell_a",0,"dungeon_cell_a","bo_dungeon_cell_a", []),
("dungeon_cell_b",0,"dungeon_cell_b","bo_dungeon_cell_b", []),
("dungeon_cell_c",0,"dungeon_cell_c","bo_dungeon_cell_c", []),
("dungeon_corridor_a",0,"dungeon_corridor_a","bo_dungeon_corridor_a", []),
("dungeon_corridor_b",0,"dungeon_corridor_b","bo_dungeon_corridor_b", []),
("dungeon_corridor_c",0,"dungeon_corridor_c","bo_dungeon_corridor_b", []),
("dungeon_corridor_d",0,"dungeon_corridor_d","bo_dungeon_corridor_b", []),
("dungeon_direction_a",0,"dungeon_direction_a","bo_dungeon_direction_a", []),
("dungeon_direction_b",0,"dungeon_direction_b","bo_dungeon_direction_a", []),
("dungeon_room_a",0,"dungeon_room_a","bo_dungeon_room_a", []),
("dungeon_tower_stairs_a",sokf_type_ladder,"dungeon_tower_stairs_a","bo_dungeon_tower_stairs_a", []),
("dungeon_tower_cell_a",0,"dungeon_tower_cell_a","bo_dungeon_tower_cell_a", []),
("tunnel_a",0,"tunnel_a","bo_tunnel_a", []),
("tunnel_salt",0,"tunnel_salt","bo_tunnel_salt", []),
("salt_a",0,"salt_a","bo_salt_a", []),
("door_destructible",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(2),"tutorial_door_a","bo_tutorial_door_a", [
check_item_use_trigger,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 2000),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
(prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 second
(try_end),
]),
(ti_on_scene_prop_hit,
[
(play_sound, "snd_dummy_hit"),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
]),
]),
("tutorial_door_a",sokf_moveable,"tutorial_door_a","bo_tutorial_door_a", []),
("tutorial_door_b",sokf_moveable,"tutorial_door_b","bo_tutorial_door_b", []),
("tutorial_flag_yellow",sokf_moveable|sokf_face_player,"tutorial_flag_yellow","0", []),
("tutorial_flag_red",sokf_moveable|sokf_face_player,"tutorial_flag_red","0", []),
("tutorial_flag_blue",sokf_moveable|sokf_face_player,"tutorial_flag_blue","0", []),
("interior_prison_a",0,"interior_prison_a","bo_interior_prison_a", []),
("interior_prison_b",0,"interior_prison_b","bo_interior_prison_b", []),
("interior_prison_cell_a",0,"interior_prison_cell_a","bo_interior_prison_cell_a", []),
("interior_prison_d",0,"interior_prison_d","bo_interior_prison_d", []),
("arena_archery_target_a",0,"arena_archery_target_a","bo_arena_archery_target_a", []),
("archery_butt_a",0,"archery_butt","bo_archery_butt", [
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(prop_instance_get_position, pos2, ":instance_no"),
(get_player_agent_no, ":player_agent"),
(agent_get_position, pos3, ":player_agent"),
(get_distance_between_positions, ":player_distance", pos3, pos2),
(position_transform_position_to_local, pos4, pos2, pos1),
(position_set_y, pos4, 0),
(position_set_x, pos2, 0),
(position_set_y, pos2, 0),
(position_set_z, pos2, 0),
(get_distance_between_positions, ":target_distance", pos4, pos2),
(assign, ":point_earned", 43), #Calculating a point between 0-12
(val_sub, ":point_earned", ":target_distance"),
(val_mul, ":point_earned", 1299),
(val_div, ":point_earned", 4300),
(try_begin),
(lt, ":point_earned", 0),
(assign, ":point_earned", 0),
(try_end),
(val_div, ":player_distance", 91), #Converting to yards
(assign, reg60, ":point_earned"),
(assign, reg61, ":player_distance"),
(display_message, "str_archery_target_hit"),
]),
]),
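  # Training-ground variant of the archery target: it recovers the attacker agent from the x
  # coordinate of pos2, scores only hits made by the player, stores the result in
  # $g_last_archery_point_earned, and advances $g_tutorial_training_ground_current_score when the
  # horseman trainer chapter expects this target (matched through its variation id).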
("archery_target_with_hit_a",0,"arena_archery_target_a","bo_arena_archery_target_a", [
(ti_on_scene_prop_hit,
[
(set_fixed_point_multiplier, 100),
(store_trigger_param_1, ":instance_no"),
(position_get_x, ":attacker_agent_id", pos2),
(val_div, ":attacker_agent_id", 100),
(get_player_agent_no, ":player_agent"),
(try_begin),
(eq, ":player_agent", ":attacker_agent_id"),
(prop_instance_get_position, pos2, ":instance_no"),
(agent_get_position, pos3, ":player_agent"),
(get_distance_between_positions, ":player_distance", pos3, pos2),
(position_transform_position_to_local, pos4, pos2, pos1),
(position_set_y, pos4, 0),
(position_set_x, pos2, 0),
(position_set_y, pos2, 0),
(position_set_z, pos2, 0),
(get_distance_between_positions, ":target_distance", pos4, pos2),
(assign, ":point_earned", 43), #Calculating a point between 0-12
(val_sub, ":point_earned", ":target_distance"),
(val_mul, ":point_earned", 1299),
(val_div, ":point_earned", 4300),
(try_begin),
(lt, ":point_earned", 0),
(assign, ":point_earned", 0),
(try_end),
(assign, "$g_last_archery_point_earned", ":point_earned"),
(val_div, ":player_distance", 91), #Converting to yards
(assign, reg60, ":point_earned"),
(assign, reg61, ":player_distance"),
(display_message, "str_archery_target_hit"),
(eq, "$g_tutorial_training_ground_horseman_trainer_state", 6),
(eq, "$g_tutorial_training_ground_horseman_trainer_completed_chapters", 2),
(prop_instance_get_variation_id_2, ":var_id_2", ":instance_no"),
(val_sub, ":var_id_2", 1),
(eq, "$g_tutorial_training_ground_current_score", ":var_id_2"),
(val_add, "$g_tutorial_training_ground_current_score", 1),
(try_end),
]),
]),
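  # Melee training dummy: each hit rocks the dummy by an angle proportional to the damage dealt
  # (reported via str_delivered_damage); destroying it knocks it over and increments
  # $tutorial_num_total_dummies_destroyed.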
("dummy_a",sokf_destructible|sokf_moveable,"arena_archery_target_b","bo_arena_archery_target_b", [
(ti_on_scene_prop_destroy,
[
(store_trigger_param_1, ":instance_no"),
(prop_instance_get_starting_position, pos1, ":instance_no"),
(get_player_agent_no, ":player_agent"),
       (agent_get_position, pos2, ":player_agent"),
       (assign, ":rotate_side", 80),
       (try_begin),
         (position_is_behind_position, pos2, pos1),
         (val_mul, ":rotate_side", -1),
       (try_end),
       (position_rotate_x, pos1, ":rotate_side"),
       (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(val_add, "$tutorial_num_total_dummies_destroyed", 1),
(play_sound, "snd_dummy_destroyed"),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(assign, reg60, ":damage"),
(val_div, ":damage", 8),
(prop_instance_get_position, pos2, ":instance_no"),
(get_player_agent_no, ":player_agent"),
(agent_get_position, pos3, ":player_agent"),
(try_begin),
(position_is_behind_position, pos3, pos2),
(val_mul, ":damage", -1),
(try_end),
       (position_rotate_x, pos2, ":damage"),
       (display_message, "str_delivered_damage"),
       (prop_instance_animate_to_position, ":instance_no", pos2, 30), #animate to position 2 in 0.3 seconds
(play_sound, "snd_dummy_hit"),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
]),
]),
("band_a",0,"band_a","0", []),
("arena_sign",0,"arena_arms","0", []),
("castle_h_battlement_a",0,"castle_h_battlement_a","bo_castle_h_battlement_a", []),
("castle_h_battlement_b",0,"castle_h_battlement_b","bo_castle_h_battlement_b", []),
("castle_h_battlement_c",0,"castle_h_battlement_c","bo_castle_h_battlement_c", []),
("castle_h_battlement_a2",0,"castle_h_battlement_a2","bo_castle_h_battlement_a2", []),
("castle_h_battlement_b2",0,"castle_h_battlement_b2","bo_castle_h_battlement_b2", []),
("castle_h_corner_a",0,"castle_h_corner_a","bo_castle_h_corner_a", []),
("castle_h_corner_c",0,"castle_h_corner_c","bo_castle_h_corner_c", []),
("castle_h_stairs_a",sokf_type_ladder,"castle_h_stairs_a","bo_castle_h_stairs_a", []),
("castle_h_stairs_b",0,"castle_h_stairs_b","bo_castle_h_stairs_b", []),
("castle_h_gatehouse_a",0,"castle_h_gatehouse_a","bo_castle_h_gatehouse_a", []),
("castle_h_keep_a",0,"castle_h_keep_a","bo_castle_h_keep_a", []),
("castle_h_keep_b",0,"castle_h_keep_b","bo_castle_h_keep_b", []),
("castle_h_house_a",0,"castle_h_house_a","bo_castle_h_house_a", []),
("castle_h_house_b",0,"castle_h_house_b","bo_castle_h_house_b", []),
("castle_h_house_c",0,"castle_h_house_c","bo_castle_h_house_b", []),
("castle_h_battlement_barrier",0,"castle_h_battlement_barrier","bo_castle_h_battlement_barrier", []),
("full_keep_b",0,"full_keep_b","bo_full_keep_b", []),
("castle_f_keep_a",0,"castle_f_keep_a","bo_castle_f_keep_a", []),
("castle_f_battlement_a",0,"castle_f_battlement_a","bo_castle_f_battlement_a", []),
("castle_f_battlement_a_destroyed",0,"castle_f_battlement_a_destroyed","bo_castle_f_battlement_a_destroyed", []),
("castle_f_battlement_b",0,"castle_f_battlement_b","bo_castle_f_battlement_b", []),
("castle_f_battlement_c",0,"castle_f_battlement_c","bo_castle_f_battlement_c", []),
("castle_f_battlement_d",0,"castle_f_battlement_d","bo_castle_f_battlement_d", []),
("castle_f_battlement_e",0,"castle_f_battlement_e","bo_castle_f_battlement_e", []),
("castle_f_sally_port_elevation",0,"castle_f_sally_port_elevation","bo_castle_f_sally_port_elevation", []),
("castle_f_battlement_corner_a",0,"castle_f_battlement_corner_a","bo_castle_f_battlement_corner_a", []),
("castle_f_battlement_corner_b",0,"castle_f_battlement_corner_b","bo_castle_f_battlement_corner_b", []),
("castle_f_battlement_corner_c",0,"castle_f_battlement_corner_c","bo_castle_f_battlement_corner_c", []),
("castle_f_door_a",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(0),"castle_f_door_a","bo_castle_f_door_a", [
check_castle_door_use_trigger,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 1000),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
#(assign, reg0, ":z_difference"),
#(display_message, "@{!}z dif : {reg0}"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
           (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(try_end),
]),
]),
("castle_f_doors_top_a",0,"castle_f_doors_top_a","bo_castle_f_doors_top_a", []),
("castle_f_sally_door_a",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(0),"castle_f_sally_door_a","bo_castle_f_sally_door_a", [
check_sally_door_use_trigger,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 1000),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
           (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(try_end),
]),
]),
("castle_f_stairs_a",sokf_type_ladder,"castle_f_stairs_a","bo_castle_f_stairs_a", []),
("castle_f_tower_a",0,"castle_f_tower_a","bo_castle_f_tower_a", []),
("castle_f_wall_stairs_a",sokf_type_ladder,"castle_f_wall_stairs_a","bo_castle_f_wall_stairs_a", []),
("castle_f_wall_stairs_b",sokf_type_ladder,"castle_f_wall_stairs_b","bo_castle_f_wall_stairs_b", []),
("castle_f_wall_way_a",0,"castle_f_wall_way_a","bo_castle_f_wall_way_a", []),
("castle_f_wall_way_b",0,"castle_f_wall_way_b","bo_castle_f_wall_way_b", []),
("castle_f_gatehouse_a",0,"castle_f_gatehouse_a","bo_castle_f_gatehouse_a", []),
("castle_g_battlement_a",0,"castle_g_battlement_a","bo_castle_g_battlement_a", []),
("castle_g_battlement_a1",0,"castle_g_battlement_a1","bo_castle_g_battlement_a1", []),
("castle_g_battlement_c",0,"castle_g_battlement_c","bo_castle_g_battlement_c", []),
("castle_g_corner_a",0,"castle_g_corner_a","bo_castle_g_corner_a", []),
("castle_g_corner_c",0,"castle_g_corner_c","bo_castle_g_corner_c", []),
("castle_g_tower_a",sokf_type_ladder,"castle_g_tower_a","bo_castle_g_tower_a", []),
("castle_g_gate_house",0,"castle_g_gate_house","bo_castle_g_gate_house", []),
("castle_g_gate_house_door_a",0,"castle_g_gate_house_door_a","bo_castle_g_gate_house_door_a", []),
("castle_g_gate_house_door_b",0,"castle_g_gate_house_door_b","bo_castle_g_gate_house_door_b", []),
("castle_g_square_keep_a",0,"castle_g_square_keep_a","bo_castle_g_square_keep_a", []),
("castle_i_battlement_a",0,"castle_i_battlement_a","bo_castle_i_battlement_a", []),
("castle_i_battlement_a1",0,"castle_i_battlement_a1","bo_castle_i_battlement_a1", []),
("castle_i_battlement_c",0,"castle_i_battlement_c","bo_castle_i_battlement_c", []),
("castle_i_corner_a",0,"castle_i_corner_a","bo_castle_i_corner_a", []),
("castle_i_corner_c",0,"castle_i_corner_c","bo_castle_i_corner_c", []),
("castle_i_tower_a",sokf_type_ladder,"castle_i_tower_a","bo_castle_i_tower_a", []),
("castle_i_gate_house",0,"castle_i_gate_house","bo_castle_i_gate_house", []),
("castle_i_gate_house_door_a",0,"castle_i_gate_house_door_a","bo_castle_i_gate_house_door_a", []),
("castle_i_gate_house_door_b",0,"castle_i_gate_house_door_b","bo_castle_i_gate_house_door_b", []),
("castle_i_square_keep_a",0,"castle_i_square_keep_a","bo_castle_i_square_keep_a", []),
("mosque_a",0,"mosque_a","bo_mosque_a", []),
("stone_minaret_a",0,"stone_minaret_a","bo_stone_minaret_a", []),
("stone_house_a",0,"stone_house_a","bo_stone_house_a", []),
("stone_house_b",0,"stone_house_b","bo_stone_house_b", []),
("stone_house_c",0,"stone_house_c","bo_stone_house_c", []),
("stone_house_d",0,"stone_house_d","bo_stone_house_d", []),
("stone_house_e",0,"stone_house_e","bo_stone_house_e", []),
("stone_house_f",0,"stone_house_f","bo_stone_house_f", []),
("banner_pole", sokf_moveable, "banner_pole", "bo_banner_pole", []),
("custom_banner_01",0,"custom_banner_01","0",
[
(ti_on_init_scene_prop,
[
(party_get_slot, ":leader_troop", "$g_encountered_party", slot_town_lord),
(try_begin),
(ge, ":leader_troop", 0),
(cur_scene_prop_set_tableau_material, "tableau_custom_banner_default", ":leader_troop"),
(try_end),
]),
]),
("custom_banner_02",0,"custom_banner_02","0",
[
(ti_on_init_scene_prop,
[
(party_get_slot, ":leader_troop", "$g_encountered_party", slot_town_lord),
(try_begin),
(ge, ":leader_troop", 0),
(cur_scene_prop_set_tableau_material, "tableau_custom_banner_default", ":leader_troop"),
(try_end),
]),
]),
("banner_a",0,"banner_a01","0", []),
("banner_b",0,"banner_a02","0", []),
("banner_c",0,"banner_a03","0", []),
("banner_d",0,"banner_a04","0", []),
("banner_e",0,"banner_a05","0", []),
("banner_f",0,"banner_a06","0", []),
("banner_g",0,"banner_a07","0", []),
("banner_h",0,"banner_a08","0", []),
("banner_i",0,"banner_a09","0", []),
("banner_j",0,"banner_a10","0", []),
("banner_k",0,"banner_a11","0", []),
("banner_l",0,"banner_a12","0", []),
("banner_m",0,"banner_a13","0", []),
("banner_n",0,"banner_a14","0", []),
("banner_o",0,"banner_f21","0", []),
("banner_p",0,"banner_a16","0", []),
("banner_q",0,"banner_a17","0", []),
("banner_r",0,"banner_a18","0", []),
("banner_s",0,"banner_a19","0", []),
("banner_t",0,"banner_a20","0", []),
("banner_u",0,"banner_a21","0", []),
("banner_ba",0,"banner_b01","0", []),
("banner_bb",0,"banner_b02","0", []),
("banner_bc",0,"banner_b03","0", []),
("banner_bd",0,"banner_b04","0", []),
("banner_be",0,"banner_b05","0", []),
("banner_bf",0,"banner_b06","0", []),
("banner_bg",0,"banner_b07","0", []),
("banner_bh",0,"banner_b08","0", []),
("banner_bi",0,"banner_b09","0", []),
("banner_bj",0,"banner_b10","0", []),
("banner_bk",0,"banner_b11","0", []),
("banner_bl",0,"banner_b12","0", []),
("banner_bm",0,"banner_b13","0", []),
("banner_bn",0,"banner_b14","0", []),
("banner_bo",0,"banner_b15","0", []),
("banner_bp",0,"banner_b16","0", []),
("banner_bq",0,"banner_b17","0", []),
("banner_br",0,"banner_b18","0", []),
("banner_bs",0,"banner_b19","0", []),
("banner_bt",0,"banner_b20","0", []),
("banner_bu",0,"banner_b21","0", []),
("banner_ca",0,"banner_c01","0", []),
("banner_cb",0,"banner_c02","0", []),
("banner_cc",0,"banner_c03","0", []),
("banner_cd",0,"banner_c04","0", []),
("banner_ce",0,"banner_c05","0", []),
("banner_cf",0,"banner_c06","0", []),
("banner_cg",0,"banner_c07","0", []),
("banner_ch",0,"banner_c08","0", []),
("banner_ci",0,"banner_c09","0", []),
("banner_cj",0,"banner_c10","0", []),
("banner_ck",0,"banner_c11","0", []),
("banner_cl",0,"banner_c12","0", []),
("banner_cm",0,"banner_c13","0", []),
("banner_cn",0,"banner_c14","0", []),
("banner_co",0,"banner_c15","0", []),
("banner_cp",0,"banner_c16","0", []),
("banner_cq",0,"banner_c17","0", []),
("banner_cr",0,"banner_c18","0", []),
("banner_cs",0,"banner_c19","0", []),
("banner_ct",0,"banner_c20","0", []),
("banner_cu",0,"banner_c21","0", []),
("banner_da",0,"banner_d01","0", []),
("banner_db",0,"banner_d02","0", []),
("banner_dc",0,"banner_d03","0", []),
("banner_dd",0,"banner_d04","0", []),
("banner_de",0,"banner_d05","0", []),
("banner_df",0,"banner_d06","0", []),
("banner_dg",0,"banner_d07","0", []),
("banner_dh",0,"banner_d08","0", []),
("banner_di",0,"banner_d09","0", []),
("banner_dj",0,"banner_d10","0", []),
("banner_dk",0,"banner_d11","0", []),
("banner_dl",0,"banner_d12","0", []),
("banner_dm",0,"banner_d13","0", []),
("banner_dn",0,"banner_d14","0", []),
("banner_do",0,"banner_d15","0", []),
("banner_dp",0,"banner_d16","0", []),
("banner_dq",0,"banner_d17","0", []),
("banner_dr",0,"banner_d18","0", []),
("banner_ds",0,"banner_d19","0", []),
("banner_dt",0,"banner_d20","0", []),
("banner_du",0,"banner_d21","0", []),
("banner_ea",0,"banner_e01","0", []),
("banner_eb",0,"banner_e02","0", []),
("banner_ec",0,"banner_e03","0", []),
("banner_ed",0,"banner_e04","0", []),
("banner_ee",0,"banner_e05","0", []),
("banner_ef",0,"banner_e06","0", []),
("banner_eg",0,"banner_e07","0", []),
("banner_eh",0,"banner_e08","0", []),
("banner_ei",0,"banner_e09","0", []),
("banner_ej",0,"banner_e10","0", []),
("banner_ek",0,"banner_e11","0", []),
("banner_el",0,"banner_e12","0", []),
("banner_em",0,"banner_e13","0", []),
("banner_en",0,"banner_e14","0", []),
("banner_eo",0,"banner_e15","0", []),
("banner_ep",0,"banner_e16","0", []),
("banner_eq",0,"banner_e17","0", []),
("banner_er",0,"banner_e18","0", []),
("banner_es",0,"banner_e19","0", []),
("banner_et",0,"banner_e20","0", []),
("banner_eu",0,"banner_e21","0", []),
("banner_f01", 0, "banner_f01", "0", []),
("banner_f02", 0, "banner_f02", "0", []),
("banner_f03", 0, "banner_f03", "0", []),
("banner_f04", 0, "banner_f04", "0", []),
("banner_f05", 0, "banner_f05", "0", []),
("banner_f06", 0, "banner_f06", "0", []),
("banner_f07", 0, "banner_f07", "0", []),
("banner_f08", 0, "banner_f08", "0", []),
("banner_f09", 0, "banner_f09", "0", []),
("banner_f10", 0, "banner_f10", "0", []),
("banner_f11", 0, "banner_f11", "0", []),
("banner_f12", 0, "banner_f12", "0", []),
("banner_f13", 0, "banner_f13", "0", []),
("banner_f14", 0, "banner_f14", "0", []),
("banner_f15", 0, "banner_f15", "0", []),
("banner_f16", 0, "banner_f16", "0", []),
("banner_f17", 0, "banner_f17", "0", []),
("banner_f18", 0, "banner_f18", "0", []),
("banner_f19", 0, "banner_f19", "0", []),
("banner_f20", 0, "banner_f20", "0", []),
("banner_g01", 0, "banner_f01", "0", []),
("banner_g02", 0, "banner_f02", "0", []),
("banner_g03", 0, "banner_f03", "0", []),
("banner_g04", 0, "banner_f04", "0", []),
("banner_g05", 0, "banner_f05", "0", []),
("banner_g06", 0, "banner_f06", "0", []),
("banner_g07", 0, "banner_f07", "0", []),
("banner_g08", 0, "banner_f08", "0", []),
("banner_g09", 0, "banner_f09", "0", []),
("banner_g10", 0, "banner_f10", "0", []),
("banner_kingdom_a", 0, "banner_kingdom_a", "0", []),
("banner_kingdom_b", 0, "banner_kingdom_b", "0", []),
("banner_kingdom_c", 0, "banner_kingdom_c", "0", []),
("banner_kingdom_d", 0, "banner_kingdom_d", "0", []),
("banner_kingdom_e", 0, "banner_kingdom_e", "0", []),
("banner_kingdom_f", 0, "banner_kingdom_f", "0", []),
("banner_f21", 0, "banner_a15", "0", []),
("tavern_chair_a",0,"tavern_chair_a","bo_tavern_chair_a", []),
("tavern_chair_b",0,"tavern_chair_b","bo_tavern_chair_b", []),
("tavern_table_a",0,"tavern_table_a","bo_tavern_table_a", []),
("tavern_table_b",0,"tavern_table_b","bo_tavern_table_b", []),
("fireplace_a",0,"fireplace_a","bo_fireplace_a", []),
("barrel",0,"barrel","bobarrel", []),
("bench_tavern",0,"bench_tavern","bobench_tavern", []),
("bench_tavern_b",0,"bench_tavern_b","bo_bench_tavern_b", []),
("bowl_wood",0,"bowl_wood","0", []),
("chandelier_table",0,"chandelier_table","0", []),
("chandelier_tavern",0,"chandelier_tavern","0", []),
("chest_gothic",0,"chest_gothic","bochest_gothic", []),
("chest_b",0,"chest_b","bo_chest_b", []),
("chest_c",0,"chest_c","bo_chest_c", []),
("counter_tavern",0,"counter_tavern","bocounter_tavern", []),
("cup",0,"cup","0", []),
("dish_metal",0,"dish_metal","0", []),
("gothic_chair",0,"gothic_chair","bogothic_chair", []),
("gothic_stool",0,"gothic_stool","bogothic_stool", []),
("grate",0,"grate","bograte", []),
("jug",0,"jug","0", []),
("potlamp",0,"potlamp","0", []),
("weapon_rack",0,"weapon_rack","boweapon_rack", []),
("weapon_rack_big",0,"weapon_rack_big","boweapon_rack_big", []),
("tavern_barrel",0,"barrel","bobarrel", []),
("tavern_barrel_b",0,"tavern_barrel_b","bo_tavern_barrel_b", []),
("merchant_sign",0,"merchant_sign","bo_tavern_sign", []),
("tavern_sign",0,"tavern_sign","bo_tavern_sign", []),
("sack",0,"sack","0", []),
("skull_a",0,"skull_a","0", []),
("skull_b",0,"skull_b","0", []),
("skull_c",0,"skull_c","0", []),
("skull_d",0,"skull_d","0", []),
("skeleton_cow",0,"skeleton_cow","0", []),
("cupboard_a",0,"cupboard_a","bo_cupboard_a", []),
("box_a",0,"box_a","bo_box_a", []),
("bucket_a",0,"bucket_a","bo_bucket_a", []),
("straw_a",0,"straw_a","0", []),
("straw_b",0,"straw_b","0", []),
("straw_c",0,"straw_c","0", []),
("cloth_a",0,"cloth_a","0", []),
("cloth_b",0,"cloth_b","0", []),
("mat_a",0,"mat_a","0", []),
("mat_b",0,"mat_b","0", []),
("mat_c",0,"mat_c","0", []),
("mat_d",0,"mat_d","0", []),
("wood_a",0,"wood_a","bo_wood_a", []),
("wood_b",0,"wood_b","bo_wood_b", []),
("wood_heap",0,"wood_heap_a","bo_wood_heap_a", []),
("wood_heap_b",0,"wood_heap_b","bo_wood_heap_b", []),
("water_well_a",0,"water_well_a","bo_water_well_a", []),
("net_a",0,"net_a","bo_net_a", []),
("net_b",0,"net_b","0", []),
("meat_hook",0,"meat_hook","0", []),
("cooking_pole",0,"cooking_pole","0", []),
("bowl_a",0,"bowl_a","0", []),
("bucket_b",0,"bucket_b","0", []),
("washtub_a",0,"washtub_a","bo_washtub_a", []),
("washtub_b",0,"washtub_b","bo_washtub_b", []),
("table_trunk_a",0,"table_trunk_a","bo_table_trunk_a", []),
("chair_trunk_a",0,"chair_trunk_a","bo_chair_trunk_a", []),
("chair_trunk_b",0,"chair_trunk_b","bo_chair_trunk_b", []),
("chair_trunk_c",0,"chair_trunk_c","bo_chair_trunk_c", []),
("table_trestle_long",0,"table_trestle_long","bo_table_trestle_long", []),
("table_trestle_small",0,"table_trestle_small","bo_table_trestle_small", []),
("chair_trestle",0,"chair_trestle","bo_chair_trestle", []),
("wheel",0,"wheel","bo_wheel", []),
("ladder",sokf_type_ladder,"ladder","boladder", []),
("cart",0,"cart","bo_cart", []),
("village_stand",0,"village_stand","bovillage_stand", []),
("wooden_stand",0,"wooden_stand","bowooden_stand", []),
("table_small",0,"table_small","bo_table_small", []),
("table_small_b",0,"table_small_b","bo_table_small_b", []),
("small_timber_frame_house_a",0,"small_timber_frame_house_a","bo_small_timber_frame_house_a", []),
("timber_frame_house_b",0,"tf_house_b","bo_tf_house_b", []),
("timber_frame_house_c",0,"tf_house_c","bo_tf_house_c", []),
("timber_frame_extension_a",0,"timber_frame_extension_a","bo_timber_frame_extension_a", []),
("timber_frame_extension_b",0,"timber_frame_extension_b","bo_timber_frame_extension_b", []),
("stone_stairs_a",sokf_type_ladder,"stone_stairs_a","bo_stone_stairs_a", []),
("stone_stairs_b",sokf_type_ladder,"stone_stairs_b","bo_stone_stairs_b", []),
("railing_a",0,"railing_a","bo_railing_a", []),
("side_building_a",0,"side_building_a","bo_side_building_a", []),
("battlement_a",0,"battlement_a","bo_battlement_a", []),
("battlement_a_destroyed",0,"battlement_a_destroyed","bo_battlement_a_destroyed", []),
("round_tower_a",0,"round_tower_a","bo_round_tower_a", []),
("small_round_tower_a",0,"small_round_tower_a","bo_small_round_tower_a", []),
("small_round_tower_roof_a",0,"small_round_tower_roof_a","bo_small_round_tower_roof_a", []),
("square_keep_a",0,"square_keep_a","bo_square_keep_a", []),
("square_tower_roof_a",0,"square_tower_roof_a","0", []),
("gate_house_a",0,"gate_house_a","bo_gate_house_a", []),
("gate_house_b",0,"gate_house_b","bo_gate_house_b", []),
("small_wall_a",0,"small_wall_a","bo_small_wall_a", []),
("small_wall_b",0,"small_wall_b","bo_small_wall_b", []),
("small_wall_c",0,"small_wall_c","bo_small_wall_c", []),
("small_wall_c_destroy",0,"small_wall_c_destroy","bo_small_wall_c_destroy", []),
("small_wall_d",0,"small_wall_d","bo_small_wall_d", []),
("small_wall_e",0,"small_wall_e","bo_small_wall_d", []),
("small_wall_f",0,"small_wall_f","bo_small_wall_f", []),
("small_wall_f2",0,"small_wall_f2","bo_small_wall_f2", []),
("town_house_a",0,"town_house_a","bo_town_house_a", []),
("town_house_b",0,"town_house_b","bo_town_house_b", []),
("town_house_c",0,"town_house_c","bo_town_house_c", []),
("town_house_d",0,"town_house_d","bo_town_house_d", []),
("town_house_e",0,"town_house_e","bo_town_house_e", []),
("town_house_f",0,"town_house_f","bo_town_house_f", []),
("town_house_g",0,"town_house_g","bo_town_house_g", []),
("town_house_h",0,"town_house_h","bo_town_house_h", []),
("town_house_i",0,"town_house_i","bo_town_house_i", []),
("town_house_j",0,"town_house_j","bo_town_house_j", []),
("town_house_l",0,"town_house_l","bo_town_house_l", []),
("town_house_m",0,"town_house_m","bo_town_house_m", []),
("town_house_n",0,"town_house_n","bo_town_house_n", []),
("town_house_o",0,"town_house_o","bo_town_house_o", []),
("town_house_p",0,"town_house_p","bo_town_house_p", []),
("town_house_q",0,"town_house_q","bo_town_house_q", []),
("passage_house_a",0,"passage_house_a","bo_passage_house_a", []),
("passage_house_b",0,"passage_house_b","bo_passage_house_b", []),
("passage_house_c",0,"passage_house_c","bo_passage_house_c", []),
("passage_house_d",0,"passage_house_d","bo_passage_house_d", []),
("passage_house_c_door",0,"passage_house_c_door","bo_passage_house_c_door", []),
("house_extension_a",0,"house_extension_a","bo_house_extension_a", []),
("house_extension_b",0,"house_extension_b","bo_house_extension_b", []),
("house_extension_c",0,"house_extension_c","bo_house_extension_a", []),#reuse
("house_extension_d",0,"house_extension_d","bo_house_extension_d", []),
("house_extension_e",0,"house_extension_e","bo_house_extension_e", []),
("house_extension_f",0,"house_extension_f","bo_house_extension_f", []),
("house_extension_f2",0,"house_extension_f2","bo_house_extension_f", []),
("house_extension_g",0,"house_extension_g","bo_house_extension_g", []),
("house_extension_g2",0,"house_extension_g2","bo_house_extension_g", []),
("house_extension_h",0,"house_extension_h","bo_house_extension_h", []),
("house_extension_i",0,"house_extension_i","bo_house_extension_i", []),
("house_roof_door",0,"house_roof_door","bo_house_roof_door", []),
("door_extension_a",0,"door_extension_a","bo_door_extension_a", []),
("stairs_arch_a",sokf_type_ladder,"stairs_arch_a","bo_stairs_arch_a", []),
("town_house_r",0,"town_house_r","bo_town_house_r", []),
("town_house_s",0,"town_house_s","bo_town_house_s", []),
("town_house_t",0,"town_house_t","bo_town_house_t", []),
("town_house_u",0,"town_house_u","bo_town_house_u", []),
("town_house_v",0,"town_house_v","bo_town_house_v", []),
("town_house_w",0,"town_house_w","bo_town_house_w", []),
("town_house_y",0,"town_house_y","bo_town_house_y", []),
("town_house_z",0,"town_house_z","bo_town_house_z", []),
("town_house_za",0,"town_house_za","bo_town_house_za", []),
("windmill",0,"windmill","bo_windmill", []),
("windmill_fan_turning",sokf_moveable,"windmill_fan_turning","bo_windmill_fan_turning", []),
("windmill_fan",0,"windmill_fan","bo_windmill_fan", []),
("fake_house_a",0,"fake_house_a","bo_fake_house_a", []),
("fake_house_b",0,"fake_house_b","bo_fake_house_b", []),
("fake_house_c",0,"fake_house_c","bo_fake_house_c", []),
("fake_house_d",0,"fake_house_d","bo_fake_house_d", []),
("fake_house_e",0,"fake_house_e","bo_fake_house_e", []),
("fake_house_f",0,"fake_house_f","bo_fake_house_f", []),
("fake_house_snowy_a",0,"fake_house_snowy_a","bo_fake_house_a", []),
("fake_house_snowy_b",0,"fake_house_snowy_b","bo_fake_house_b", []),
("fake_house_snowy_c",0,"fake_house_snowy_c","bo_fake_house_c", []),
("fake_house_snowy_d",0,"fake_house_snowy_d","bo_fake_house_d", []),
("fake_house_far_a",0,"fake_house_far_a","0", []),
("fake_house_far_b",0,"fake_house_far_b","0", []),
("fake_house_far_c",0,"fake_house_far_c","0", []),
("fake_house_far_d",0,"fake_house_far_d","0", []),
("fake_house_far_e",0,"fake_house_far_e","0", []),
("fake_house_far_f",0,"fake_house_far_f","0", []),
("fake_house_far_snowycrude_a",0,"fake_house_far_snowy_a","0", []),
("fake_house_far_snowy_b",0,"fake_house_far_snowy_b","0", []),
("fake_house_far_snowy_c",0,"fake_house_far_snowy_c","0", []),
("fake_house_far_snowy_d",0,"fake_house_far_snowy_d","0", []),
("earth_wall_a",0,"earth_wall_a","bo_earth_wall_a", []),
("earth_wall_a2",0,"earth_wall_a2","bo_earth_wall_a2", []),
("earth_wall_b",0,"earth_wall_b","bo_earth_wall_b", []),
("earth_wall_b2",0,"earth_wall_b2","bo_earth_wall_b2", []),
("earth_stairs_a",sokf_type_ladder,"earth_stairs_a","bo_earth_stairs_a", []),
("earth_stairs_b",sokf_type_ladder,"earth_stairs_b","bo_earth_stairs_b", []),
("earth_tower_small_a",0,"earth_tower_small_a","bo_earth_tower_small_a", []),
("earth_gate_house_a",0,"earth_gate_house_a","bo_earth_gate_house_a", []),
("earth_gate_a",0,"earth_gate_a","bo_earth_gate_a", []),
("earth_square_keep_a",0,"earth_square_keep_a","bo_earth_square_keep_a", []),
("earth_house_a",0,"earth_house_a","bo_earth_house_a", []),
("earth_house_b",0,"earth_house_b","bo_earth_house_b", []),
("earth_house_c",0,"earth_house_c","bo_earth_house_c", []),
("earth_house_d",0,"earth_house_d","bo_earth_house_d", []),
("village_steppe_a",0,"village_steppe_a","bo_village_steppe_a", []),
("village_steppe_b",0,"village_steppe_b","bo_village_steppe_b", []),
("village_steppe_c",0,"village_steppe_c","bo_village_steppe_c", []),
("village_steppe_d",0,"village_steppe_d","bo_village_steppe_d", []),
("village_steppe_e",0,"village_steppe_e","bo_village_steppe_e", []),
("village_steppe_f",0,"village_steppe_f","bo_village_steppe_f", []),
("town_house_aa",0,"town_house_aa","bo_town_house_aa", []),
("snowy_house_a",0,"snowy_house_a","bo_snowy_house_a", []),
("snowy_house_b",0,"snowy_house_b","bo_snowy_house_b", []),
("snowy_house_c",0,"snowy_house_c","bo_snowy_house_c", []),
("snowy_house_d",0,"snowy_house_d","bo_snowy_house_d", []),
("snowy_house_e",0,"snowy_house_e","bo_snowy_house_e", []),
("snowy_house_f",0,"snowy_house_f","bo_snowy_house_f", []),
("snowy_house_g",0,"snowy_house_g","bo_snowy_house_g", []),
("snowy_house_h",0,"snowy_house_h","bo_snowy_house_h", []),
("snowy_house_i",0,"snowy_house_i","bo_snowy_house_i", []),
("snowy_wall_a",0,"snowy_wall_a","bo_snowy_wall_a", []),
("snowy_stand",0,"snowy_stand","bo_snowy_stand", []),
("snowy_heap_a",0,"snowy_heap_a","bo_snowy_heap_a", []),
("snowy_trunks_a",0,"snowy_trunks_a","bo_snowy_trunks_a", []),
("snowy_castle_tower_a",0,"snowy_castle_tower_a","bo_snowy_castle_tower_a", []),
("snowy_castle_battlement_a",0,"snowy_castle_battlement_a","bo_snowy_castle_battlement_a", []),
("snowy_castle_battlement_a_destroyed",0,"snowy_castle_battlement_a_destroyed","bo_snowy_castle_battlement_a_destroyed", []),
("snowy_castle_battlement_b",0,"snowy_castle_battlement_b","bo_snowy_castle_battlement_b", []),
("snowy_castle_battlement_corner_a",0,"snowy_castle_battlement_corner_a","bo_snowy_castle_battlement_corner_a", []),
("snowy_castle_battlement_corner_b",0,"snowy_castle_battlement_corner_b","bo_snowy_castle_battlement_corner_b", []),
("snowy_castle_battlement_corner_c",0,"snowy_castle_battlement_corner_c","bo_snowy_castle_battlement_corner_c", []),
("snowy_castle_battlement_stairs_a",0,"snowy_castle_battlement_stairs_a","bo_snowy_castle_battlement_stairs_a", []),
("snowy_castle_battlement_stairs_b",0,"snowy_castle_battlement_stairs_b","bo_snowy_castle_battlement_stairs_b", []),
("snowy_castle_gate_house_a",0,"snowy_castle_gate_house_a","bo_snowy_castle_gate_house_a", []),
("snowy_castle_round_tower_a",0,"snowy_castle_round_tower_a","bo_snowy_castle_round_tower_a", []),
("snowy_castle_square_keep_a",0,"snowy_castle_square_keep_a","bo_snowy_castle_square_keep_a", []),
("snowy_castle_stairs_a",sokf_type_ladder,"snowy_castle_stairs_a","bo_snowy_castle_stairs_a", []),
("square_keep_b",0,"square_keep_b","bo_square_keep_b", []),
("square_keep_c",0,"square_keep_c","bo_square_keep_c", []),
("square_keep_d",0,"square_keep_d","bo_square_keep_d", []),
("square_keep_e",0,"square_keep_e","bo_square_keep_e", []),
("square_keep_f",0,"square_keep_f","bo_square_keep_f", []),
("square_extension_a",0,"square_extension_a","bo_square_extension_a", []),
("square_stairs_a",0,"square_stairs_a","bo_square_stairs_a", []),
("castle_courtyard_house_a",0,"castle_courtyard_house_a","bo_castle_courtyard_house_a", []),
("castle_courtyard_house_b",0,"castle_courtyard_house_b","bo_castle_courtyard_house_b", []),
("castle_courtyard_house_c",0,"castle_courtyard_house_c","bo_castle_courtyard_house_c", []),
("castle_courtyard_a",0,"castle_courtyard_a","bo_castle_courtyard_a", []),
("gatehouse_b",0,"gatehouse_b","bo_gatehouse_b", []),
("castle_gaillard",0,"castle_gaillard","bo_castle_gaillard", []),
("castle_e_battlement_a",0,"castle_e_battlement_a","bo_castle_e_battlement_a", []),
("castle_e_battlement_c",0,"castle_e_battlement_c","bo_castle_e_battlement_c", []),
("castle_e_battlement_a_destroyed",0,"castle_e_battlement_a_destroyed","bo_castle_e_battlement_a_destroyed", []),
("castle_e_sally_door_a",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(0),"castle_e_sally_door_a","bo_castle_e_sally_door_a", [
check_sally_door_use_trigger,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 3000),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
#(assign, reg0, ":z_difference"),
#(display_message, "@{!}z dif : {reg0}"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
           (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(try_end),
]),
]),
("castle_e_corner",0,"castle_e_corner","bo_castle_e_corner", []),
("castle_e_corner_b",0,"castle_e_corner_b","bo_castle_e_corner_b", []),
("castle_e_corner_c",0,"castle_e_corner_c","bo_castle_e_corner_c", []),
("castle_e_stairs_a",0,"castle_e_stairs_a","bo_castle_e_stairs_a", []),
("castle_e_tower",0,"castle_e_tower","bo_castle_e_tower", []),
("castle_e_gate_house_a",0,"castle_e_gate_house_a","bo_castle_e_gate_house_a", []),
("castle_e_keep_a",0,"castle_e_keep_a","bo_castle_e_keep_a", []),
("stand_thatched",0,"stand_thatched","bo_stand_thatched", []),
("stand_cloth",0,"stand_cloth","bo_stand_cloth", []),
("castle_e_house_a",0,"castle_e_house_a","bo_castle_e_house_a", []),
("castle_e_house_b",0,"castle_e_house_b","bo_castle_e_house_b", []),
("arena_block_a",0,"arena_block_a","bo_arena_block_ab", []),
("arena_block_b",0,"arena_block_b","bo_arena_block_ab", []),
("arena_block_c",0,"arena_block_c","bo_arena_block_c", []),
("arena_block_d",0,"arena_block_d","bo_arena_block_def", []),
("arena_block_e",0,"arena_block_e","bo_arena_block_def", []),
("arena_block_f",0,"arena_block_f","bo_arena_block_def", []),
("arena_block_g",0,"arena_block_g","bo_arena_block_ghi", []),
("arena_block_h",0,"arena_block_h","bo_arena_block_ghi", []),
("arena_block_i",0,"arena_block_i","bo_arena_block_ghi", []),
("arena_block_j",0,"arena_block_j","bo_arena_block_j", []),
("arena_block_j_awning",0,"arena_block_j_awning","bo_arena_block_j_awning", []),
("arena_palisade_a",0,"arena_palisade_a","bo_arena_palisade_a", []),
("arena_wall_a",0,"arena_wall_a","bo_arena_wall_ab", []),
("arena_wall_b",0,"arena_wall_b","bo_arena_wall_ab", []),
("arena_barrier_a",0,"arena_barrier_a","bo_arena_barrier_a", []),
("arena_barrier_b",0,"arena_barrier_b","bo_arena_barrier_bc", []),
("arena_barrier_c",0,"arena_barrier_c","bo_arena_barrier_bc", []),
("arena_tower_a",0,"arena_tower_a","bo_arena_tower_abc", []),
("arena_tower_b",0,"arena_tower_b","bo_arena_tower_abc", []),
("arena_tower_c",0,"arena_tower_c","bo_arena_tower_abc", []),
("arena_spectator_a",0,"arena_spectator_a","0", []),
("arena_spectator_b",0,"arena_spectator_b","0", []),
("arena_spectator_c",0,"arena_spectator_c","0", []),
("arena_spectator_sitting_a",0,"arena_spectator_sitting_a","0", []),
("arena_spectator_sitting_b",0,"arena_spectator_sitting_b","0", []),
("arena_spectator_sitting_c",0,"arena_spectator_sitting_c","0", []),
("courtyard_gate_a",0,"courtyard_entry_a","bo_courtyard_entry_a", []),
("courtyard_gate_b",0,"courtyard_entry_b","bo_courtyard_entry_b", []),
("courtyard_gate_c",0,"courtyard_entry_c","bo_courtyard_entry_c", []),
("courtyard_gate_snowy",0,"courtyard_entry_snowy","bo_courtyard_entry_a", []),
("castle_tower_a",0,"castle_tower_a","bo_castle_tower_a", []),
("castle_battlement_a",0,"castle_battlement_a","bo_castle_battlement_a", []),
("castle_battlement_b",0,"castle_battlement_b","bo_castle_battlement_b", []),
("castle_battlement_c",0,"castle_battlement_c","bo_castle_battlement_c", []),
("castle_battlement_a_destroyed",0,"castle_battlement_a_destroyed","bo_castle_battlement_a_destroyed", []),
("castle_battlement_b_destroyed",0,"castle_battlement_b_destroyed","bo_castle_battlement_b_destroyed", []),
("castle_battlement_corner_a",0,"castle_battlement_corner_a","bo_castle_battlement_corner_a", []),
("castle_battlement_corner_b",0,"castle_battlement_corner_b","bo_castle_battlement_corner_b", []),
("castle_battlement_corner_c",0,"castle_battlement_corner_c","bo_castle_battlement_corner_c", []),
("castle_battlement_stairs_a",0,"castle_battlement_stairs_a","bo_castle_battlement_stairs_a", []),
("castle_battlement_stairs_b",0,"castle_battlement_stairs_b","bo_castle_battlement_stairs_b", []),
("castle_gate_house_a",0,"castle_gate_house_a","bo_castle_gate_house_a", []),
("castle_round_tower_a",0,"castle_round_tower_a","bo_castle_round_tower_a", []),
("castle_square_keep_a",0,"castle_square_keep_a","bo_castle_square_keep_a", []),
("castle_stairs_a",sokf_type_ladder,"castle_stairs_a","bo_castle_stairs_a", []),
("castle_drawbridge_open",0,"castle_drawbridges_open","bo_castle_drawbridges_open", []),
("castle_drawbridge_closed",0,"castle_drawbridges_closed","bo_castle_drawbridges_closed", []),
("spike_group_a",0,"spike_group_a","bo_spike_group_a", []),
("spike_a",0,"spike_a","bo_spike_a", []),
("belfry_a",sokf_moveable,"belfry_a","bo_belfry_a", []),
("belfry_b",sokf_moveable,"belfry_b","bo_belfry_b", []),
("belfry_b_platform_a",sokf_moveable,"belfry_b_platform_a","bo_belfry_b_platform_a", []),
("belfry_old",0,"belfry_a","bo_belfry_a", []),
("belfry_platform_a",sokf_moveable,"belfry_platform_a","bo_belfry_platform_a", []),
("belfry_platform_b",sokf_moveable,"belfry_platform_b","bo_belfry_platform_b", []),
("belfry_platform_old",0,"belfry_platform_b","bo_belfry_platform_b", []),
("belfry_wheel",sokf_moveable,"belfry_wheel",0, []),
("belfry_wheel_old",0,"belfry_wheel",0, []),
("mangonel",0,"mangonel","bo_mangonel", []),
("trebuchet_old",0,"trebuchet_old","bo_trebuchet_old", []),
("trebuchet_new",0,"trebuchet_new","bo_trebuchet_old", []),
("trebuchet_destructible",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible,"trebuchet_new","bo_trebuchet_old", [
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 2400),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(prop_instance_get_position, pos1, ":instance_no"),
(particle_system_burst, "psys_dummy_smoke_big", pos1, 100),
(particle_system_burst, "psys_dummy_straw_big", pos1, 100),
(position_move_z, pos1, -500),
(position_rotate_x, pos1, 90),
          (prop_instance_animate_to_position, ":instance_no", pos1, 300), #animate to 5 meters below the ground in 3 seconds
(try_begin),
(eq, "$g_round_ended", 0),
(scene_prop_get_team, ":scene_prop_team_no", ":instance_no"),
(try_begin),
(eq, ":scene_prop_team_no", 0),
(assign, ":scene_prop_team_no_multiplier", -1),
(else_try),
(assign, ":scene_prop_team_no_multiplier", 1),
(try_end),
(try_begin),
(eq, "$g_number_of_targets_destroyed", 0),
(store_mul, ":target_no_mul_scene_prop_team", ":scene_prop_team_no_multiplier", 2), #2 means destroyed object is a trebuchet
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_show_multiplayer_message", multiplayer_message_type_target_destroyed, ":target_no_mul_scene_prop_team"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"),
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_show_multiplayer_message, multiplayer_message_type_target_destroyed, ":target_no_mul_scene_prop_team"),
(try_end),
(val_add, "$g_number_of_targets_destroyed", 1),
(else_try),
(store_mul, ":target_no_mul_scene_prop_team", ":scene_prop_team_no_multiplier", 9), #9 means attackers destroyed all targets
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_show_multiplayer_message", multiplayer_message_type_target_destroyed, ":target_no_mul_scene_prop_team"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"),
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_show_multiplayer_message, multiplayer_message_type_target_destroyed, ":target_no_mul_scene_prop_team"),
(try_end),
(val_add, "$g_number_of_targets_destroyed", 1),
(try_end),
(try_end),
#giving gold for destroying target (for trebuchet)
#step-1 calculating total damage given to that scene prop
(assign, ":total_damage_given", 0),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(try_begin),
(eq, "spr_trebuchet_destructible", "$g_destructible_target_1"),
(player_get_slot, ":damage_given", ":player_no", slot_player_damage_given_to_target_1),
(else_try),
(player_get_slot, ":damage_given", ":player_no", slot_player_damage_given_to_target_2),
(try_end),
(val_add, ":total_damage_given", ":damage_given"),
(try_end),
           #step-2: share up to 1000 gold (50 per active player, capped at multi_destroy_target_money_add and scaled by the battle earnings multiplier) among the players that damaged the target, in proportion to the damage each dealt.
#(scene_prop_get_max_hit_points, ":max_hit_points", ":instance_no"),
(assign, ":destroy_money_addition", 0),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(val_add, ":destroy_money_addition", 50),
(try_end),
(try_begin),
(ge, ":destroy_money_addition", multi_destroy_target_money_add),
(assign, ":destroy_money_addition", multi_destroy_target_money_add),
(try_end),
(val_mul, ":destroy_money_addition", "$g_multiplayer_battle_earnings_multiplier"),
(val_div, ":destroy_money_addition", 100),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(try_begin),
(eq, "spr_trebuchet_destructible", "$g_destructible_target_1"),
(player_get_slot, ":damage_given", ":player_no", slot_player_damage_given_to_target_1),
(else_try),
(player_get_slot, ":damage_given", ":player_no", slot_player_damage_given_to_target_2),
(try_end),
             (player_get_gold, ":player_gold", ":player_no"), #give each damaging player its share of the destroy reward
(val_mul, ":damage_given", ":destroy_money_addition"),
(store_div, ":gold_earned", ":damage_given", ":total_damage_given"),
(val_add, ":player_gold", ":gold_earned"),
(player_set_gold, ":player_no", ":player_gold", multi_max_gold_that_can_be_stored),
(try_end),
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(set_fixed_point_multiplier, 1),
(position_get_x, ":attacker_agent_id", pos2),
(try_begin),
(ge, ":attacker_agent_id", 0),
(agent_is_alive, ":attacker_agent_id"),
(agent_is_human, ":attacker_agent_id"),
(neg|agent_is_non_player, ":attacker_agent_id"),
(agent_get_player_id, ":attacker_player_id", ":attacker_agent_id"),
(ge, ":attacker_player_id", 0),
(player_is_active, ":attacker_player_id"),
(try_begin),
(eq, "spr_trebuchet_destructible", "$g_destructible_target_1"),
(player_get_slot, ":damage_given", ":attacker_player_id", slot_player_damage_given_to_target_1),
(val_add, ":damage_given", ":damage"),
(player_set_slot, ":attacker_player_id", slot_player_damage_given_to_target_1, ":damage_given"),
(else_try),
(player_get_slot, ":damage_given", ":attacker_player_id", slot_player_damage_given_to_target_2),
(val_add, ":damage_given", ":damage"),
(player_set_slot, ":attacker_player_id", slot_player_damage_given_to_target_2, ":damage_given"),
(try_end),
(try_end),
(try_end),
]),
]),
("stone_ball",0,"stone_ball","0", []),
("village_house_a",0,"village_house_a","bo_village_house_a", []),
("village_house_b",0,"village_house_b","bo_village_house_b", []),
("village_house_c",0,"village_house_c","bo_village_house_c", []),
("village_house_d",0,"village_house_d","bo_village_house_d", []),
("farm_house_a",0,"farm_house_a","bo_farm_house_a", []),
("farm_house_b",0,"farm_house_b","bo_farm_house_b", []),
("farm_house_c",0,"farm_house_c","bo_farm_house_c", []),
("mountain_house_a",0,"mountain_house_a","bo_mountain_house_a", []),
("mountain_house_b",0,"mountain_house_b","bo_mountain_house_b", []),
("village_hut_a",0,"village_hut_a","bo_village_hut_a", []),
("crude_fence",0,"fence","bo_fence", []),
("crude_fence_small",0,"crude_fence_small","bo_crude_fence_small", []),
("crude_fence_small_b",0,"crude_fence_small_b","bo_crude_fence_small_b", []),
("ramp_12m",0,"ramp_12m","bo_ramp_12m", []),
("ramp_14m",0,"ramp_14m","bo_ramp_14m", []),
("siege_ladder_6m",sokf_type_ladder,"siege_ladder_move_6m","bo_siege_ladder_move_6m", []),
("siege_ladder_8m",sokf_type_ladder,"siege_ladder_move_8m","bo_siege_ladder_move_8m", []),
("siege_ladder_10m",sokf_type_ladder,"siege_ladder_move_10m","bo_siege_ladder_move_10m", []),
("siege_ladder_12m",sokf_type_ladder,"siege_ladder_12m","bo_siege_ladder_12m", []),
("siege_ladder_14m",sokf_type_ladder,"siege_ladder_14m","bo_siege_ladder_14m", []),
("siege_ladder_move_6m",sokf_type_ladder|sokf_moveable|spr_use_time(2),"siege_ladder_move_6m","bo_siege_ladder_move_6m", [
check_item_use_trigger,
check_ladder_animate_trigger,
check_ladder_animation_finish_trigger,
]),
("siege_ladder_move_8m",sokf_type_ladder|sokf_moveable|spr_use_time(2),"siege_ladder_move_8m","bo_siege_ladder_move_8m", [
check_item_use_trigger,
check_ladder_animate_trigger,
check_ladder_animation_finish_trigger,
]),
("siege_ladder_move_10m",sokf_type_ladder|sokf_moveable|spr_use_time(3),"siege_ladder_move_10m","bo_siege_ladder_move_10m", [
check_item_use_trigger,
check_ladder_animate_trigger,
check_ladder_animation_finish_trigger,
]),
("siege_ladder_move_12m",sokf_type_ladder|sokf_moveable|spr_use_time(3),"siege_ladder_move_12m","bo_siege_ladder_move_12m", [
check_item_use_trigger,
check_ladder_animate_trigger,
check_ladder_animation_finish_trigger,
]),
("siege_ladder_move_14m",sokf_type_ladder|sokf_moveable|spr_use_time(4),"siege_ladder_move_14m","bo_siege_ladder_move_14m", [
check_item_use_trigger,
check_ladder_animate_trigger,
check_ladder_animation_finish_trigger,
]),
("portcullis",sokf_moveable,"portcullis_a","bo_portcullis_a", []),
("bed_a",0,"bed_a","bo_bed_a", []),
("bed_b",0,"bed_b","bo_bed_b", []),
("bed_c",0,"bed_c","bo_bed_c", []),
("bed_d",0,"bed_d","bo_bed_d", []),
("bed_e",0,"bed_e","bo_bed_e", []),
("bed_f",0,"bed_f","bo_bed_f", []),
("towngate_door_left",sokf_moveable,"door_g_left","bo_door_left", []),
("towngate_door_right",sokf_moveable,"door_g_right","bo_door_right", []),
("towngate_rectangle_door_left",sokf_moveable,"towngate_rectangle_door_left","bo_towngate_rectangle_door_left", []),
("towngate_rectangle_door_right",sokf_moveable,"towngate_rectangle_door_right","bo_towngate_rectangle_door_right", []),
("door_screen",sokf_moveable,"door_screen","0", []),
("door_a",sokf_moveable,"door_a","bo_door_a", []),
("door_b",sokf_moveable,"door_b","bo_door_a", []),
("door_c",sokf_moveable,"door_c","bo_door_a", []),
("door_d",sokf_moveable,"door_d","bo_door_a", []),
("tavern_door_a",sokf_moveable,"tavern_door_a","bo_tavern_door_a", []),
("tavern_door_b",sokf_moveable,"tavern_door_b","bo_tavern_door_a", []),
("door_e_left",sokf_moveable,"door_e_left","bo_door_left", []),
("door_e_right",sokf_moveable,"door_e_right","bo_door_right", []),
("door_f_left",sokf_moveable,"door_f_left","bo_door_left", []),
("door_f_right",sokf_moveable,"door_f_right","bo_door_right", []),
("door_h_left",sokf_moveable,"door_g_left","bo_door_left", []),
("door_h_right",sokf_moveable,"door_g_right","bo_door_right", []),
("draw_bridge_a",0,"draw_bridge_a","bo_draw_bridge_a", []),
("chain_1m",0,"chain_1m","0", []),
("chain_2m",0,"chain_2m","0", []),
("chain_5m",0,"chain_5m","0", []),
("chain_10m",0,"chain_10m","0", []),
("bridge_modular_a",0,"bridge_modular_a","bo_bridge_modular_a", []),
("bridge_modular_b",0,"bridge_modular_b","bo_bridge_modular_b", []),
("church_a",0,"church_a","bo_church_a", []),
("church_tower_a",0,"church_tower_a","bo_church_tower_a", []),
("stone_step_a",0,"floor_stone_a","bo_floor_stone_a", []),
("stone_step_b",0,"stone_step_b","0", []),
("stone_step_c",0,"stone_step_c","0", []),
("stone_heap",0,"stone_heap","bo_stone_heap", []),
("stone_heap_b",0,"stone_heap_b","bo_stone_heap", []),
("panel_door_a",0,"house_door_a","bo_house_door_a", []),
("panel_door_b",0,"house_door_b","bo_house_door_a", []),
("smoke_stain",0,"soot_a","0", []),
("brazier_with_fire",0,"brazier","bo_brazier", [
(ti_on_scene_prop_init,
[
(set_position_delta,0,0,85),
(particle_system_add_new, "psys_brazier_fire_1"),
(particle_system_add_new, "psys_fire_sparks_1"),
(set_position_delta,0,0,100),
(particle_system_add_new, "psys_fire_glow_1"),
(particle_system_emit, "psys_fire_glow_1",9000000),
]),
]),
("cooking_fire",0,"fire_floor","0",
[
(ti_on_scene_prop_init,
[
(set_position_delta,0,0,12),
(particle_system_add_new, "psys_cooking_fire_1"),
(particle_system_add_new, "psys_fire_sparks_1"),
(particle_system_add_new, "psys_cooking_smoke"),
(set_position_delta,0,0,50),
(particle_system_add_new, "psys_fire_glow_1"),
(particle_system_emit, "psys_fire_glow_1",9000000),
]),
]),
("cauldron_a",0,"cauldron_a","bo_cauldron_a", []),
("fry_pan_a",0,"fry_pan_a","0", []),
("tripod_cauldron_a",0,"tripod_cauldron_a","bo_tripod_cauldron_a", []),
("tripod_cauldron_b",0,"tripod_cauldron_b","bo_tripod_cauldron_b", []),
("open_stable_a",0,"open_stable_a","bo_open_stable_a", []),
("open_stable_b",0,"open_stable_b","bo_open_stable_b", []),
("plate_a",0,"plate_a","0", []),
("plate_b",0,"plate_b","0", []),
("plate_c",0,"plate_c","0", []),
("lettuce",0,"lettuce","0", []),
("hanger",0,"hanger","0", []),
("knife_eating",0,"knife_eating","0", []),
("colander",0,"colander","0", []),
("ladle",0,"ladle","0", []),
("spoon",0,"spoon","0", []),
("skewer",0,"skewer","0", []),
("grape_a",0,"grape_a","0", []),
("grape_b",0,"grape_b","0", []),
("apple_a",0,"apple_a","0", []),
("apple_b",0,"apple_b","0", []),
("maize_a",0,"maize_a","0", []),
("maize_b",0,"maize_b","0", []),
("cabbage",0,"cabbage","0", []),
("flax_bundle",0,"raw_flax","0",[]),
("olive_plane",0,"olive_plane","0",[]),
("grapes_plane",0,"grapes_plane","0",[]),
("date_fruit_plane",0,"date_fruit_plane","0",[]),
("bowl",0,"bowl_big","0",[]),
("bowl_small",0,"bowl_small","0",[]),
("dye_blue",0,"raw_dye_blue","0",[]),
("dye_red",0,"raw_dye_red","0",[]),
("dye_yellow",0,"raw_dye_yellow","0",[]),
("basket",0,"basket_small","0",[]),
("basket_big",0,"basket_large","0",[]),
("basket_big_green",0,"basket_big","0",[]),
("leatherwork_frame",0,"leatherwork_frame","0", []),
("cabbage_b",0,"cabbage_b","0", []),
("bean",0,"bean","0", []),
("basket_a",0,"basket_a","bo_basket_a", []),
("feeding_trough_a",0,"feeding_trough_a","bo_feeding_trough_a", []),
("marrow_a",0,"marrow_a","0", []),
("marrow_b",0,"marrow_b","0", []),
("squash_plant",0,"marrow_c","0", []),
("gatehouse_new_a",0,"gatehouse_new_a","bo_gatehouse_new_a", []),
("gatehouse_new_b",0,"gatehouse_new_b","bo_gatehouse_new_b", []),
("gatehouse_new_snowy_a",0,"gatehouse_new_snowy_a","bo_gatehouse_new_b", []),
("winch",sokf_moveable,"winch","bo_winch", []),
("winch_b",sokf_moveable|spr_use_time(5),"winch_b","bo_winch", [
(ti_on_scene_prop_use,
[
(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":instance_id"),
#for only server itself-----------------------------------------------------------------------------------------------
(call_script, "script_use_item", ":instance_id", ":agent_id"),
#for only server itself-----------------------------------------------------------------------------------------------
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_use_item, ":instance_id", ":agent_id"),
(try_end),
]),
]),
("drawbridge",0,"drawbridge","bo_drawbridge", []),
("gatehouse_door_left",sokf_moveable,"gatehouse_door_left","bo_gatehouse_door_left", []),
("gatehouse_door_right",sokf_moveable,"gatehouse_door_right","bo_gatehouse_door_right", []),
("cheese_a",0,"cheese_a","0", []),
("cheese_b",0,"cheese_b","0", []),
("cheese_slice_a",0,"cheese_slice_a","0", []),
("bread_a",0,"bread_a","0", []),
("bread_b",0,"bread_b","0", []),
("bread_slice_a",0,"bread_slice_a","0", []),
("fish_a",0,"fish_a","0", []),
("fish_roasted_a",0,"fish_roasted_a","0", []),
("chicken_roasted",0,"chicken","0", []),
("food_steam",0,"0","0",
[
(ti_on_scene_prop_init,
[
(set_position_delta,0,0,0),
(particle_system_add_new, "psys_food_steam"),
]),
]),
########################
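  # Invisible ambience emitters: the props below have no mesh ("0") and only attach particle
  # systems on init; city_smoke and city_fire_fly_night check store_time_of_day and emit only at
  # night (outside 5:00-20:00).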
("city_smoke",0,"0","0",
[
(ti_on_scene_prop_init,
[
(store_time_of_day,reg(12)),
(neg|is_between,reg(12),5,20),
(set_position_delta,0,0,0),
(particle_system_add_new, "psys_night_smoke_1"),
]),
]),
("city_fire_fly_night",0,"0","0",
[
(ti_on_scene_prop_init,
[
(store_time_of_day,reg(12)),
(neg|is_between,reg(12),5,20),
(set_position_delta,0,0,0),
(particle_system_add_new, "psys_fire_fly_1"),
]),
]),
("city_fly_day",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_bug_fly_1"),
]),
]),
("flue_smoke_tall",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_flue_smoke_tall"),
]),
]),
("flue_smoke_short",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_flue_smoke_short"),
]),
]),
("moon_beam",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_moon_beam_1"),
(particle_system_add_new, "psys_moon_beam_paricle_1"),
]),
]),
("fire_small",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_fireplace_fire_small"),
]),
]),
("fire_big",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_fireplace_fire_big"),
]),
]),
("battle_field_smoke",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_war_smoke_tall"),
]),
]),
("Village_fire_big",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_village_fire_big"),
(set_position_delta,0,0,100),
(particle_system_add_new, "psys_village_fire_smoke_big"),
]),
]),
#########################
("candle_a",0,"candle_a","0",
[
(ti_on_scene_prop_init,
[
(set_position_delta,0,0,27),
(particle_system_add_new, "psys_candle_light"),
]),
]),
("candle_b",0,"candle_b","0",
[
(ti_on_scene_prop_init,
[
(set_position_delta,0,0,25),
(particle_system_add_new, "psys_candle_light"),
]),
]),
("candle_c",0,"candle_c","0", [
(ti_on_scene_prop_init,
[
(set_position_delta,0,0,10),
(particle_system_add_new, "psys_candle_light_small"),
]),
]),
("lamp_a",0,"lamp_a","0", [
(ti_on_scene_prop_init,
[
(set_position_delta,66,0,2),
(particle_system_add_new, "psys_candle_light"),
]),
]),
("lamp_b",0,"lamp_b","0", [
(ti_on_scene_prop_init,
[
(set_position_delta,65,0,-7),
(particle_system_add_new, "psys_lamp_fire"),
(set_position_delta,70,0,-5),
(particle_system_add_new, "psys_fire_glow_1"),
(particle_system_emit, "psys_fire_glow_1",9000000),
(play_sound, "snd_fire_loop", 0),
]),
]),
("hook_a",0,"hook_a","0", []),
("window_night",0,"window_night","0", []),
("fried_pig",0,"pork","0", []),
("village_oven",0,"village_oven","bo_village_oven", []),
("dungeon_water_drops",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_dungeon_water_drops"),
]),
]),
("shadow_circle_1",0,"shadow_circle_1","0", []),
("shadow_circle_2",0,"shadow_circle_2","0", []),
("shadow_square_1",0,"shadow_square_1","0", []),
("shadow_square_2",0,"shadow_square_2","0", []),
("wheelbarrow",0,"wheelbarrow","bo_wheelbarrow", []),
("gourd",sokf_moveable|sokf_destructible|spr_hit_points(1),"gourd","bo_gourd",
[
(ti_on_scene_prop_destroy,
[
(store_trigger_param_1, ":instance_no"),
(val_add, "$g_last_destroyed_gourds", 1),
(prop_instance_get_position, pos1, ":instance_no"),
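              # Editor's note (added comment): the prop is not deleted outright; the
              # operations below copy its position, push the copy far below the terrain
              # (z = -100000) and animate the gourd down to it, hiding the prop while
              # the smoke/piece particles and the destruction sound play.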
(copy_position, pos2, pos1),
(position_set_z, pos2, -100000),
(particle_system_burst, "psys_gourd_smoke", pos1, 2),
(particle_system_burst, "psys_gourd_piece_1", pos1, 1),
(particle_system_burst, "psys_gourd_piece_2", pos1, 5),
(prop_instance_animate_to_position, ":instance_no", pos2, 1),
(play_sound, "snd_gourd_destroyed"),
]),
]),
("gourd_spike",sokf_moveable,"gourd_spike","bo_gourd_spike",[]),
("obstacle_fence_1",0,"fence","bo_fence", []),
("obstacle_fallen_tree_a",0,"destroy_tree_a","bo_destroy_tree_a", []),
("obstacle_fallen_tree_b",0,"destroy_tree_b","bo_destroy_tree_b", []),
("siege_wall_a",0,"siege_wall_a","bo_siege_wall_a", []),
("siege_large_shield_a",0,"siege_large_shield_a","bo_siege_large_shield_a", []),
("granary_a",0,"granary_a","bo_granary_a", []),
("small_wall_connect_a",0,"small_wall_connect_a","bo_small_wall_connect_a", []),
("full_stable_a",0,"full_stable_a","bo_full_stable_a", []),
("full_stable_b",0,"full_stable_b","bo_full_stable_b", []),
("full_stable_c",0,"full_stable_c","bo_full_stable_c", []),
("full_stable_d",0,"full_stable_d","bo_full_stable_d", []),
("arabian_house_a",0,"arabian_house_a","bo_arabian_house_a", []),
("arabian_house_b",0,"arabian_house_b","bo_arabian_house_b", []),
("arabian_house_c",0,"arabian_house_c","bo_arabian_house_c", []),
("arabian_house_d",0,"arabian_house_d","bo_arabian_house_d", []),
("arabian_house_e",0,"arabian_house_e","bo_arabian_house_e", []),
("arabian_house_f",0,"arabian_house_f","bo_arabian_house_f", []),
("arabian_house_g",0,"arabian_house_g","bo_arabian_house_g", []),
("arabian_house_h",0,"arabian_house_h","bo_arabian_house_h", []),
("arabian_house_i",0,"arabian_house_i","bo_arabian_house_i", []),
("arabian_square_keep_a",0,"arabian_square_keep_a","bo_arabian_square_keep_a", []),
("arabian_passage_house_a",0,"arabian_passage_house_a","bo_arabian_passage_house_a", []),
("arabian_wall_a",0,"arabian_wall_a","bo_arabian_wall_a", []),
("arabian_wall_b",0,"arabian_wall_b","bo_arabian_wall_b", []),
("arabian_ground_a",0,"arabian_ground_a","bo_arabian_ground_a", []),
("arabian_parterre_a",0,"arabian_parterre_a","bo_arabian_parterre_a", []),
("well_shaft",0,"well_shaft","bo_well_shaft", []),
("horse_mill",0,"horse_mill","bo_horse_mill", []),
("horse_mill_collar",0,"horse_mill_collar","bo_horse_mill_collar", []),
("arabian_stable",0,"arabian_stable","bo_arabian_stable", []),
("arabian_tent",0,"arabian_tent","bo_arabian_tent", []),
("arabian_tent_b",0,"arabian_tent_b","bo_arabian_tent_b", []),
("desert_plant_a",0,"desert_plant_a","0", []),
("arabian_castle_battlement_a",0,"arabian_castle_battlement_a","bo_arabian_castle_battlement_a", []),
("arabian_castle_battlement_b_destroyed",0,"arabian_castle_battlement_b_destroyed","bo_arabian_castle_battlement_b_destroyed", []),
("arabian_castle_battlement_c",0,"arabian_castle_battlement_c","bo_arabian_castle_battlement_c", []),
("arabian_castle_battlement_d",0,"arabian_castle_battlement_d","bo_arabian_castle_battlement_d", []),
("arabian_castle_corner_a",0,"arabian_castle_corner_a","bo_arabian_castle_corner_a", []),
("arabian_castle_stairs",sokf_type_ladder,"arabian_castle_stairs","bo_arabian_castle_stairs", []),
("arabian_castle_stairs_b",sokf_type_ladder,"arabian_castle_stairs_b","bo_arabian_castle_stairs_b", []),
("arabian_castle_stairs_c",sokf_type_ladder,"arabian_castle_stairs_c","bo_arabian_castle_stairs_c", []),
("arabian_castle_battlement_section_a",0,"arabian_castle_battlement_section_a","bo_arabian_castle_battlement_section_a", []),
("arabian_castle_gate_house_a",0,"arabian_castle_gate_house_a","bo_arabian_castle_gate_house_a", []),
("arabian_castle_house_a",0,"arabian_castle_house_a","bo_arabian_castle_house_a", []),
("arabian_castle_house_b",0,"arabian_castle_house_b","bo_arabian_castle_house_b", []),
("arabian_castle_keep_a",0,"arabian_castle_keep_a","bo_arabian_castle_keep_a", []),
("arabian_house_a2",0,"arabian_house_a2","bo_arabian_house_a2", []),
("arabian_village_house_a",0,"arabian_village_house_a","bo_arabian_village_house_a", []),
("arabian_village_house_b",0,"arabian_village_house_b","bo_arabian_village_house_b", []),
("arabian_village_house_c",0,"arabian_village_house_c","bo_arabian_village_house_c", []),
("arabian_village_house_d",0,"arabian_village_house_d","bo_arabian_village_house_d", []),
("arabian_village_stable",0,"arabian_village_stable","bo_arabian_village_stable", []),
("arabian_village_hut",0,"arabian_village_hut","bo_arabian_village_hut", []),
("arabian_village_stairs",sokf_type_ladder,"arabian_village_stairs","bo_arabian_village_stairs", []),
("tree_a01",0,"tree_a01","bo_tree_a01", []),
("stairs_a",sokf_type_ladder,"stairs_a","bo_stairs_a", []),
("headquarters_flag_red",sokf_moveable|sokf_face_player,"tutorial_flag_red","0", []),
("headquarters_flag_blue",sokf_moveable|sokf_face_player,"tutorial_flag_blue","0", []),
("headquarters_flag_gray",sokf_moveable|sokf_face_player,"tutorial_flag_yellow","0", []),
("headquarters_flag_red_code_only",sokf_moveable|sokf_face_player,"mp_flag_red","0", []),
("headquarters_flag_blue_code_only",sokf_moveable|sokf_face_player,"mp_flag_blue","0", []),
("headquarters_flag_gray_code_only",sokf_moveable|sokf_face_player,"mp_flag_white","0", []),
("headquarters_pole_code_only",sokf_moveable,"mp_flag_pole","0", []),
("headquarters_flag_swadian",sokf_moveable|sokf_face_player,"flag_swadian","0", []),
("headquarters_flag_vaegir",sokf_moveable|sokf_face_player,"flag_vaegir","0", []),
("headquarters_flag_khergit",sokf_moveable|sokf_face_player,"flag_khergit","0", []),
("headquarters_flag_nord",sokf_moveable|sokf_face_player,"flag_nord","0", []),
("headquarters_flag_rhodok",sokf_moveable|sokf_face_player,"flag_rhodok","0", []),
("headquarters_flag_sarranid",sokf_moveable|sokf_face_player,"flag_sarranid","0", []),
("glow_a", 0, "glow_a", "0", []),
("glow_b", 0, "glow_b", "0", []),
("arabian_castle_corner_b",0,"arabian_castle_corner_b","bo_arabian_castle_corner_b", []),
("dummy_a_undestructable",sokf_destructible,"arena_archery_target_b","bo_arena_archery_target_b",
[
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 10000000),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(set_fixed_point_multiplier, 1),
(position_get_x, ":attacker_agent_id", pos2),
(get_player_agent_no, ":player_agent"),
(eq, ":player_agent", ":attacker_agent_id"),
(assign, reg60, ":damage"),
(display_message, "str_delivered_damage"),
(eq, "$g_tutorial_training_ground_horseman_trainer_state", 6),
(eq, "$g_tutorial_training_ground_horseman_trainer_completed_chapters", 1),
(prop_instance_get_variation_id_2, ":var_id_2", ":instance_no"),
(val_sub, ":var_id_2", 1),
(eq, "$g_tutorial_training_ground_current_score", ":var_id_2"),
(val_add, "$g_tutorial_training_ground_current_score", 1),
(try_end),
(play_sound, "snd_dummy_hit"),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
]),
]),
("cave_entrance_1",0,"cave_entrance_1","bo_cave_entrance_1", []),
("pointer_arrow", 0, "pointer_arrow", "0", []),
("fireplace_d_interior",0,"fireplace_d","bo_fireplace_d", []),
("ship_sail_off",0,"ship_sail_off","bo_ship_sail_off", []),
("ship_sail_off_b",0,"ship_sail_off_b","bo_ship_sail_off", []),
("ship_c_sail_off",0,"ship_c_sail_off","bo_ship_c_sail_off", []),
("ramp_small_a",0,"ramp_small_a","bo_ramp_small_a", []),
("castle_g_battlement_b",0,"castle_g_battlement_b","bo_castle_g_battlement_b", []),
("box_a_dynamic",sokf_moveable|sokf_dynamic_physics,"box_a","bo_box_a", []),
("desert_field",0,"desert_field","bo_desert_field", []),
("water_river",0,"water_plane","0", []),
("viking_house_a",0,"viking_house_a","bo_viking_house_a", []),
("viking_house_b",0,"viking_house_b","bo_viking_house_b", []),
("viking_house_c",0,"viking_house_c","bo_viking_house_c", []),
("viking_house_d",0,"viking_house_d","bo_viking_house_d", []),
("viking_house_e",0,"viking_house_e","bo_viking_house_e", []),
("viking_stable_a",0,"viking_stable_a","bo_viking_stable_a", []),
("viking_keep",0,"viking_keep","bo_viking_keep", []),
("viking_house_c_destroy",0,"viking_house_c_destroy","bo_viking_house_c_destroy", []),
("viking_house_b_destroy",0,"viking_house_b_destroy","bo_viking_house_b_destroy", []),
("harbour_a",0,"harbour_a","bo_harbour_a", []),
("sea_foam_a",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_sea_foam_a"),
]),
]),
("viking_keep_destroy",0,"viking_keep_destroy","bo_viking_keep_destroy", []),
("viking_keep_destroy_door",0,"viking_keep_destroy_door","bo_viking_keep_destroy_door", []),
("earth_tower_small_b",0,"earth_tower_small_b","bo_earth_tower_small_b", []),
("earth_gate_house_b",0,"earth_gate_house_b","bo_earth_gate_house_b", []),
("earth_tower_a",0,"earth_tower_a","bo_earth_tower_a", []),
("earth_stairs_c",0,"earth_stairs_c","bo_earth_stairs_c", []),
("earth_sally_gate_left",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(0),"earth_sally_gate_left","bo_earth_sally_gate_left", [
check_sally_door_use_trigger_double,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 2000),
]),
(ti_on_scene_prop_destroy,
[
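          # Editor's note (added comment): the block below makes the destroyed door fall
          # over. It starts from a roll of roughly 86 degrees around the prop's local X
          # axis, flips the sign if the attacker stands behind the door, samples the
          # ground height just in front of (or behind) the hinge to add extra rotation
          # on uneven terrain, and finally animates the prop to the tilted position.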
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
          (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(try_end),
]),
]),
("earth_sally_gate_right",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(0),"earth_sally_gate_right","bo_earth_sally_gate_right", [
check_sally_door_use_trigger_double,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 2000),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
          (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(try_end),
]),
]),
#("earth_sally_gate_left",0,"earth_sally_gate_left","bo_earth_sally_gate_left", []),
#("earth_sally_gate_right",0,"earth_sally_gate_right","bo_earth_sally_gate_right", []),
("barrier_box",sokf_invisible|sokf_type_barrier3d,"barrier_box","bo_barrier_box", []),
("barrier_capsule",sokf_invisible|sokf_type_barrier3d,"barrier_capsule","bo_barrier_capsule", []),
("barrier_cone" ,sokf_invisible|sokf_type_barrier3d,"barrier_cone" ,"bo_barrier_cone" , []),
("barrier_sphere" ,sokf_invisible|sokf_type_barrier3d,"barrier_sphere" ,"bo_barrier_sphere" , []),
("viking_keep_destroy_sally_door_right",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(0),"viking_keep_destroy_sally_door_right","bo_viking_keep_destroy_sally_door_right", [
check_sally_door_use_trigger_double,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 3000),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
          (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(try_end),
]),
]),
("viking_keep_destroy_sally_door_left",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(0),"viking_keep_destroy_sally_door_left","bo_viking_keep_destroy_sally_door_left", [
check_sally_door_use_trigger_double,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 3000),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
          (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(try_end),
]),
]),
("castle_f_door_b",sokf_moveable|sokf_show_hit_point_bar|sokf_destructible|spr_use_time(0),"castle_e_sally_door_a","bo_castle_e_sally_door_a", [
check_castle_door_use_trigger,
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 1000),
]),
(ti_on_scene_prop_destroy,
[
(play_sound, "snd_dummy_destroyed"),
(assign, ":rotate_side", 86),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":attacker_agent_no"),
(set_fixed_point_multiplier, 100),
(prop_instance_get_position, pos1, ":instance_no"),
(try_begin),
(ge, ":attacker_agent_no", 0),
(agent_get_position, pos2, ":attacker_agent_no"),
(try_begin),
(position_is_behind_position, pos2, pos1),
(val_mul, ":rotate_side", -1),
(try_end),
(try_end),
(init_position, pos3),
(try_begin),
(ge, ":rotate_side", 0),
(position_move_y, pos3, -100),
(else_try),
(position_move_y, pos3, 100),
(try_end),
(position_move_x, pos3, -50),
(position_transform_position_to_parent, pos4, pos1, pos3),
(position_move_z, pos4, 100),
(position_get_distance_to_ground_level, ":height_to_terrain", pos4),
(val_sub, ":height_to_terrain", 100),
(assign, ":z_difference", ":height_to_terrain"),
#(assign, reg0, ":z_difference"),
#(display_message, "@{!}z dif : {reg0}"),
(val_div, ":z_difference", 3),
(try_begin),
(ge, ":rotate_side", 0),
(val_add, ":rotate_side", ":z_difference"),
(else_try),
(val_sub, ":rotate_side", ":z_difference"),
(try_end),
(position_rotate_x, pos1, ":rotate_side"),
          (prop_instance_animate_to_position, ":instance_no", pos1, 70), #animate to position 1 in 0.7 seconds
(try_end),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(try_end),
]),
]),
("ctf_flag_kingdom_1", sokf_moveable|sokf_face_player, "ctf_flag_kingdom_1", "0", []),
("ctf_flag_kingdom_2", sokf_moveable|sokf_face_player, "ctf_flag_kingdom_2", "0", []),
("ctf_flag_kingdom_3", sokf_moveable|sokf_face_player, "ctf_flag_kingdom_3", "0", []),
("ctf_flag_kingdom_4", sokf_moveable|sokf_face_player, "ctf_flag_kingdom_4", "0", []),
("ctf_flag_kingdom_5", sokf_moveable|sokf_face_player, "ctf_flag_kingdom_5", "0", []),
("ctf_flag_kingdom_6", sokf_moveable|sokf_face_player, "ctf_flag_kingdom_6", "0", []),
("ctf_flag_kingdom_7", sokf_moveable|sokf_face_player, "ctf_flag_kingdom_7", "0", []),
("headquarters_flag_rebel",sokf_moveable|sokf_face_player,"flag_rebel","0", []),
("arabian_lighthouse_a",0,"arabian_lighthouse_a","bo_arabian_lighthouse_a", []),
("arabian_ramp_a",0,"arabian_ramp_a","bo_arabian_ramp_a", []),
("arabian_ramp_b",0,"arabian_ramp_b","bo_arabian_ramp_b", []),
("winery_interior",0,"winery_interior","bo_winery_interior", []),
("winery_barrel_shelf",0,"winery_barrel_shelf","bo_winery_barrel_shelf", []),
("winery_wall_shelf",0,"winery_wall_shelf","bo_winery_wall_shelf", []),
("winery_huge_barrel",0,"winery_huge_barrel","bo_winery_huge_barrel", []),
("winery_wine_press",0,"winery_wine_press","bo_winery_wine_press", []),
("winery_middle_barrel",0,"winery_middle_barrel","bo_winery_middle_barrel", []),
("winery_wine_cart_small_loaded",0,"winery_wine_cart_small_loaded","bo_winery_wine_cart_small_loaded", []),
("winery_wine_cart_small_empty",0,"winery_wine_cart_small_empty","bo_winery_wine_cart_small_empty", []),
("winery_wine_cart_empty",0,"winery_wine_cart_empty","bo_winery_wine_cart_empty", []),
("winery_wine_cart_loaded",0,"winery_wine_cart_loaded","bo_winery_wine_cart_loaded", []),
("weavery_interior",0,"weavery_interior","bo_weavery_interior", []),
("weavery_loom_a",0,"weavery_loom_a","bo_weavery_loom_a", []),
("weavery_spinning_wheel",0,"weavery_spinning_wheel","bo_weavery_spinning_wheel", []),
("mill_interior",0,"mill_interior","bo_mill_interior", []),
("mill_flour_sack", 0,"mill_flour_sack","bo_mill_flour_sack", []),
("mill_flour_sack_desk_a", 0,"mill_flour_sack_desk_a","bo_mill_flour_sack_desk_a", []),
("mill_flour_sack_desk_b", 0,"mill_flour_sack_desk_b","bo_mill_flour_sack_desk_b", []),
("smithy_interior", 0,"smithy_interior","bo_smithy_interior", []),
("smithy_grindstone_wheel", 0,"smithy_grindstone_wheel","bo_smithy_grindstone_wheel", []),
("smithy_forge_bellows", 0,"smithy_forge_bellows","bo_smithy_forge_bellows", []),
("smithy_forge", 0,"smithy_forge","bo_smithy_forge", []),
("smithy_anvil", 0,"smithy_anvil","bo_smithy_anvil", []),
("tannery_hide_a", 0,"tannery_hide_a","bo_tannery_hide_a", []),
("tannery_hide_b", 0,"tannery_hide_b","bo_tannery_hide_b", []),
("tannery_pools_a", 0,"tannery_pools_a","bo_tannery_pools_a", []),
("tannery_pools_b", 0,"tannery_pools_b","bo_tannery_pools_b", []),
("fountain", 0, "fountain", "bo_fountain", []),
("rhodok_houses_a",0,"rhodok_houses_a","bo_rhodok_houses_a", []),
("rhodok_houses_b",0,"rhodok_houses_b","bo_rhodok_houses_b", []),
("rhodok_houses_c",0,"rhodok_houses_c","bo_rhodok_houses_c", []),
("rhodok_houses_d",0,"rhodok_houses_d","bo_rhodok_houses_d", []),
("rhodok_houses_e",0,"rhodok_houses_e","bo_rhodok_houses_e", []),
("rhodok_house_passage_a",0,"rhodok_house_passage_a","bo_rhodok_house_passage_a", []),
("bridge_b",0,"bridge_b","bo_bridge_b", []),
("brewery_pool", 0,"brewery_pool","bo_brewery_pool", []),
("brewery_big_bucket", 0,"brewery_big_bucket","bo_brewery_big_bucket", []),
("brewery_interior", 0,"brewery_interior","bo_brewery_interior", []),
("brewery_bucket_platform_a", 0,"brewery_bucket_platform_a","bo_brewery_bucket_platform_a", []),
("brewery_bucket_platform_b", 0,"brewery_bucket_platform_b","bo_brewery_bucket_platform_b", []),
("weavery_dye_pool_r",0,"weavery_dye_pool_r","bo_weavery_dye_pool_r", []),
("weavery_dye_pool_y",0,"weavery_dye_pool_y","bo_weavery_dye_pool_y", []),
("weavery_dye_pool_b",0,"weavery_dye_pool_b","bo_weavery_dye_pool_b", []),
("weavery_dye_pool_p",0,"weavery_dye_pool_p","bo_weavery_dye_pool_p", []),
("weavery_dye_pool_g",0,"weavery_dye_pool_g","bo_weavery_dye_pool_g", []),
("oil_press_interior",0,"oil_press_interior","bo_oil_press_interior", []),
("city_swad_01" ,0,"city_swad_01" ,"bo_city_swad_01" , []),
("city_swad_02" ,0,"city_swad_02" ,"bo_city_swad_02" , []),
("city_swad_03" ,0,"city_swad_03" ,"bo_city_swad_03" , []),
("city_swad_04" ,0,"city_swad_04" ,"bo_city_swad_04" , []),
("city_swad_passage_01" ,0,"city_swad_passage_01" ,"bo_city_swad_passage_01" , []),
("city_swad_05" ,0,"city_swad_05" ,"bo_city_swad_05" , []),
("arena_block_j_a",0,"arena_block_j_a","bo_arena_block_j_a", []),
("arena_underway_a",0,"arena_underway_a","bo_arena_underway_a", []),
("arena_circle_a",0,"arena_circle_a","bo_arena_circle_a", []),
("rope_bridge_15m",0,"rope_bridge_15m","bo_rope_bridge_15m", []),
("tree_house_a",0,"tree_house_a","bo_tree_house_a", []),
("tree_house_guard_a",0,"tree_house_guard_a","bo_tree_house_guard_a", []),
("tree_house_guard_b",0,"tree_house_guard_b","bo_tree_house_guard_b", []),
("tree_shelter_a",0,"tree_shelter_a","bo_tree_shelter_a", []),
("yellow_fall_leafs_a",0,"0","0",
[
(ti_on_scene_prop_init,
[
(particle_system_add_new, "psys_fall_leafs_a"),
]),
]),
("rock_bridge_a",0,"rock_bridge_a","bo_rock_bridge_a", []),
("suspension_bridge_a",0,"suspension_bridge_a","bo_suspension_bridge_a", []),
("mine_a",0,"mine_a","bo_mine_a", []),
("snowy_destroy_house_a",0,"snowy_destroy_house_a","bo_snowy_destroy_house_a", []),
("snowy_destroy_house_b",0,"snowy_destroy_house_b","bo_snowy_destroy_house_b", []),
("snowy_destroy_house_c",0,"snowy_destroy_house_c","bo_snowy_destroy_house_c", []),
("snowy_destroy_heap",0,"snowy_destroy_heap","bo_snowy_destroy_heap", []),
("snowy_destroy_castle_a",0,"snowy_destroy_castle_a","bo_snowy_destroy_castle_a", []),
("snowy_destroy_castle_b",0,"snowy_destroy_castle_b","bo_snowy_destroy_castle_b", []),
("snowy_destroy_castle_c",0,"snowy_destroy_castle_c","bo_snowy_destroy_castle_c", []),
("snowy_destroy_castle_d",0,"snowy_destroy_castle_d","bo_snowy_destroy_castle_d", []),
("snowy_destroy_windmill",0,"snowy_destroy_windmill","bo_snowy_destroy_windmill", []),
("snowy_destroy_tree_a",0,"snowy_destroy_tree_a","bo_snowy_destroy_tree_a", []),
("snowy_destroy_tree_b",0,"snowy_destroy_tree_b","bo_snowy_destroy_tree_b", []),
("snowy_destroy_bridge_a",0,"snowy_destroy_bridge_a","bo_snowy_destroy_bridge_a", []),
("snowy_destroy_bridge_b",0,"snowy_destroy_bridge_b","bo_snowy_destroy_bridge_b", []),
#INVASION MODE START
#MCA
#prisoner cart
("prison_cart", sokf_moveable,"prison_cart","bo_prison_cart", []),
("prison_cart_door_right", sokf_show_hit_point_bar|sokf_destructible|sokf_moveable,"prison_cart_door_right","bo_prison_cart_door_right",
[
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 300),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(set_fixed_point_multiplier, 1),
(try_end),
]),
   ]), # prop_hit trigger included so the hit point bar is displayed; it also plays hit sounds and particle bursts
("prison_cart_door_left", sokf_show_hit_point_bar|sokf_destructible|sokf_moveable,"prison_cart_door_left","bo_prison_cart_door_left",
[
(ti_on_init_scene_prop,
[
(store_trigger_param_1, ":instance_no"),
(scene_prop_set_hit_points, ":instance_no", 300),
]),
(ti_on_scene_prop_hit,
[
(store_trigger_param_1, ":instance_no"),
(store_trigger_param_2, ":damage"),
(try_begin),
(scene_prop_get_hit_points, ":hit_points", ":instance_no"),
(val_sub, ":hit_points", ":damage"),
(gt, ":hit_points", 0),
(play_sound, "snd_dummy_hit"),
(else_try),
(neg|multiplayer_is_server),
(play_sound, "snd_dummy_destroyed"),
(try_end),
(try_begin),
(this_or_next|multiplayer_is_server),
(neg|game_in_multiplayer_mode),
(particle_system_burst, "psys_dummy_smoke", pos1, 3),
(particle_system_burst, "psys_dummy_straw", pos1, 10),
(set_fixed_point_multiplier, 1),
(try_end),
]),
   ]), # prop_hit trigger included so the hit point bar is displayed; it also plays hit sounds and particle bursts
("multiplayer_coop_item_drop", sokf_moveable|sokf_type_player_limiter|spr_use_time(1), "package", "bobaggage", [
(ti_on_scene_prop_use,
[
]),
(ti_on_scene_prop_start_use,
[
(store_trigger_param_1, ":agent_id"),
(store_trigger_param_2, ":instance_id"),
(agent_get_player_id, ":player_no", ":agent_id"),
(player_is_active, ":player_no"),
(assign, ":living_companion_1", -1),
(assign, ":living_companion_2", -1),
#(assign, reg1, ":agent_id"),
#(assign, reg2, ":instance_id"),
#(display_message, "@prop use trigger item: {reg0} agent: {reg1} instance: {reg2} "),
(try_for_agents, ":agent_id"),
#(this_or_next|eq, ":living_companion_1", -1),
#(eq, ":living_companion_1", -1),
(agent_is_active, ":agent_id"),
(agent_is_alive, ":agent_id"),
(agent_is_human, ":agent_id"),
(agent_is_non_player, ":agent_id"),
(agent_get_team, ":team_id", ":agent_id"),
(eq, ":team_id", 0),
(agent_get_group, ":agent_group", ":agent_id"),
(eq, ":agent_group", ":player_no"),
(agent_get_troop_id, ":troop_id", ":agent_id"),
(this_or_next|player_slot_eq, ":player_no", slot_player_companion_ids_begin, ":troop_id"),
(player_slot_eq, ":player_no", slot_player_companion_ids_begin + 1, ":troop_id"),
(try_begin),
(eq, ":living_companion_1", -1),
(assign, ":living_companion_1", ":agent_id"),
(else_try),
(eq, ":living_companion_2", -1),
(assign, ":living_companion_2", ":agent_id"),
(try_end),
(try_end),
#(display_message, "@sending to player"),
#(assign, reg1, ":living_companion_1"),
#(assign, reg2, ":living_companion_2"),
#(display_message, "@living_companion_1: {reg1} living_companion_2: {reg2} "),
(assign, ":new_chest", 1),
(assign, ":empty_slot", -1),
(try_for_range, ":cur_slot", slot_player_coop_opened_chests_begin, slot_player_coop_opened_chests_end),
(eq, ":new_chest", 1),
(player_get_slot, ":cur_instance", ":player_no", ":cur_slot"),
(try_begin),
(eq, ":cur_instance", ":instance_id"),
(assign, ":new_chest", 0),
(try_end),
(try_end),
(try_for_range, ":cur_slot", slot_player_coop_opened_chests_begin, slot_player_coop_opened_chests_end),
(eq, ":new_chest", 1),
(player_get_slot, ":cur_instance", ":player_no", ":cur_slot"),
(try_begin),
(eq, ":cur_instance", 0),
(eq, ":empty_slot", -1),
(assign, ":empty_slot", ":cur_slot"),
(try_end),
(try_end),
(try_begin),
(eq, ":new_chest", 1),
(call_script, "script_coop_generate_item_drop", ":player_no"),
(neq, ":empty_slot", -1),
(player_set_slot, ":player_no", ":empty_slot", ":instance_id"),
(multiplayer_send_2_int_to_player, ":player_no", multiplayer_event_coop_chest_opened, ":empty_slot", ":instance_id"),
(try_end),
(assign, reg1, ":new_chest"),
#(display_message, "@new chest: {reg1}"),
(try_begin),
(eq, ":new_chest", 1),
(try_begin),
(neq, ":player_no", 0),
(multiplayer_send_3_int_to_player, ":player_no", multiplayer_event_coop_drop_item, "$g_ccoop_currently_dropping_item", ":living_companion_1", ":living_companion_2"),
#(display_message, "@script called"), #debug
(else_try),
(call_script, "script_coop_drop_item", "$g_ccoop_currently_dropping_item", ":living_companion_1", ":living_companion_2"),
#(assign, reg1, ":player_no"),
#(display_message, "@sending to player no: {reg1} "),
(try_end),
(try_end),
(assign, "$g_ccoop_currently_dropping_item", -1),
]),
]),
#INVASION MODE END
]
|
Ikaguia/LWBR-WarForge
|
module_scene_props.py
|
Python
|
unlicense
| 133,561
|
#!/usr/local/bin/python3.4
# -*- coding: utf-8 -*-
import threading
import time
import sys
import trace
from inspect import isgenerator
import format
class KillableThread(threading.Thread):
"""A subclass of threading.Thread, with a kill() method provided by courtsey of Connelly Barnes."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def __run(self):
"""Hacked run function, which installs the trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
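# Editor's note (added): illustrative usage sketch, not part of the original file.
# kill() only takes effect when the thread executes another line of traced Python
# code, so a thread blocked inside a long C-level call is not interrupted mid-call.
#
#     t = KillableThread(target=some_function)   # some_function is hypothetical
#     t.start()
#     t.kill()   # raises SystemExit at the thread's next traced line
#     t.join()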
class FunctionExecutor(KillableThread):
def __init__(self, _f: 'the function to execute', _callback, args, kwargs):
super().__init__()
self._f = _f
self._callback = _callback
self.args = args
self.kwargs = kwargs
def run(self):
ret = self._f(*self.args, **self.kwargs)
if ret is not None:
			if isgenerator(ret):
for i in ret:
self._callback(i.format(color=format.color))
else: # TODO: make function to be only generators, not normal functions
print('DEPRECATED: function "', self._f.cmdname, '" is using the return statement', sep='')
self._callback(ret.format(color=format.color))
class ControlThread(threading.Thread):
def __init__(self, _f, _callback, *args, **kwargs):
super().__init__()
self.watched_thread = FunctionExecutor(_f, _callback, args, kwargs)
self._callback = _callback
def run(self):
self.watched_thread.start()
time.sleep(3)
if self.watched_thread.is_alive():
self.watched_thread.kill()
self._callback('timeout')
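# Editor's note (added): ControlThread acts as a watchdog; it runs the wrapped
# function in a KillableThread, waits a hard-coded 3 seconds, and if the function
# is still running it kills the thread and reports 'timeout' through the callback.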
|
suut/psychic-happiness
|
async_core.py
|
Python
|
unlicense
| 2,381
|
from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow)
                      |https?://(?:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/t6d9sg/the-daily-show-20038-highlights/be3cwo',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid attribute
            # without a URL prefix, so extract the alternate reference
            # and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
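        # (added illustrative example) e.g. 'episode:thedailyshow.com:...' becomes
        # 'episode:thedailyshow.cc.com:...'; a uri that already ends in '.cc.com'
        # is left unchanged.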
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url, epTitle,
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
formats = []
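            # Editor's note (added comment): each bitrate rendition is exposed twice:
            # once as a 'vhttp-*' format whose URL is derived from the RTMP URL via
            # _transform_rtmp_url, and once as an 'rtmp-*' format pointing at the
            # stream host directly.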
for format, rtmp_video_url in turls:
w, h = self._video_dimensions.get(format, (None, None))
formats.append({
'format_id': 'vhttp-%s' % format,
'url': self._transform_rtmp_url(rtmp_video_url),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
formats.append({
'format_id': 'rtmp-%s' % format,
'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
self._sort_formats(formats)
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
'title': virtual_id,
'formats': formats,
'uploader': show_name,
'upload_date': upload_date,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
})
return {
'_type': 'playlist',
'entries': entries,
'title': show_name + ' ' + title,
'description': description,
}
|
rzhxeo/youtube-dl
|
youtube_dl/extractor/comedycentral.py
|
Python
|
unlicense
| 10,050
|
from whiffle import wikidotapi
from util import hook
@hook.command
def author(inp):
".author <Author Name> -- Will return details regarding the author"
if firstrefresh == 0:#make sure the cache actually exists
return "Cache has not yet updated, please wait a minute and search again."
api = wikidotapi.connection()
api.Site = "wanderers-library"
pages = api.refresh_pages()
authpages = []
totalrating = 0
pagetotal = 0
pagerating = 0
author = "None"
multimatch = []
authorpage = ""
for page in pages:
for item in pagecache: #these two for loops iterate through every item within each page dictionary, the proper syntax for accessing a specific item is item[page][itemname],
try:
if "entry" in item[page]["tags"]: #makes sure only articles are counted
if author == item[page]["created_by"]:
authpages.append(page)
pagetitle = item[page]["title"]
pagerating = item[page]["rating"]
totalrating = totalrating + pagerating
print page
pagetotal = pagetotal + 1
if inp.lower() in item[page]["created_by"].lower() and author == "None": #this just matches the author with the first author match
author = item[page]["created_by"]
authpages.append(page)
pagetitle = item[page]["title"]
pagerating = item[page]["rating"]
totalrating = totalrating + pagerating
print page
pagetotal = pagetotal + 1 #all lines above provide page data, math is pretty easy and self-explanatory
else:
if "author" in item[page]["tags"]:
if author == item[page]["created_by"]:
authorpage = "http://wanderers-library.wikidot.com/"+item[page]["fullname"] +" - "
except KeyError: #must do error handling for code to be valid, iterates through incorrect keys multiple times, do not print things in the except clause, slows down program immensely
pass
for page in pages: #this loop checks to see if multiple authors match input
for item in pagecache:
try:
if "entry" in item[page]["tags"]:
if inp.lower() in item[page]["created_by"].lower():
multimatch.append(item[page]["created_by"])
except KeyError:
pass
for authors in multimatch: #checks to see if multiple authors found
if authors != author:
return "There are "+ str(len(multimatch)) + " authors matching you query. Please be more specifc. "
avgrating = 0
	if pagetotal != 0: #just so no division by zero
avgrating = totalrating/pagetotal
if not authpages: #if no author pages are added
return "Author not found."
return "nonick::"+ authorpage+""+author +" has written " + str(pagetotal) + " pages. They have " + str(totalrating)+ " net upvotes with an average rating of " + str(avgrating) + ". Their most recent article is " + pagetitle + "(Rating:" + str(pagerating) + ")"#+"- http://wanderers-library.wikidot.com/" + authpages[-1].lower()
|
pixeltasim/IRCBot-Pixeltasim
|
plugins/author.py
|
Python
|
unlicense
| 2,861
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# =================================================================
# =================================================================
# NOTE: notify message MUST follow these rules:
#
#    - Messages must be wrapped with _() for translation
#
#    - Replacement variables must be wrapped with brackets
#
#    - Replacement variables must be from the following list:
#        {instance_id}
#        {instance_name}
#        {host_name}
#        {source_host_name}
#        {target_host_name}
#        {volume_id}
#        {error}
from paxes_nova import _
PAUSE_SUCCESS = (_("Pause of virtual machine {instance_name} on host "
"{host_name} was successful."))
PAUSE_ERROR = (_("Pause of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
SUSPEND_SUCCESS = (_("Suspend of virtual machine {instance_name} on host "
"{host_name} was successful."))
SUSPEND_ERROR = (_("Suspend of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESUME_SUCCESS = (_("Resume of virtual machine {instance_name} on host "
"{host_name} was successful."))
RESUME_ERROR = (_("Resume of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
DEPLOY_SUCCESS = (_("Deploy of virtual machine {instance_name} on host "
"{host_name} was successful."))
DEPLOY_ERROR = (_("Deploy of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
START_SUCCESS = (_("Start of virtual machine {instance_name} on host "
"{host_name} was successful."))
START_ERROR = (_("Start of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
STOP_SUCCESS = (_("Stop of virtual machine {instance_name} on host "
"{host_name} was successful."))
STOP_ERROR = (_("Stop of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESTART_SUCCESS = (_("Restart of virtual machine {instance_name} on host "
"{host_name} was successful."))
RESTART_ERROR = (_("Restart of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
LPM_SUCCESS = (_("Migration of virtual machine {instance_name} from host "
"{source_host_name} to host {target_host_name} was "
"successful."))
LPM_ERROR = (_("Migration of virtual machine {instance_name} to host "
"{target_host_name} failed with exception: {error}"))
LPM_ERROR_DEST = (_("Migration of virtual machine {instance_name} to host "
"{host_name} failed with exception: {error}"))
DELETE_ERROR = (_("Delete of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
DELETE_SUCCESS = (_("Delete of virtual machine {instance_name} on host "
"{host_name} was successful. "))
RESIZE_ERROR = (_("Resize of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESIZE_SUCCESS = (_("Resize of virtual machine {instance_name} on host "
"{host_name} was successful."))
CAPTURE_SUCCESS = (_("Capture of virtual machine {instance_name} on host "
"{host_name} was successful"))
CAPTURE_ERROR = (_("Capture of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
ATTACH_SUCCESS = (_("Volume {volume_id} was successfully attached to "
"virtual machine {instance_name}."))
ATTACH_ERROR = (_("Volume {volume_id} could not be attached to "
"virtual machine {instance_name}. Error message: {error}"))
DETACH_SUCCESS = (_("Volume {volume_id} was successfully detached from "
"virtual machine {instance_name}."))
DETACH_ERROR = (_("Volume {volume_id} could not be detached from "
"virtual machine {instance_name}. Error message: {error}"))
|
windskyer/k_nova
|
paxes_nova/compute/notify_messages.py
|
Python
|
apache-2.0
| 4,325
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gast
import pytest
from tangent import optimization
from tangent import quoting
def test_assignment_propagation():
def f(x):
y = x
z = y
return z
node = quoting.parse_function(f)
node = optimization.assignment_propagation(node)
assert len(node.body[0].body) == 2
def test_dce():
def f(x):
y = 2 * x
return x
node = quoting.parse_function(f)
node = optimization.dead_code_elimination(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_fixed_point():
def f(x):
y = g(x)
z = h(y)
return x
node = quoting.parse_function(f)
node = optimization.optimize(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_constant_folding():
def f(x):
x = 1 * x
x = 0 * x
x = x * 1
x = x * 0
x = x * 2
x = 2 * x
x = 2 * 3
x = 1 + x
x = 0 + x
x = x + 1
x = x + 0
x = x + 2
x = 2 + x
x = 2 + 3
x = 1 - x
x = 0 - x
x = x - 1
x = x - 0
x = x - 2
x = 2 - x
x = 2 - 3
x = 1 / x
x = 0 / x
x = x / 1
x = x / 0
x = x / 2
x = 2 / x
x = 2 / 8
x = 1 ** x
x = 0 ** x
x = x ** 1
x = x ** 0
x = x ** 2
x = 2 ** x
x = 2 ** 3
def f_opt(x):
x = x
x = 0
x = x
x = 0
x = x * 2
x = 2 * x
x = 6
x = 1 + x
x = x
x = x + 1
x = x
x = x + 2
x = 2 + x
x = 5
x = 1 - x
x = -x
x = x - 1
x = x
x = x - 2
x = 2 - x
x = -1
x = 1 / x
x = 0 / x
x = x
x = x / 0
x = x / 2
x = 2 / x
x = 0.25
x = 1
x = 0
x = x
x = 1
x = x ** 2
x = 2 ** x
x = 8
node = quoting.parse_function(f)
node = optimization.constant_folding(node)
node_opt = quoting.parse_function(f_opt)
lines = quoting.to_source(node).strip().split('\n')[1:]
lines_opt = quoting.to_source(node_opt).strip().split('\n')[1:]
  # Under Python 2, `/` may perform integer division (so 1 / 2 == 0); in that case
  # the folded result of `x = 2 / 8` is 0 rather than 0.25.
if 1 / 2 == 0:
lines_opt[27] = ' x = 0'
assert lines == lines_opt
if __name__ == '__main__':
assert not pytest.main([__file__])
|
google/tangent
|
tests/test_optimization.py
|
Python
|
apache-2.0
| 2,734
|
# Copyright (C) 2020 OpenIO SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from swift.common.middleware.proxy_logging import ProxyLoggingMiddleware
from swift.common.swob import Request
from swift.common.utils import config_true_value, get_logger
def flat_dict_from_dict(dict_):
"""
Create a dictionary without depth.
{
'depth0': {
'depth1': {
                'depth2a': 'test1',
                'depth2b': 'test2'
}
}
}
=>
    {'depth0.depth1.depth2a': 'test1', 'depth0.depth1.depth2b': 'test2'}
"""
flat_dict = dict()
for key, value in dict_.items():
if not isinstance(value, dict):
flat_dict[key] = value
continue
flat_dict_ = flat_dict_from_dict(value)
for key_, value_ in flat_dict_.items():
flat_dict[key + '.' + key_] = value_
return flat_dict
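# Editor's note (added): illustrative example of the helper above, values made up:
# flat_dict_from_dict({'a': {'b': 1, 'c': {'d': 2}}}) returns {'a.b': 1, 'a.c.d': 2}.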
def perfdata_to_str(perfdata):
flat_perfdata = flat_dict_from_dict(perfdata)
perfdata_list = list()
perfdata_list.append('PERFDATA')
for key, value in sorted(flat_perfdata.items()):
if key.startswith('rawx.'):
if 'http' in key[5:]:
key = key[:key.index('http') + 4]
perfdata_list.append(key + ':' + '%.4f' % value)
return '...'.join(perfdata_list)
class OioProxyLoggingMiddleware(ProxyLoggingMiddleware):
"""
Keep the same behavior as ProxyLoggingMiddleware,
but add the values of 'perfdata' if it is enabled.
"""
def __init__(self, app, conf):
super(OioProxyLoggingMiddleware, self).__init__(app, conf)
self.logger = get_logger(conf)
self.perfdata = config_true_value(conf.get('oio_perfdata', 'false'))
self.perfdata_user_agents = None
if self.perfdata:
pattern_dict = {k: v for k, v in conf.items()
if k.startswith("oio_perfdata_user_agent")}
self.perfdata_user_agents = [re.compile(pattern_dict[k])
for k in sorted(pattern_dict.keys())]
if not self.perfdata_user_agents:
self.logger.warn('No user_agent pattern defined, '
'all clients will add perfdata.')
def log_request(self, req, *args, **kwargs):
oio_perfdata = req.environ.get('oio.perfdata')
if oio_perfdata is not None:
req.environ.setdefault('swift.log_info', []).append(
perfdata_to_str(oio_perfdata))
super(OioProxyLoggingMiddleware, self).log_request(
req, *args, **kwargs)
def __call__(self, env, start_response):
if self.perfdata:
            add_perfdata = False
            if not self.perfdata_user_agents:
                add_perfdata = True
            else:
                req = Request(env)
                if req.user_agent:
                    for pat in self.perfdata_user_agents:
                        if pat.match(req.user_agent):
                            add_perfdata = True
                            break
            if add_perfdata:
env.setdefault('oio.perfdata', dict())
return super(OioProxyLoggingMiddleware, self).__call__(
env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def proxy_logger(app):
return OioProxyLoggingMiddleware(app, conf)
return proxy_logger
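# Hedged configuration sketch, not part of the original file: wiring this
# middleware into a proxy-server.conf paste pipeline. The option names below
# match those read in __init__ (oio_perfdata, oio_perfdata_user_agent*); the
# egg entry point name is an assumption.
#
#   [filter:proxy-logging]
#   use = egg:oioswift#proxy_logging
#   oio_perfdata = true
#   oio_perfdata_user_agent_1 = aws-cli.*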
|
open-io/oio-swift
|
oioswift/common/middleware/proxy_logging.py
|
Python
|
apache-2.0
| 3,938
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
from oslo_utils import importutils
from midonet.neutron.db import task # noqa
from neutron.extensions import portbindings
from neutron.tests.unit import _test_extension_portbindings as test_bindings
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_ext_gw_mode as test_gw_mode
import neutron.tests.unit.test_extension_security_group as sg
import neutron.tests.unit.test_l3_plugin as test_l3_plugin
from oslo_config import cfg
MIDOKURA_PKG_PATH = "midonet.neutron.plugin"
MIDOKURA_EXT_PATH = "midonet.neutron.extensions"
MIDONET_PLUGIN_NAME = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH)
class MidonetPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self,
plugin=MIDONET_PLUGIN_NAME,
ext_mgr=None,
service_plugins=None):
self.midoclient_mock = mock.MagicMock()
self.midoclient_mock.midonetclient.neutron.client.return_value = True
modules = {
'midonetclient': self.midoclient_mock,
'midonetclient.neutron': self.midoclient_mock.neutron,
'midonetclient.neutron.client': self.midoclient_mock.client,
}
self.module_patcher = mock.patch.dict('sys.modules', modules)
self.module_patcher.start()
# import midonetclient here because it needs proper mock objects to be
# assigned to this module first. 'midoclient_mock' object is the
# mock object used for this module.
from midonetclient.neutron.client import MidonetClient
client_class = MidonetClient
self.mock_class = client_class()
extensions_path = importutils.import_module(
MIDOKURA_EXT_PATH).__file__
cfg.CONF.set_override('api_extensions_path',
os.path.dirname(extensions_path))
super(MidonetPluginV2TestCase, self).setUp(plugin=plugin)
def tearDown(self):
super(MidonetPluginV2TestCase, self).tearDown()
self.module_patcher.stop()
class TestMidonetNetworksV2(MidonetPluginV2TestCase,
test_plugin.TestNetworksV2):
pass
class TestMidonetL3NatTestCase(MidonetPluginV2TestCase,
test_l3_plugin.L3NatDBIntTestCase):
def test_floatingip_with_invalid_create_port(self):
self._test_floatingip_with_invalid_create_port(MIDONET_PLUGIN_NAME)
class TestMidonetSecurityGroup(MidonetPluginV2TestCase,
sg.TestSecurityGroups):
pass
class TestMidonetSubnetsV2(MidonetPluginV2TestCase,
test_plugin.TestSubnetsV2):
pass
class TestMidonetPortsV2(MidonetPluginV2TestCase,
test_plugin.TestPortsV2):
def test_vif_port_binding(self):
with self.port(name='myname') as port:
self.assertEqual('midonet', port['port']['binding:vif_type'])
self.assertTrue(port['port']['admin_state_up'])
class TestMidonetPluginPortBinding(MidonetPluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_MIDONET
HAS_PORT_FILTER = True
class TestExtGwMode(MidonetPluginV2TestCase,
test_gw_mode.ExtGwModeIntTestCase):
pass
|
midonet/python-neutron-plugin-midonet
|
midonet/neutron/tests/unit/test_midonet_plugin.py
|
Python
|
apache-2.0
| 4,032
|
import logging
from flask import abort
from flask import Blueprint
from flask import current_app
from flask import flash
from flask import redirect
from flask import session
from flask import url_for
from testrail_reporting.auth.models import AuthUser
from testrail_reporting.auth.oauth import get_google
log = logging.getLogger(__name__)
auth = Blueprint('auth', __name__)
@auth.route('/login')
def login():
callback = url_for('auth.authorized', _external=True)
return get_google().authorize(callback=callback)
@auth.route('/authorized')
def authorized():
resp = get_google().authorized_response()
if resp is None:
abort(401)
google_token = resp['access_token']
session['google_token'] = (google_token, '')
user_info = get_google().get('userinfo').data
domain = user_info.get('hd', None)
if domain != current_app.config['GOOGLE_APP_DOMAIN']:
flash('Domain is not allowed')
return redirect(url_for('pages.index'))
user_info.update({'google_token': google_token})
AuthUser.objects(email=user_info["email"]).update_one(upsert=True,
**user_info)
return redirect(url_for('pages.index'))
@auth.route('/logout')
def logout():
session.pop('google_token', None)
return redirect(url_for('pages.login'))
|
romansalin/testrail-reporting
|
testrail_reporting/auth/views.py
|
Python
|
apache-2.0
| 1,344
|
import dask
from .scheduler import ray_dask_get, ray_dask_get_sync
from .callbacks import (
RayDaskCallback,
local_ray_callbacks,
unpack_ray_callbacks,
)
from .optimizations import dataframe_optimize
dask_persist = dask.persist
def ray_dask_persist(*args, **kwargs):
kwargs["ray_persist"] = True
return dask_persist(*args, **kwargs)
ray_dask_persist.__doc__ = dask_persist.__doc__
dask_persist_mixin = dask.base.DaskMethodsMixin.persist
def ray_dask_persist_mixin(self, **kwargs):
kwargs["ray_persist"] = True
return dask_persist_mixin(self, **kwargs)
ray_dask_persist_mixin.__doc__ = dask_persist_mixin.__doc__
# We patch dask in order to inject a kwarg into its `dask.persist()` calls,
# which the Dask-on-Ray scheduler needs.
# FIXME(Clark): Monkey patching is bad and we should try to avoid this.
def patch_dask(ray_dask_persist, ray_dask_persist_mixin):
dask.persist = ray_dask_persist
dask.base.DaskMethodsMixin.persist = ray_dask_persist_mixin
patch_dask(ray_dask_persist, ray_dask_persist_mixin)
__all__ = [
# Schedulers
"ray_dask_get",
"ray_dask_get_sync",
# Helpers
"ray_dask_persist",
# Callbacks
"RayDaskCallback",
"local_ray_callbacks",
"unpack_ray_callbacks",
# Optimizations
"dataframe_optimize",
]
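# Hedged usage sketch, not part of the original module: running a Dask
# computation on the Ray scheduler exported above. Assumes a local or already
# running Ray cluster and that dask.array is installed.
def _example_ray_dask_usage():
    import ray
    import dask.array as da
    ray.init(ignore_reinit_error=True)
    x = da.ones((1000, 1000), chunks=(100, 100))
    # Dispatch the Dask task graph to Ray instead of Dask's default scheduler.
    return x.sum().compute(scheduler=ray_dask_get)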
|
pcmoritz/ray-1
|
python/ray/util/dask/__init__.py
|
Python
|
apache-2.0
| 1,309
|
import os
import requests
import socket
import subprocess
import unittest
import json
import ray
from ray.rllib import _register_all
from ray.tune.trial import Trial, Resources
from ray.tune.web_server import TuneClient
from ray.tune.trial_runner import TrialRunner
def get_valid_port():
port = 4321
while True:
try:
print("Trying port", port)
port_test_socket = socket.socket()
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
break
except socket.error:
port += 1
return port
class TuneServerSuite(unittest.TestCase):
def basicSetup(self):
# Wait up to five seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
ray.init(num_cpus=4, num_gpus=1)
port = get_valid_port()
self.runner = TrialRunner(server_port=port)
runner = self.runner
kwargs = {
"stopping_criterion": {
"training_iteration": 3
},
"resources": Resources(cpu=1, gpu=1),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
client = TuneClient("localhost", port)
return runner, client
def tearDown(self):
print("Tearing down....")
try:
self.runner._server.shutdown()
self.runner = None
except Exception as e:
print(e)
ray.shutdown()
_register_all()
def testAddTrial(self):
runner, client = self.basicSetup()
for i in range(3):
runner.step()
spec = {
"run": "__fake",
"stop": {
"training_iteration": 3
},
"resources_per_trial": {
"cpu": 1,
"gpu": 1
},
}
client.add_trial("test", spec)
runner.step()
all_trials = client.get_all_trials()["trials"]
runner.step()
self.assertEqual(len(all_trials), 3)
def testGetTrials(self):
runner, client = self.basicSetup()
for i in range(3):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(len(all_trials), 2)
tid = all_trials[0]["id"]
client.get_trial(tid)
runner.step()
self.assertEqual(len(all_trials), 2)
def testGetTrialsWithFunction(self):
runner, client = self.basicSetup()
test_trial = Trial(
"__fake",
trial_id="function_trial",
stopping_criterion={"training_iteration": 3},
config={"callbacks": {
"on_episode_start": lambda x: None
}})
runner.add_trial(test_trial)
for i in range(3):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(len(all_trials), 3)
client.get_trial("function_trial")
runner.step()
self.assertEqual(len(all_trials), 3)
def testStopTrial(self):
"""Check if Stop Trial works."""
runner, client = self.basicSetup()
for i in range(2):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(
len([t for t in all_trials if t["status"] == Trial.RUNNING]), 1)
tid = [t for t in all_trials if t["status"] == Trial.RUNNING][0]["id"]
client.stop_trial(tid)
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(
len([t for t in all_trials if t["status"] == Trial.RUNNING]), 0)
def testStopExperiment(self):
"""Check if stop_experiment works."""
runner, client = self.basicSetup()
for i in range(2):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(
len([t for t in all_trials if t["status"] == Trial.RUNNING]), 1)
client.stop_experiment()
runner.step()
self.assertTrue(runner.is_finished())
self.assertRaises(
requests.exceptions.ReadTimeout,
lambda: client.get_all_trials(timeout=1))
def testCurlCommand(self):
"""Check if Stop Trial works."""
runner, client = self.basicSetup()
for i in range(2):
runner.step()
stdout = subprocess.check_output(
"curl \"http://{}:{}/trials\"".format(client.server_address,
client.server_port),
shell=True)
self.assertNotEqual(stdout, None)
curl_trials = json.loads(stdout.decode())["trials"]
client_trials = client.get_all_trials()["trials"]
for curl_trial, client_trial in zip(curl_trials, client_trials):
self.assertEqual(curl_trial.keys(), client_trial.keys())
self.assertEqual(curl_trial["id"], client_trial["id"])
self.assertEqual(curl_trial["trainable_name"],
client_trial["trainable_name"])
self.assertEqual(curl_trial["status"], client_trial["status"])
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
pcmoritz/ray-1
|
python/ray/tune/tests/test_tune_server.py
|
Python
|
apache-2.0
| 5,470
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.operators import special_values
from tensorflow.python.autograph.pyct import errors
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
LIMIT_PYTHON_ITERATIONS = True
PYTHON_MAX_ITERATIONS = 100000000 # Fails in about one minute for empty loops.
WARN_INEFFICIENT_UNROLL = True
INEFFICIENT_UNROLL_MIN_ITERATIONS = 3000
INEFFICIENT_UNROLL_MIN_OPS = 1
def for_stmt(iter_, extra_test, body, init_state):
"""Functional form of a for statement.
The loop operates on a state, which includes all symbols that are
variant across loop iterations, excluding the iterate as well as the
variables local to the loop.
  For example, given the loop below that calculates the geometric and
  arithmetic means of some numbers:
geo_mean = 1
arith_mean = 0
for i in range(n):
a = numbers[i]
geo_mean *= a
arith_mean += a
  The state is represented by the variables geo_mean and arith_mean. The
  init_state argument would contain the tuple (1, 0); the body takes geo_mean
  and arith_mean as arguments and returns a tuple with the new values of
  geo_mean and arith_mean, respectively.
Args:
iter_: The entity being iterated over.
extra_test: Callable with the state as arguments, and boolean return type.
An additional loop condition.
body: Callable with the iterate and the state as arguments, and
state as return type. The actual loop body.
init_state: Tuple containing the initial state.
Returns:
Tuple containing the final state.
"""
if tensor_util.is_tensor(iter_):
return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
elif isinstance(iter_, dataset_ops.DatasetV2):
# Check for undefined symbols and report an error. This prevents the error
# from propagating into the TF runtime. We have more information here and
# can provide a clearer error message.
undefined = tuple(filter(special_values.is_undefined, init_state))
if undefined:
raise ValueError(
'TensorFlow requires that the following symbols must be defined'
' before the loop: {}'.format(
tuple(s.symbol_name for s in undefined)))
return _dataset_for_stmt(iter_, extra_test, body, init_state)
else:
return _py_for_stmt(iter_, extra_test, body, init_state)
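# Illustrative sketch, not part of the original module: the geometric /
# arithmetic mean loop from the docstring above, expressed through for_stmt.
# It exercises the plain-Python overload; `numbers` is an assumed input.
def _example_for_stmt(numbers):
  return for_stmt(
      range(len(numbers)),
      None,  # No extra loop condition.
      lambda i, geo_mean, arith_mean: (geo_mean * numbers[i],
                                       arith_mean + numbers[i]),
      (1, 0))  # init_state: (geo_mean, arith_mean).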
def _py_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that executes a Python for loop."""
state = init_state
for target in iter_:
if extra_test is not None and not extra_test(*state):
break
state = body(target, *state)
return state
def _known_len_tf_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that iterates over objects that admit a length."""
n = py_builtins.len_(iter_)
def while_body(iterate_index, *state):
iterate = iter_[iterate_index]
new_state = body(iterate, *state)
state = (iterate_index + 1,)
if new_state:
state += new_state
return state
def while_cond(iterate_index, *state):
if extra_test is not None:
return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))
return iterate_index < n
results = _tf_while_stmt(
while_cond,
while_body,
init_state=(0,) + init_state,
opts=dict(maximum_iterations=n))
# Dropping the iteration index because it's not syntactically visible.
# TODO(mdan): Don't.
if isinstance(results, (tuple, list)):
assert len(results) >= 1 # Has at least the iterate.
if len(results) > 1:
results = results[1:]
else:
results = ()
return results
def _dataset_for_stmt(ds, extra_test, body, init_state):
"""Overload of for_stmt that iterates over TF Datasets."""
if extra_test is not None:
raise NotImplementedError(
'break and return statements are not yet supported in '
'for/Dataset loops.')
def reduce_body(state, iterate):
new_state = body(iterate, *state)
return new_state
if init_state:
return ds.reduce(init_state, reduce_body)
  # Workaround for Dataset.reduce not allowing empty state tensors - create
# a dummy state variable that remains unused.
def reduce_body_with_dummy_state(state, iterate):
reduce_body((), iterate)
return state
ds.reduce((constant_op.constant(0),), reduce_body_with_dummy_state)
return ()
def while_stmt(test, body, init_state, opts=None):
"""Functional form of a while statement.
The loop operates on a so-called state, which includes all symbols that are
variant across loop iterations. In what follows we refer to state as either
a tuple of entities that represent an actual state, or a list of arguments
of the corresponding types.
Args:
test: Callable with the state as arguments, and boolean return type.
The loop condition.
body: Callable with the state as arguments, and state as return type.
The actual loop body.
init_state: Tuple containing the initial state.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
# Evaluate the initial test once in order to do the dispatch. The evaluation
# is isolated to minimize unwanted side effects.
# TODO(mdan): Do a full iteration - some state types might lower to Tensor.
with func_graph.FuncGraph('tmp').as_default():
init_test = test(*init_state)
# TensorFlow: Multiple evaluations are acceptable in this case, so we're fine
# with the re-evaluation of `test` that `_tf_while_stmt` will make.
if tensor_util.is_tensor(init_test):
return _tf_while_stmt(test, body, init_state, opts)
# Normal Python: We already consumed one evaluation of `test`; consistently,
# unroll one iteration before dispatching to a normal loop.
# TODO(mdan): Push the "init_test" value via opts into _py_while_stmt?
if not init_test:
return init_state
init_state = body(*init_state)
return _py_while_stmt(test, body, init_state, opts)
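# Illustrative sketch, not part of the original module: a simple countdown sum
# expressed through while_stmt, exercising the plain-Python overload.
def _example_while_stmt(n):
  return while_stmt(
      lambda i, total: i > 0,               # Loop condition over the state.
      lambda i, total: (i - 1, total + i),  # Body returns the new state.
      (n, 0))                               # init_state: (i, total).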
def _tf_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that stages a TF while_stmt."""
if opts is None:
opts = {}
undefined = tuple(filter(special_values.is_undefined, init_state))
if undefined:
raise ValueError(
'TensorFlow requires that the following symbols must be initialized '
'to a Tensor, Variable or TensorArray before the loop: {}'.format(
tuple(s.symbol_name for s in undefined)))
# Non-v2 while_loop unpacks the results when there is only one return value.
# This enforces consistency across versions.
opts['return_same_structure'] = True
retval = control_flow_ops.while_loop(test, body, init_state, **opts)
return retval
class _PythonLoopChecker(object):
"""Verifies Python loops for TF-specific limits."""
def __init__(self):
self.iterations = 0
self.check_inefficient_unroll = WARN_INEFFICIENT_UNROLL
# Triggered when we decided to test the op counts.
self.check_op_count_after_iteration = False
def _get_ops(self):
return ops.get_default_graph().get_operations()
def _check_unroll_limits(self):
if LIMIT_PYTHON_ITERATIONS and self.iterations > PYTHON_MAX_ITERATIONS:
raise errors.ExecutionError('Python', 'iteration limit exceeded')
def _stop_checking_inefficient_unroll(self):
self.check_inefficient_unroll = False
self.ops_before_iteration = None
  def _verify_inefficient_unroll(self):
"""Checks for possibly-inefficient creation of ops in a Python loop."""
assert self.ops_before_iteration is not None
ops_after_iteration = self._get_ops()
new_ops = tuple(
op for op in ops_after_iteration if op not in self.ops_before_iteration)
if len(new_ops) < INEFFICIENT_UNROLL_MIN_OPS:
return False
# TODO(mdan): Add location information.
ag_logging.warn(
'TensorFlow ops are being created in a Python loop with large number'
' of iterations. This can lead to slow startup. Did you mean to use a'
' TensorFlow loop? For example, `while True:` is a Python loop, and'
' `while tf.constant(True):` is a TensorFlow loop. The following'
' ops were created after iteration %s: %s', self.iterations, new_ops)
return True
def before_iteration(self):
"""Called before each iteration in a Python loop."""
if (self.check_inefficient_unroll and
self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS):
self.ops_before_iteration = self._get_ops()
self.check_op_count_after_iteration = True
def after_iteration(self):
"""Called after each iteration in a Python loop."""
self.iterations += 1
self._check_unroll_limits()
if self.check_inefficient_unroll and self.check_op_count_after_iteration:
      did_warn = self._verify_inefficient_unroll()
if did_warn:
self._stop_checking_inefficient_unroll() # Only warn once.
elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3:
# Once deciding to check the op counts, only do it for a few iterations.
self._stop_checking_inefficient_unroll()
def _py_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that executes a Python while loop."""
del opts
if __debug__:
checker = _PythonLoopChecker()
state = init_state
while test(*state):
if __debug__:
checker.before_iteration()
state = body(*state)
if __debug__:
checker.after_iteration()
return state
def if_stmt(cond, body, orelse, get_state, set_state):
"""Functional form of an if statement.
Args:
cond: Boolean.
body: Callable with no arguments, and outputs of the positive (if) branch
as return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
get_state: Function that returns a tuple containing the values of all
composite symbols modified within the conditional. This allows access to
state that branches may mutate through side effects. This function is
not needed and should not be called when dispatching to code matching
Python's default semantics. This is useful for checkpointing to avoid
unintended side-effects when staging requires evaluating all code-paths.
set_state: Function to set the values of all composite symbols modified
within the conditional. This is the complement to get_state, used to
      restore checkpointed values. The single argument is a tuple containing
      values for each composite symbol that may be modified in a branch of the
      conditional. This is usually the result of a call to get_state.
Returns:
Tuple containing the statement outputs.
"""
if tensor_util.is_tensor(cond):
return tf_if_stmt(cond, body, orelse, get_state, set_state)
else:
return _py_if_stmt(cond, body, orelse)
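# Illustrative sketch, not part of the original module: a plain Python
# conditional expressed through if_stmt. get_state/set_state only matter when
# staging a TF cond, so trivial callables are passed here.
def _example_if_stmt(x):
  return if_stmt(
      x > 0,
      lambda: 'positive',
      lambda: 'non-positive',
      lambda: (),       # get_state (unused in the Python overload).
      lambda _: None)   # set_state (unused in the Python overload).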
def tf_if_stmt(cond, body, orelse, get_state, set_state):
"""Overload of if_stmt that stages a TF cond."""
body = _disallow_undefs(body, branch_name='if')
orelse = _disallow_undefs(orelse, branch_name='else')
body = _isolate_state(body, get_state, set_state)
orelse = _isolate_state(orelse, get_state, set_state)
  # `state` currently includes the values of any composite symbols (e.g. `a.b`)
  # modified by the conditional. `outputs` includes the values of basic
# symbols (e.g. `a`) which cannot be passed by reference and must be returned.
# See _isolate_state.
# TODO(mdan): We should minimize calls to get/set_state.
outputs, final_state = control_flow_ops.cond(cond, body, orelse)
set_state(final_state)
return outputs
def _isolate_state(func, get_state, set_state):
"""Wraps func to (best-effort) isolate state mutations that func may do.
The simplest example of state mutation is mutation of variables (via e.g.
attributes), or modification of globals.
This allows us to more safely execute this function without worrying about
side effects when the function wasn't normally expected to execute. For
example, staging requires that the function is executed ahead of time, and
we need to ensure its effects are not observed during normal execution.
Args:
func: () -> Any
get_state: () -> Any, returns the current state
set_state: (Any) -> None, resets the state to the specified values.
Typically the result of an earlier call to `get_state`.
Returns:
Tuple[Any, Any], where the first element is the return value of `func`,
and the second is the final state values.
"""
def wrapper():
init_state = get_state()
outputs = func()
# TODO(mdan): These should be copies, lest set_state might affect them.
final_state = get_state()
set_state(init_state)
return outputs, final_state
return wrapper
def _disallow_undefs(func, branch_name):
"""Wraps function to raise useful error when it returns undefined symbols."""
def wrapper():
"""Calls function and raises an error if undefined symbols are returned."""
results = func()
if isinstance(results, tuple):
results_tuple = results
else:
results_tuple = results,
undefined = tuple(filter(special_values.is_undefined, results_tuple))
if undefined:
raise ValueError(
'The following symbols must also be initialized in the {} branch: {}.'
' Alternatively, you may initialize them before the if'
' statement.'.format(branch_name,
tuple(s.symbol_name for s in undefined)))
return results
return wrapper
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
|
kevin-coder/tensorflow-fork
|
tensorflow/python/autograph/operators/control_flow.py
|
Python
|
apache-2.0
| 14,926
|
import platform
class OSCollector(object):
def __init__(self, docker_client=None):
self.docker_client = docker_client
def key_name(self):
return "osInfo"
def _zip_fields_values(self, keys, values):
data = {}
for key, value in zip(keys, values):
if len(value) > 0:
data[key] = value
else:
data[key] = None
return data
def _get_docker_version(self):
data = {}
if platform.system() == 'Linux':
version = "Unknown"
if self.docker_client:
ver_resp = self.docker_client.version()
version = "Docker version {0}, build {1}".format(
ver_resp.get("Version", "Unknown"),
ver_resp.get("GitCommit", "Unknown"))
data['dockerVersion'] = version
return data
def _get_os(self):
data = {}
if platform.system() == 'Linux':
info = platform.linux_distribution()
keys = ["distribution", "version", "versionDescription"]
data = self._zip_fields_values(keys, info)
data['kernelVersion'] = \
platform.release() if len(platform.release()) > 0 else None
return data
def get_data(self):
data = self._get_os()
data.update(self._get_docker_version())
return data
|
dx9/python-agent
|
cattle/plugins/host_info/os_c.py
|
Python
|
apache-2.0
| 1,411
|
import datetime
import sys
from django.utils.timezone import utc, now
from django.core.management.base import BaseCommand
from feeds.models import Feed, Meta
from feeds.exceptions import FeedException
class Command(BaseCommand):
help = 'Fetches the items from all stored feeds.'
def handle(self, *args, **options):
for feed in Feed.objects.all():
print('updating', feed)
try:
feed.fetchItems()
except FeedException as e:
                print('Error: %s' % e, file=sys.stderr)
# clean up everything older than 60 days but keep at least 30 Items
keep = feed.items.all()[:30].values_list("id", flat=True)
feed.items.filter(published__lt=(now() - datetime.timedelta(days=60))).exclude(pk__in=list(keep)).delete()
# if everything went well set the updated field
meta = Meta.load()
meta.updated = datetime.datetime.utcnow().replace(tzinfo=utc)
meta.save()
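# Hedged usage note, not part of the original file: as a Django management
# command this is invoked through the project's manage.py, with the command
# name taken from this module's file name:
#
#   python manage.py fetch
#
# It is typically scheduled from cron so feeds stay up to date.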
|
jochenklar/reader2
|
feeds/management/commands/fetch.py
|
Python
|
apache-2.0
| 1,002
|
import os
import threading
from cachetools import LRUCache
from customUtilities.logger import logger
class Cache():
def __init__(self,cachefilename,CACHE_SIZE,logger=logger('detoursCache.log')):
self.lock = threading.RLock()
self.cachefilename = cachefilename
self.entry = LRUCache(maxsize=CACHE_SIZE)
self.logger=logger
self.hitcount=0
def hit(self):
self.lock.acquire(blocking=1)
try:
self.hitcount+=1
finally:
self.lock.release()
def reset(self):
self.lock.acquire(blocking=1)
try:
self.hitcount=0
finally:
self.lock.release()
def push(self,key,val):
self.lock.acquire(blocking=1)
try:
self.entry[key]=val
except:
return
finally:
self.lock.release()
def get(self,key):
self.lock.acquire(blocking=1)
try:
return self.entry[key]
except:
return False
finally:
self.lock.release()
def write_to_disk(self):
self.lock.acquire(blocking=1)
try:
cachefile = open(self.cachefilename,'w')
for key,val in self.entry.items():
print(key+'\t'+val,file=cachefile)
cachefile.close()
finally:
self.lock.release()
def load_from_disk(self):
self.lock.acquire(blocking=1)
try:
if os.path.exists(self.cachefilename):
with open(self.cachefilename, 'r') as f:
for line in f:
if line == "":
continue
rline = line.strip()
splitvals=rline.split('\t')
if len(splitvals) == 2:
key=splitvals[0]
valstr=splitvals[1]
self.entry[key]=valstr
else:
continue
except:
self.logger.error("Failed to read existing cache file")
raise("Error in loading previous cache file")
finally:
self.lock.release()
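# Hedged usage sketch, not part of the original module: typical lifecycle of
# the cache. The file name, key and value are made up; note that entries are
# written as tab-separated strings, so values are expected to be strings.
def _example_cache_usage():
    cache = Cache('detours.cache', CACHE_SIZE=128)
    cache.load_from_disk()          # No-op if the file does not exist yet.
    if not cache.get('AS1-AS2'):
        cache.push('AS1-AS2', 'detour')
    else:
        cache.hit()
    cache.write_to_disk()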
|
akshah/netra
|
Cache/detoursCache.py
|
Python
|
apache-2.0
| 2,309
|
#!/usr/bin/env python
"""
This script constructs a Marathon application definition for the stress tester container.
Be sure to deploy the latest stress tester docker image to the registry before running this.
"""
import argparse
import json
import os
import sys
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
PROJECT_PATH = os.path.dirname(BASE_PATH)
sys.path.append(os.path.join(PROJECT_PATH, 'lib/'))
from marathon_autoscaler.marathon import Marathon
def load_app_definition():
with open(os.path.join(os.getcwd(), "data", "stress_tester_app.json"), 'r') as f:
test_app_definition = json.load(f)
return test_app_definition
def load_stress_parameters():
with open(os.path.join(os.getcwd(), "data", "stress-parameters.json"), 'r') as f:
test_app_definition = json.load(f)
return test_app_definition
def load_autoscaler_parameters():
with open(os.path.join(os.getcwd(), "data", "autoscaler-parameters.json"), 'r') as f:
test_app_definition = json.load(f)
return test_app_definition
def parse_cli_args():
parser = argparse.ArgumentParser(description="Stress Tester Deployer")
parser.add_argument("--marathon-uri", dest="marathon_uri", type=str,
required=True, help="The Marathon Endpoint")
parser.add_argument("--marathon-user", dest="marathon_user", type=str,
required=True, help="Username for Marathon access")
parser.add_argument("--marathon-pass", dest="marathon_pass", type=str,
required=True, help="Password for Marathon access")
return parser.parse_args()
if __name__ == "__main__":
args = parse_cli_args()
app_def = load_app_definition()
mara = Marathon(args.marathon_uri, (args.marathon_user, args.marathon_pass))
stress_params = load_stress_parameters()
autoscaler_params = load_autoscaler_parameters()
print("""
Stress Parameters:
{0}
""".format(stress_params))
print("""
Scaling Parameters:
{0}
""".format(autoscaler_params))
app_def["labels"]["use_marathon_autoscaler"] = "0.0.3"
app_def["labels"]["min_instances"] = str(autoscaler_params["min_instances"])
app_def["labels"]["max_instances"] = str(autoscaler_params["max_instances"])
app_def["labels"]["mas_rule_scaleup_1"] = "cpu | >90 | PT2M | 1 | PT2M"
app_def["labels"]["mas_rule_scaleup_2"] = "mem | >90 | PT2M | 1 | PT2M"
app_def["labels"]["mas_rule_scaledown"] = "cpu | <90 | PT2M | -1 | PT2M"
app_def["env"]["INSTRUCTIONS"] = json.dumps(stress_params).replace("\n", "").replace(" ", "")
response = mara.create_app(app_def)
print(response)
|
tendrilinc/marathon-autoscaler
|
scripts/test_autoscaler.py
|
Python
|
apache-2.0
| 2,665
|
# Copyright (c) 2015 Alex Meade
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Navneet Singh
# Copyright (c) 2015 Yogesh Kshirsagar
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import socket
import time
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder import utils as cinder_utils
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(na_opts.netapp_basicauth_opts)
CONF.register_opts(na_opts.netapp_connection_opts)
CONF.register_opts(na_opts.netapp_eseries_opts)
CONF.register_opts(na_opts.netapp_transport_opts)
class NetAppESeriesLibrary(object):
"""Executes commands relating to Volumes."""
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_server_hostname', 'netapp_controller_ips',
'netapp_login', 'netapp_password',
'netapp_storage_pools']
SLEEP_SECS = 5
MAX_LUNS_PER_HOST = 255
HOST_TYPES = {'aix': 'AIX MPIO',
'avt': 'AVT_4M',
'factoryDefault': 'FactoryDefault',
'hpux': 'HP-UX TPGS',
'linux_atto': 'LnxTPGSALUA',
'linux_dm_mp': 'LnxALUA',
'linux_mpp_rdac': 'Linux',
'linux_pathmanager': 'LnxTPGSALUA_PM',
'macos': 'MacTPGSALUA',
'ontap': 'ONTAP',
'svc': 'SVC',
'solaris_v11': 'SolTPGSALUA',
'solaris_v10': 'Solaris',
'vmware': 'VmwTPGSALUA',
'windows':
'Windows 2000/Server 2003/Server 2008 Non-Clustered',
'windows_atto': 'WinTPGSALUA',
'windows_clustered':
'Windows 2000/Server 2003/Server 2008 Clustered'
}
# NOTE(ameade): This maps what is reported by the e-series api to a
# consistent set of values that are reported by all NetApp drivers
# to the cinder scheduler.
SSC_DISK_TYPE_MAPPING = {
'scsi': 'SCSI',
'fibre': 'FCAL',
'sas': 'SAS',
'sata': 'SATA',
}
SSC_UPDATE_INTERVAL = 60 # seconds
WORLDWIDENAME = 'worldWideName'
def __init__(self, driver_name, driver_protocol="iSCSI",
configuration=None, **kwargs):
self.configuration = configuration
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(
na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_eseries_opts)
self.lookup_service = fczm_utils.create_lookup_service()
self._backend_name = self.configuration.safe_get(
"volume_backend_name") or "NetApp_ESeries"
self.driver_name = driver_name
self.driver_protocol = driver_protocol
self._stats = {}
self._ssc_stats = {}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
port = self.configuration.netapp_server_port
scheme = self.configuration.netapp_transport_type.lower()
if port is None:
if scheme == 'http':
port = 8080
elif scheme == 'https':
port = 8443
self._client = client.RestClient(
scheme=scheme,
host=self.configuration.netapp_server_hostname,
port=port,
service_path=self.configuration.netapp_webservice_path,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password)
self._check_mode_get_or_register_storage_system()
def _start_periodic_tasks(self):
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc_info)
ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL)
def check_for_setup_error(self):
self._check_host_type()
self._check_multipath()
self._check_storage_system()
self._start_periodic_tasks()
def _check_host_type(self):
self.host_type =\
self.HOST_TYPES.get(self.configuration.netapp_eseries_host_type,
None)
if not self.host_type:
raise exception.NetAppDriverException(
_('Configured host type is not supported.'))
def _check_multipath(self):
if not self.configuration.use_multipath_for_image_xfer:
msg = _LW('Production use of "%(backend)s" backend requires the '
'Cinder controller to have multipathing properly set up '
'and the configuration option "%(mpflag)s" to be set to '
'"True".') % {'backend': self._backend_name,
'mpflag': 'use_multipath_for_image_xfer'}
LOG.warning(msg)
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
def _resolve_host(host):
try:
ip = na_utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
% {'host': host, 'e': e})
raise exception.NoValidHost(
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
% {'host': host, 'e': e})
ips = self.configuration.netapp_controller_ips
ips = [i.strip() for i in ips.split(",")]
ips = [x for x in ips if _resolve_host(x)]
host = na_utils.resolve_hostname(
self.configuration.netapp_server_hostname)
if host in ips:
LOG.info(_LI('Embedded mode detected.'))
system = self._client.list_storage_systems()[0]
else:
LOG.info(_LI('Proxy mode detected.'))
system = self._client.register_storage_system(
ips, password=self.configuration.netapp_sa_password)
self._client.set_system_id(system.get('id'))
def _check_storage_system(self):
"""Checks whether system is registered and has good status."""
try:
system = self._client.list_storage_system()
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
msg = _LI("System with controller addresses [%s] is not"
" registered with web service.")
LOG.info(msg % self.configuration.netapp_controller_ips)
password_not_in_sync = False
if system.get('status', '').lower() == 'passwordoutofsync':
password_not_in_sync = True
new_pwd = self.configuration.netapp_sa_password
self._client.update_stored_system_password(new_pwd)
time.sleep(self.SLEEP_SECS)
sa_comm_timeout = 60
comm_time = 0
while True:
system = self._client.list_storage_system()
status = system.get('status', '').lower()
# wait if array not contacted or
# password was not in sync previously.
if ((status == 'nevercontacted') or
(password_not_in_sync and status == 'passwordoutofsync')):
LOG.info(_LI('Waiting for web service array communication.'))
time.sleep(self.SLEEP_SECS)
comm_time = comm_time + self.SLEEP_SECS
if comm_time >= sa_comm_timeout:
msg = _("Failure in communication between web service and"
" array. Waited %s seconds. Verify array"
" configuration parameters.")
raise exception.NetAppDriverException(msg %
sa_comm_timeout)
else:
break
msg_dict = {'id': system.get('id'), 'status': status}
if (status == 'passwordoutofsync' or status == 'notsupported' or
status == 'offline'):
msg = _("System %(id)s found with bad status - %(status)s.")
raise exception.NetAppDriverException(msg % msg_dict)
LOG.info(_LI("System %(id)s has %(status)s status.") % msg_dict)
return True
def _get_volume(self, uid):
label = utils.convert_uuid_to_es_fmt(uid)
return self._get_volume_with_label_wwn(label)
def _get_volume_with_label_wwn(self, label=None, wwn=None):
"""Searches volume with label or wwn or both."""
if not (label or wwn):
raise exception.InvalidInput(_('Either volume label or wwn'
' is required as input.'))
wwn = wwn.replace(':', '').upper() if wwn else None
eseries_volume = None
for vol in self._client.list_volumes():
if label and vol.get('label') != label:
continue
if wwn and vol.get(self.WORLDWIDENAME).upper() != wwn:
continue
eseries_volume = vol
break
if not eseries_volume:
raise KeyError()
return eseries_volume
def _get_snapshot_group_for_snapshot(self, snapshot_id):
label = utils.convert_uuid_to_es_fmt(snapshot_id)
for group in self._client.list_snapshot_groups():
if group['label'] == label:
return group
msg = _("Specified snapshot group with label %s could not be found.")
raise exception.NotFound(msg % label)
def _get_latest_image_in_snapshot_group(self, snapshot_id):
group = self._get_snapshot_group_for_snapshot(snapshot_id)
images = self._client.list_snapshot_images()
if images:
filtered_images = filter(lambda img: (img['pitGroupRef'] ==
group['pitGroupRef']),
images)
sorted_imgs = sorted(filtered_images, key=lambda x: x[
'pitTimestamp'])
return sorted_imgs[0]
msg = _("No snapshot image found in snapshot group %s.")
raise exception.NotFound(msg % group['label'])
def _is_volume_containing_snaps(self, label):
"""Checks if volume contains snapshot groups."""
vol_id = utils.convert_es_fmt_to_uuid(label)
for snap in self._client.list_snapshot_groups():
if snap['baseVolume'] == vol_id:
return True
return False
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
eseries_volume = self._get_volume(volume['name_id'])
storage_pool = self._client.get_storage_pool(
eseries_volume['volumeGroupRef'])
if storage_pool:
return storage_pool.get('label')
def create_volume(self, volume):
"""Creates a volume."""
LOG.debug('create_volume on %s' % volume['host'])
# get E-series pool label as pool name
eseries_pool_label = volume_utils.extract_host(volume['host'],
level='pool')
if eseries_pool_label is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['name_id'])
# get size of the requested volume creation
size_gb = int(volume['size'])
self._create_volume(eseries_pool_label,
eseries_volume_label,
size_gb)
def _create_volume(self, eseries_pool_label, eseries_volume_label,
size_gb):
"""Creates volume with given label and size."""
target_pool = None
pools = self._get_storage_pools()
for pool in pools:
if pool["label"] == eseries_pool_label:
target_pool = pool
break
if not target_pool:
msg = _("Pools %s does not exist")
raise exception.NetAppDriverException(msg % eseries_pool_label)
try:
vol = self._client.create_volume(target_pool['volumeGroupRef'],
eseries_volume_label, size_gb)
LOG.info(_LI("Created volume with "
"label %s."), eseries_volume_label)
except exception.NetAppDriverException as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error creating volume. Msg - %s."),
six.text_type(e))
return vol
def _schedule_and_create_volume(self, label, size_gb):
"""Creates volume with given label and size."""
avl_pools = self._get_sorted_available_storage_pools(size_gb)
for pool in avl_pools:
try:
vol = self._client.create_volume(pool['volumeGroupRef'],
label, size_gb)
LOG.info(_LI("Created volume with label %s."), label)
return vol
except exception.NetAppDriverException as e:
LOG.error(_LE("Error creating volume. Msg - %s."), e)
msg = _("Failure creating volume %s.")
raise exception.NetAppDriverException(msg % label)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
label = utils.convert_uuid_to_es_fmt(volume['id'])
size = volume['size']
dst_vol = self._schedule_and_create_volume(label, size)
try:
src_vol = None
src_vol = self._create_snapshot_volume(snapshot['id'])
self._copy_volume_high_prior_readonly(src_vol, dst_vol)
LOG.info(_LI("Created volume with label %s."), label)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
self._client.delete_volume(dst_vol['volumeRef'])
finally:
if src_vol:
try:
self._client.delete_snapshot_volume(src_vol['id'])
except exception.NetAppDriverException as e:
LOG.error(_LE("Failure deleting snap vol. Error: %s."), e)
else:
LOG.warning(_LW("Snapshot volume not found."))
def _create_snapshot_volume(self, snapshot_id):
"""Creates snapshot volume for given group with snapshot_id."""
group = self._get_snapshot_group_for_snapshot(snapshot_id)
LOG.debug("Creating snap vol for group %s", group['label'])
image = self._get_latest_image_in_snapshot_group(snapshot_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
capacity = int(image['pitCapacity']) / units.Gi
storage_pools = self._get_sorted_available_storage_pools(capacity)
s_id = storage_pools[0]['volumeGroupRef']
return self._client.create_snapshot_volume(image['pitRef'], label,
group['baseVolume'], s_id)
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
% {'src': src_vol['label'], 'dst': dst_vol['label']})
try:
job = None
job = self._client.create_volume_copy_job(src_vol['id'],
dst_vol['volumeRef'])
while True:
j_st = self._client.list_vol_copy_job(job['volcopyRef'])
if (j_st['status'] == 'inProgress' or j_st['status'] ==
'pending' or j_st['status'] == 'unknown'):
time.sleep(self.SLEEP_SECS)
continue
if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
msg = _("Vol copy job for dest %s failed.")\
% dst_vol['label']
raise exception.NetAppDriverException(msg)
LOG.info(_LI("Vol copy job completed for dest %s.")
% dst_vol['label'])
break
finally:
if job:
try:
self._client.delete_vol_copy_job(job['volcopyRef'])
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting "
"job %s."), job['volcopyRef'])
else:
LOG.warning(_LW('Volume copy job for src vol %s not found.'),
src_vol['id'])
LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
snapshot = {'id': uuid.uuid4(), 'volume_id': src_vref['id'],
'volume': src_vref}
self.create_snapshot(snapshot)
try:
self.create_volume_from_snapshot(volume, snapshot)
finally:
try:
self.delete_snapshot(snapshot)
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting temp snapshot %s."),
snapshot['id'])
def delete_volume(self, volume):
"""Deletes a volume."""
try:
vol = self._get_volume(volume['name_id'])
self._client.delete_volume(vol['volumeRef'])
except exception.NetAppDriverException:
LOG.warning(_LI("Volume %s already deleted."), volume['id'])
return
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snap_grp, snap_image = None, None
snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
os_vol = snapshot['volume']
vol = self._get_volume(os_vol['name_id'])
vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
pools = self._get_sorted_available_storage_pools(vol_size_gb)
try:
snap_grp = self._client.create_snapshot_group(
snapshot_name, vol['volumeRef'], pools[0]['volumeGroupRef'])
snap_image = self._client.create_snapshot_image(
snap_grp['pitGroupRef'])
LOG.info(_LI("Created snap grp with label %s."), snapshot_name)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
if snap_image is None and snap_grp:
self.delete_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
snap_grp = self._get_snapshot_group_for_snapshot(snapshot['id'])
except exception.NotFound:
LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id'])
return
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
def initialize_connection_iscsi(self, volume, connector):
"""Allow connection to connector and return connection info."""
initiator_name = connector['initiator']
eseries_vol = self._get_volume(volume['name_id'])
mapping = self._map_volume_to_host(eseries_vol, [initiator_name])
lun_id = mapping['lun']
msg = _("Mapped volume %(id)s to the initiator %(initiator_name)s.")
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
iscsi_details = self._get_iscsi_service_details()
iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
iscsi_details)
msg = _("Successfully fetched target details for volume %(id)s and "
"initiator %(initiator_name)s.")
LOG.debug(msg % msg_fmt)
iqn = iscsi_portal['iqn']
address = iscsi_portal['ip']
port = iscsi_portal['tcp_port']
properties = na_utils.get_iscsi_connection_properties(lun_id, volume,
iqn, address,
port)
return properties
def initialize_connection_fc(self, volume, connector):
"""Initializes the connection and returns connection info.
Assigns the specified volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '500a098280feeba5',
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5'],
'21000024ff406cc2': ['500a098280feeba5']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['500a098280feeba5', '500a098290feeba5',
'500a098190feeba5', '500a098180feeba5'],
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5',
'500a098290feeba5'],
'21000024ff406cc2': ['500a098190feeba5',
'500a098180feeba5']
}
}
}
"""
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
eseries_vol = self._get_volume(volume['name_id'])
mapping = self._map_volume_to_host(eseries_vol, initiators)
lun_id = mapping['lun']
initiator_info = self._build_initiator_target_map_fc(connector)
target_wwpns, initiator_target_map, num_paths = initiator_info
if target_wwpns:
msg = ("Successfully fetched target details for LUN %(id)s "
"and initiator(s) %(initiators)s.")
msg_fmt = {'id': volume['id'], 'initiators': initiators}
LOG.debug(msg, msg_fmt)
else:
msg = _('Failed to get LUN target details for the LUN %s.')
raise exception.VolumeBackendAPIException(data=msg % volume['id'])
target_info = {'driver_volume_type': 'fibre_channel',
'data': {'target_discovered': True,
'target_lun': int(lun_id),
'target_wwn': target_wwpns,
'access_mode': 'rw',
'initiator_target_map': initiator_target_map}}
return target_info
def terminate_connection_fc(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:returns: data - the target_wwns and initiator_target_map if the
zone is to be removed, otherwise the same map with
an empty dict for the 'data' key
"""
eseries_vol = self._get_volume(volume['name_id'])
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
host = self._get_host_with_matching_port(initiators)
mappings = eseries_vol.get('listOfMappings', [])
# There can only be one or zero mappings on a volume in E-Series
mapping = mappings[0] if mappings else None
if not mapping:
msg = _("Mapping not found for %(vol)s to host %(ht)s.")
raise exception.NotFound(msg % {'vol': eseries_vol['volumeRef'],
'ht': host['hostRef']})
self._client.delete_volume_mapping(mapping['lunMappingRef'])
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if len(self._client.get_volume_mappings_for_host(
host['hostRef'])) == 0:
# No more exports for this host, so tear down zone.
LOG.info(_LI("Need to remove FC Zone, building initiator "
"target map."))
initiator_info = self._build_initiator_target_map_fc(connector)
target_wwpns, initiator_target_map, num_paths = initiator_info
info['data'] = {'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map}
return info
def _build_initiator_target_map_fc(self, connector):
"""Build the target_wwns and the initiator target map."""
# get WWPNs from controller and strip colons
all_target_wwpns = self._client.list_target_wwpns()
all_target_wwpns = [six.text_type(wwpn).replace(':', '')
for wwpn in all_target_wwpns]
target_wwpns = []
init_targ_map = {}
num_paths = 0
if self.lookup_service:
# Use FC SAN lookup to determine which ports are visible.
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'],
all_target_wwpns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwpns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
for target in init_targ_map[initiator]:
num_paths += 1
target_wwpns = list(set(target_wwpns))
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwpns
return target_wwpns, init_targ_map, num_paths
def _get_iscsi_service_details(self):
"""Gets iscsi iqn, ip and port information."""
ports = []
hw_inventory = self._client.list_hardware_inventory()
iscsi_ports = hw_inventory.get('iscsiPorts')
if iscsi_ports:
for port in iscsi_ports:
if (port.get('ipv4Enabled') and port.get('iqn') and
port.get('ipv4Data') and
port['ipv4Data'].get('ipv4AddressData') and
port['ipv4Data']['ipv4AddressData']
.get('ipv4Address') and port['ipv4Data']
['ipv4AddressData'].get('configState')
== 'configured'):
iscsi_det = {}
iscsi_det['ip'] =\
port['ipv4Data']['ipv4AddressData']['ipv4Address']
iscsi_det['iqn'] = port['iqn']
iscsi_det['tcp_port'] = port.get('tcpListenPort')
iscsi_det['controller'] = port.get('controllerId')
ports.append(iscsi_det)
if not ports:
msg = _('No good iscsi portals found for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
return ports
def _get_iscsi_portal_for_vol(self, volume, portals, anyController=True):
"""Get the iscsi portal info relevant to volume."""
for portal in portals:
if portal.get('controller') == volume.get('currentManager'):
return portal
if anyController and portals:
return portals[0]
msg = _('No good iscsi portal found in supplied list for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
@cinder_utils.synchronized('map_es_volume')
def _map_volume_to_host(self, vol, initiators):
"""Maps the e-series volume to host with initiator."""
host = self._get_or_create_host(initiators, self.host_type)
vol_maps = self._get_host_mapping_for_vol_frm_array(vol)
for vol_map in vol_maps:
if vol_map.get('mapRef') == host['hostRef']:
return vol_map
else:
self._client.delete_volume_mapping(vol_map['lunMappingRef'])
mappings = self._get_vol_mapping_for_host_frm_array(host['hostRef'])
lun = self._get_free_lun(host, mappings)
return self._client.create_volume_mapping(vol['volumeRef'],
host['hostRef'], lun)
def _get_or_create_host(self, port_ids, host_type):
"""Fetch or create a host by given port."""
try:
host = self._get_host_with_matching_port(port_ids)
ht_def = self._get_host_type_definition(host_type)
if host.get('hostTypeIndex') == ht_def.get('index'):
return host
else:
try:
return self._client.update_host_type(
host['hostRef'], ht_def)
except exception.NetAppDriverException as e:
msg = _LW("Unable to update host type for host with "
"label %(l)s. %(e)s")
LOG.warning(msg % {'l': host['label'], 'e': e.msg})
return host
except exception.NotFound as e:
LOG.warning(_LW("Message - %s."), e.msg)
return self._create_host(port_ids, host_type)
def _get_host_with_matching_port(self, port_ids):
"""Gets or creates a host with given port id."""
# Remove any extra colons
port_ids = [six.text_type(wwpn).replace(':', '')
for wwpn in port_ids]
hosts = self._client.list_hosts()
for port_id in port_ids:
for host in hosts:
if host.get('hostSidePorts'):
ports = host.get('hostSidePorts')
for port in ports:
address = port.get('address').upper().replace(':', '')
if address == port_id.upper():
return host
msg = _("Host with ports %(ports)s not found.")
raise exception.NotFound(msg % {'ports': port_ids})
def _create_host(self, port_ids, host_type):
"""Creates host on system with given initiator as port_id."""
LOG.info(_LI("Creating host with ports %s."), port_ids)
host_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
host_type = self._get_host_type_definition(host_type)
port_type = self.driver_protocol.lower()
return self._client.create_host_with_ports(host_label,
host_type,
port_ids,
port_type=port_type)
def _get_host_type_definition(self, host_type):
"""Gets supported host type if available on storage system."""
host_types = self._client.list_host_types()
for ht in host_types:
if ht.get('name', 'unknown').lower() == host_type.lower():
return ht
raise exception.NotFound(_("Host type %s not supported.") % host_type)
def _get_free_lun(self, host, maps=None):
"""Gets free LUN for given host."""
ref = host['hostRef']
luns = maps or self._get_vol_mapping_for_host_frm_array(ref)
used_luns = set(map(lambda lun: int(lun['lun']), luns))
for lun in xrange(self.MAX_LUNS_PER_HOST):
if lun not in used_luns:
return lun
msg = _("No free LUNs. Host might exceeded max LUNs.")
raise exception.NetAppDriverException(msg)
def _get_vol_mapping_for_host_frm_array(self, host_ref):
"""Gets all volume mappings for given host from array."""
mappings = self._client.get_volume_mappings() or []
host_maps = filter(lambda x: x.get('mapRef') == host_ref, mappings)
return host_maps
def _get_host_mapping_for_vol_frm_array(self, volume):
"""Gets all host mappings for given volume from array."""
mappings = self._client.get_volume_mappings() or []
host_maps = filter(lambda x: x.get('volumeRef') == volume['volumeRef'],
mappings)
return host_maps
def terminate_connection_iscsi(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
eseries_vol = self._get_volume(volume['name_id'])
host = self._get_host_with_matching_port([connector['initiator']])
mapping = self._get_cached_vol_mapping_for_host(eseries_vol, host)
self._client.delete_volume_mapping(mapping['lunMappingRef'])
def _get_cached_vol_mapping_for_host(self, volume, host):
"""Gets cached volume mapping for given host."""
mappings = volume.get('listOfMappings') or []
for mapping in mappings:
if mapping.get('mapRef') == host['hostRef']:
return mapping
msg = _("Mapping not found for %(vol)s to host %(ht)s.")
raise exception.NotFound(msg % {'vol': volume['volumeRef'],
'ht': host['hostRef']})
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service."""
if refresh:
if not self._ssc_stats:
self._update_ssc_info()
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Update volume statistics."""
LOG.debug("Updating volume stats.")
data = dict()
data["volume_backend_name"] = self._backend_name
data["vendor_name"] = "NetApp"
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.driver_protocol
data["pools"] = []
for storage_pool in self._get_storage_pools():
cinder_pool = {}
cinder_pool["pool_name"] = storage_pool.get("label")
cinder_pool["QoS_support"] = False
cinder_pool["reserved_percentage"] = 0
tot_bytes = int(storage_pool.get("totalRaidedSpace", 0))
used_bytes = int(storage_pool.get("usedSpace", 0))
cinder_pool["free_capacity_gb"] = ((tot_bytes - used_bytes) /
units.Gi)
cinder_pool["total_capacity_gb"] = tot_bytes / units.Gi
pool_ssc_stats = self._ssc_stats.get(
storage_pool["volumeGroupRef"])
if pool_ssc_stats:
cinder_pool.update(pool_ssc_stats)
data["pools"].append(cinder_pool)
self._stats = data
self._garbage_collect_tmp_vols()
@cinder_utils.synchronized("netapp_update_ssc_info", external=False)
def _update_ssc_info(self):
"""Periodically runs to update ssc information from the backend.
The self._ssc_stats attribute is updated with the following format.
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
"""
LOG.info(_LI("Updating storage service catalog information for "
"backend '%s'") % self._backend_name)
self._ssc_stats = \
self._update_ssc_disk_encryption(self._get_storage_pools())
self._ssc_stats = \
self._update_ssc_disk_types(self._get_storage_pools())
def _update_ssc_disk_types(self, volume_groups):
"""Updates the given ssc dictionary with new disk type information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
all_disks = self._client.list_drives()
pool_ids = set(pool.get("volumeGroupRef") for pool in volume_groups)
relevant_disks = filter(lambda x: x.get('currentVolumeGroupRef') in
pool_ids, all_disks)
for drive in relevant_disks:
current_vol_group = drive.get('currentVolumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
if drive.get("driveMediaType") == 'ssd':
ssc_stats[current_vol_group]['netapp_disk_type'] = 'SSD'
else:
disk_type = drive.get('interfaceType').get('driveType')
ssc_stats[current_vol_group]['netapp_disk_type'] = \
self.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')
return ssc_stats
def _update_ssc_disk_encryption(self, volume_groups):
"""Updates the given ssc dictionary with new disk encryption information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
for pool in volume_groups:
current_vol_group = pool.get('volumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
ssc_stats[current_vol_group]['netapp_disk_encryption'] = 'true' \
if pool['securityType'] == 'enabled' else 'false'
return ssc_stats
def _get_storage_pools(self):
conf_enabled_pools = []
for value in self.configuration.netapp_storage_pools.split(','):
if value:
conf_enabled_pools.append(value.strip().lower())
filtered_pools = []
storage_pools = self._client.list_storage_pools()
for storage_pool in storage_pools:
# Check if pool can be used
if (storage_pool.get('raidLevel') == 'raidDiskPool'
and storage_pool['label'].lower() in conf_enabled_pools):
filtered_pools.append(storage_pool)
return filtered_pools
def _get_sorted_available_storage_pools(self, size_gb):
"""Returns storage pools sorted on available capacity."""
size = size_gb * units.Gi
sorted_pools = sorted(self._get_storage_pools(), key=lambda x:
(int(x.get('totalRaidedSpace', 0))
- int(x.get('usedSpace', 0))), reverse=True)
avl_pools = filter(lambda x: ((int(x.get('totalRaidedSpace', 0)) -
int(x.get('usedSpace', 0)) >= size)),
sorted_pools)
if not avl_pools:
msg = _LW("No storage pool found with available capacity %s.")
LOG.warning(msg % size_gb)
return avl_pools
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
stage_1, stage_2 = 0, 0
src_vol = self._get_volume(volume['name_id'])
src_label = src_vol['label']
stage_label = 'tmp-%s' % utils.convert_uuid_to_es_fmt(uuid.uuid4())
extend_vol = {'id': uuid.uuid4(), 'size': new_size}
self.create_cloned_volume(extend_vol, volume)
new_vol = self._get_volume(extend_vol['id'])
try:
stage_1 = self._client.update_volume(src_vol['id'], stage_label)
stage_2 = self._client.update_volume(new_vol['id'], src_label)
new_vol = stage_2
LOG.info(_LI('Extended volume with label %s.'), src_label)
except exception.NetAppDriverException:
if stage_1 == 0:
with excutils.save_and_reraise_exception():
self._client.delete_volume(new_vol['id'])
if stage_2 == 0:
with excutils.save_and_reraise_exception():
self._client.update_volume(src_vol['id'], src_label)
self._client.delete_volume(new_vol['id'])
def _garbage_collect_tmp_vols(self):
"""Removes tmp vols with no snapshots."""
try:
if not na_utils.set_safe_attr(self, 'clean_job_running', True):
LOG.warning(_LW('Returning as clean tmp '
'vol job already running.'))
return
for vol in self._client.list_volumes():
label = vol['label']
if (label.startswith('tmp-') and
not self._is_volume_containing_snaps(label)):
try:
self._client.delete_volume(vol['volumeRef'])
except exception.NetAppDriverException as e:
                        LOG.debug("Error deleting vol with label %s: %s",
                                  label, e)
finally:
na_utils.set_safe_attr(self, 'clean_job_running', False)
@cinder_utils.synchronized('manage_existing')
def manage_existing(self, volume, existing_ref):
"""Brings an existing storage object under Cinder management."""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
label = utils.convert_uuid_to_es_fmt(volume['id'])
if label == vol['label']:
LOG.info(_LI("Volume with given ref %s need not be renamed during"
" manage operation."), existing_ref)
managed_vol = vol
else:
managed_vol = self._client.update_volume(vol['id'], label)
LOG.info(_LI("Manage operation completed for volume with new label"
" %(label)s and wwn %(wwn)s."),
{'label': label, 'wwn': managed_vol[self.WORLDWIDENAME]})
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
return int(math.ceil(float(vol['capacity']) / units.Gi))
def _get_existing_vol_with_manage_ref(self, volume, existing_ref):
try:
return self._get_volume_with_label_wwn(
existing_ref.get('source-name'), existing_ref.get('source-id'))
except exception.InvalidInput:
reason = _('Reference must contain either source-name'
' or source-id element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
except KeyError:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_('Volume not found on configured storage pools.'))
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. Logs a
message to indicate the volume is no longer under Cinder's control.
"""
managed_vol = self._get_volume(volume['id'])
LOG.info(_LI("Unmanaged volume with current label %(label)s and wwn "
"%(wwn)s."), {'label': managed_vol['label'],
'wwn': managed_vol[self.WORLDWIDENAME]})
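
# --- Illustrative sketch (not part of the original driver) ---
# _get_free_lun() above picks the lowest LUN number not already used by the
# host's existing mappings. A minimal standalone version of that selection,
# assuming a plain list of mapping dicts and a hypothetical max_luns limit:
def pick_free_lun(existing_mappings, max_luns=256):
    """Return the lowest unused LUN id, or raise if none is free."""
    used = set(int(m['lun']) for m in existing_mappings)
    for lun in range(max_luns):
        if lun not in used:
            return lun
    raise RuntimeError("No free LUNs available for this host.")

# Example: with LUNs 0 and 1 already mapped, the next mapping gets LUN 2.
# pick_free_lun([{'lun': 0}, {'lun': 1}])  -> 2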
|
tmenjo/cinder-2015.1.1
|
cinder/volume/drivers/netapp/eseries/library.py
|
Python
|
apache-2.0
| 45,592
|
from pathlib import Path
from jobman.jobman import JobMan
from mc.clients.job_record_client import JobRecordClient
from mc.clients.flow_record_client import FlowRecordClient
from mc.flows.flow_engine import FlowEngine
from mc.db.db import Db
from mc.runners.flow_runner import FlowRunner
from mc.runners.jobman_job_runner.job_runner import JobRunner
class HoustonUtils(object):
JOBS_SUBDIRS = ['pending', 'queued', 'executed', 'archive']
def __init__(self, houston=None):
self.houston = houston
@property
def cfg(self): return self.houston.cfg
@property
def db(self):
if not hasattr(self, '_db'):
self._db = self.generate_db(db_uri=self.cfg['MC_DB_URI'])
return self._db
def generate_db(self, db_uri=None, schema=None):
return Db(db_uri=db_uri, schema=schema)
@db.setter
    def db(self, value): self._db = value
def ensure_queues(self):
self.ensure_queue(queue_cfg=self.cfg['FLOW_QUEUE'])
self.ensure_queue(queue_cfg=self.cfg['JOB_QUEUE'])
def ensure_queue(self, queue_cfg=None):
try:
self.db.get_item_by_key(item_type='queue', key=queue_cfg['key'])
except self.db.ItemNotFoundError:
self.db.create_item(
item_type='queue',
item_kwargs={
'key': queue_cfg['key'],
**queue_cfg.get('queue_kwargs', {})
}
)
@property
def flow_runner(self):
if not hasattr(self, '_flow_runner'):
self._flow_runner = FlowRunner(
flow_engine=self.flow_engine,
flow_record_client=self.flow_record_client,
task_ctx={
'mc.flow_record_client': self.flow_record_client,
'mc.job_record_client': self.job_record_client,
}
)
return self._flow_runner
@flow_runner.setter
def flow_runner(self, new_value): self._flow_runner = new_value
@property
def flow_engine(self):
if not hasattr(self, '_flow_engine'):
self._flow_engine = FlowEngine()
return self._flow_engine
@flow_engine.setter
def flow_engine(self, new_value): self._flow_engine = new_value
@property
def flow_record_client(self):
if not hasattr(self, '_flow_record_client'):
self._flow_record_client = self._get_mc_client(record_type='flow')
return self._flow_record_client
@flow_record_client.setter
def flow_record_client(self, new_value):
self._flow_record_client = new_value
@property
def job_record_client(self):
if not hasattr(self, '_job_record_client'):
self._job_record_client = self._get_mc_client(record_type='job')
return self._job_record_client
def _get_mc_client(self, record_type=None):
client_cls = None
if record_type == 'flow':
client_cls = FlowRecordClient
elif record_type == 'job':
client_cls = JobRecordClient
assert client_cls is not None
queue_cfg = self.cfg[record_type.upper() + '_QUEUE']
return client_cls(mc_db=self.db,
use_locks=self.cfg.get('USE_LOCKS', True),
queue_key=queue_cfg['key'])
@job_record_client.setter
def job_record_client(self, new_value): self._job_record_client = new_value
@property
    def job_runner(self):
if not hasattr(self, '_job_runner'):
self._job_runner = JobRunner(
artifact_handler=self.cfg['ARTIFACT_HANDLER'],
job_record_client=self.job_record_client,
jobman=self.jobman,
jobdirs_dir=self.cfg.get('JOBDIRS_DIR', None),
build_jobdir_fn=self.build_jobdir,
)
return self._job_runner
@job_runner.setter
def job_runner(self, new_value): self._job_runner = new_value
@property
def jobman(self):
if not hasattr(self, '_jobman'):
self._jobman = JobMan.from_cfg(cfg=self.cfg['JOBMAN_CFG'])
return self._jobman
@jobman.setter
def jobman(self, new_value): self._jobman = new_value
def build_jobdir(self, *args, **kwargs):
try:
build_jobdir_fn = self.cfg['BUILD_JOBDIR_FN']
        except KeyError:
def build_jobdir_fn(*args, **kwargs):
return self.houston.run_command('build_job_dir')
return build_jobdir_fn(*args, **kwargs)
def has_unfinished_mc_records(self):
unfinished_records = self.get_unfinished_mc_records()
for record_type, records in unfinished_records.items():
if len(records) > 0:
return True
return False
def get_unfinished_mc_records(self):
return {
record_type: self._get_unfinished_mc_items(item_type=record_type)
for record_type in ['flow', 'job']
}
def _get_unfinished_mc_items(self, item_type=None):
return self.db.query_items(item_type=item_type, query={
'filters': [
{'field': 'status', 'op': '! IN',
'arg': ['FAILED', 'COMPLETED']}
]
})
def ensure_job_dirs(self):
for dir in self.job_dirs.values():
Path(dir).mkdir(parents=True, exist_ok=True)
@property
def job_dirs(self):
if not hasattr(self, '_job_dirs'):
self._job_dirs = {'root': self.cfg.get('JOB_DIRS_ROOT', None)}
for jobs_subdir in self.JOBS_SUBDIRS:
self._job_dirs[jobs_subdir] = str(Path(self._job_dirs['root'],
jobs_subdir))
return self._job_dirs
@job_dirs.setter
def job_dirs(self, value): self._job_dirs = value
@property
def archiver(self):
if not hasattr(self, '_archiver'):
self._archiver = self._generate_archiver()
return self._archiver
def _generate_archiver(self):
from mc.utils.archivers.dir_archiver import DirArchiver
return DirArchiver(root_dir=self.job_dirs['archive'])
@property
def entity_selector(self):
if not hasattr(self, '_entity_selector'):
from mc.utils.selectors.basic_entity_selector import (
BasicEntitySelector)
self._entity_selector = BasicEntitySelector(db=self.db)
return self._entity_selector
@property
def request_selector(self):
if not hasattr(self, '_request_selector'):
from mc.utils.selectors.basic_request_selector import (
BasicRequestSelector)
self._request_selector = BasicRequestSelector(db=self.db)
return self._request_selector
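
# --- Illustrative sketch (not part of the original module) ---
# HoustonUtils builds most of its collaborators lazily: each property creates
# the object on first access and caches it on the instance, while a matching
# setter lets callers (e.g. tests) inject a replacement. The bare pattern,
# shown with a hypothetical `engine` collaborator:
class _ExampleLazyHolder(object):
    @property
    def engine(self):
        if not hasattr(self, '_engine'):
            self._engine = object()  # stand-in for an expensive default
        return self._engine

    @engine.setter
    def engine(self, new_value):
        self._engine = new_value

# holder = _ExampleLazyHolder()
# holder.engine            # built on first access, then cached
# holder.engine = 'fake'   # tests can override the cached collaborator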
|
aspuru-guzik-group/mission_control
|
mc/houston/utils.py
|
Python
|
apache-2.0
| 6,799
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from datetime import datetime
def get_id(obj):
"""Get obj's uuid or object itself if no uuid
Abstracts the common pattern of allowing both an object or
an object's ID (UUID) as a parameter when dealing with relationships.
"""
try:
return obj.uuid or obj['uuid']
except AttributeError:
return obj
def remove_empty_from_dict(original):
"""get a new dict which removes keys with empty values
:param dict original: original dict, should not be None
:return: a new dict which removes keys with empty values
"""
return dict((k, v) for k, v in original.iteritems()
if v is not None and v != '' and v != [] and v != {})
def str_range(start, end):
"""get range with string type"""
return [str(i) for i in range(start, end)]
def format_time(time_in_long, _format='%Y-%m-%d %H:%M:%S'):
if time_in_long:
# if time-long is with mill seconds
if time_in_long > pow(10, 12):
time_in_long /= 1000
timestamp = datetime.fromtimestamp(time_in_long)
return timestamp.strftime(_format)
else:
return ''
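
# Illustrative usage (not part of the original module). format_time() treats
# values above 10**12 as milliseconds and scales them to seconds before
# formatting; str_range() simply stringifies a numeric range.
if __name__ == '__main__':
    print(str_range(1, 4))             # -> ['1', '2', '3']
    print(format_time(1500000000))     # seconds since the epoch
    print(format_time(1500000000000))  # same instant given in milliseconds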
|
Huawei/OpenStackClient_Auto-Scaling
|
asclient/common/utils.py
|
Python
|
apache-2.0
| 1,745
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility for GitHub REST API.
This script handles GitHub Issue, Pull Request, Comment, Label and Artifact
"""
import requests
import json
import shutil
import re
from absl import logging
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
RETRIES = 3
BACKOFF = 5
RETRY_STATUS = (403, 500, 502, 504)
TIMEOUT = 5
OWNER = 'firebase'
REPO = 'firebase-cpp-sdk'
BASE_URL = 'https://api.github.com'
GITHUB_API_URL = '%s/repos/%s/%s' % (BASE_URL, OWNER, REPO)
logging.set_verbosity(logging.INFO)
def set_repo_url(repo):
match = re.match(r'https://github\.com/([^/]+)/([^/.]+)', repo)
if not match:
    logging.info('Error, only the pattern https://github.com/{repo_owner}/{repo_name} is allowed.')
return False
(repo_owner, repo_name) = match.groups()
global OWNER, REPO, GITHUB_API_URL
OWNER = repo_owner
REPO = repo_name
GITHUB_API_URL = '%s/repos/%s/%s' % (BASE_URL, OWNER, REPO)
return True
def requests_retry_session(retries=RETRIES,
backoff_factor=BACKOFF,
status_forcelist=RETRY_STATUS):
session = requests.Session()
retry = Retry(total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
def create_issue(token, title, label, body):
"""Create an issue: https://docs.github.com/en/rest/reference/issues#create-an-issue"""
url = f'{GITHUB_API_URL}/issues'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
data = {'title': title, 'labels': [label], 'body': body}
with requests.post(url, headers=headers, data=json.dumps(data), timeout=TIMEOUT) as response:
logging.info("create_issue: %s response: %s", url, response)
return response.json()
def get_issue_body(token, issue_number):
"""https://docs.github.com/en/rest/reference/issues#get-an-issue-comment"""
url = f'{GITHUB_API_URL}/issues/{issue_number}'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
with requests_retry_session().get(url, headers=headers, timeout=TIMEOUT) as response:
logging.info("get_issue_body: %s response: %s", url, response)
return response.json()["body"]
def update_issue(token, issue_number, data):
"""Update an issue: https://docs.github.com/en/rest/reference/issues#update-an-issue"""
url = f'{GITHUB_API_URL}/issues/{issue_number}'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
with requests_retry_session().patch(url, headers=headers, data=json.dumps(data), timeout=TIMEOUT) as response:
logging.info("update_issue: %s response: %s", url, response)
def open_issue(token, issue_number):
update_issue(token, issue_number, data={'state': 'open'})
def close_issue(token, issue_number):
update_issue(token, issue_number, data={'state': 'closed'})
def update_issue_comment(token, issue_number, comment):
update_issue(token, issue_number, data={'body': comment})
def search_issues_by_label(label):
"""https://docs.github.com/en/rest/reference/search#search-issues-and-pull-requests"""
url = f'{BASE_URL}/search/issues?q=repo:{OWNER}/{REPO}+label:"{label}"+is:issue'
headers = {'Accept': 'application/vnd.github.v3+json'}
with requests_retry_session().get(url, headers=headers, timeout=TIMEOUT) as response:
logging.info("search_issues_by_label: %s response: %s", url, response)
return response.json()["items"]
def list_comments(token, issue_number):
"""https://docs.github.com/en/rest/reference/issues#list-issue-comments"""
url = f'{GITHUB_API_URL}/issues/{issue_number}/comments'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
with requests_retry_session().get(url, headers=headers, timeout=TIMEOUT) as response:
logging.info("list_comments: %s response: %s", url, response)
return response.json()
def add_comment(token, issue_number, comment):
"""https://docs.github.com/en/rest/reference/issues#create-an-issue-comment"""
url = f'{GITHUB_API_URL}/issues/{issue_number}/comments'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
data = {'body': comment}
with requests.post(url, headers=headers, data=json.dumps(data), timeout=TIMEOUT) as response:
logging.info("add_comment: %s response: %s", url, response)
def update_comment(token, comment_id, comment):
"""https://docs.github.com/en/rest/reference/issues#update-an-issue-comment"""
url = f'{GITHUB_API_URL}/issues/comments/{comment_id}'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
data = {'body': comment}
with requests_retry_session().patch(url, headers=headers, data=json.dumps(data), timeout=TIMEOUT) as response:
logging.info("update_comment: %s response: %s", url, response)
def delete_comment(token, comment_id):
"""https://docs.github.com/en/rest/reference/issues#delete-an-issue-comment"""
url = f'{GITHUB_API_URL}/issues/comments/{comment_id}'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
with requests.delete(url, headers=headers, timeout=TIMEOUT) as response:
logging.info("delete_comment: %s response: %s", url, response)
def add_label(token, issue_number, label):
"""https://docs.github.com/en/rest/reference/issues#add-labels-to-an-issue"""
url = f'{GITHUB_API_URL}/issues/{issue_number}/labels'
  headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
data = [label]
with requests.post(url, headers=headers, data=json.dumps(data), timeout=TIMEOUT) as response:
logging.info("add_label: %s response: %s", url, response)
def delete_label(token, issue_number, label):
"""https://docs.github.com/en/rest/reference/issues#delete-a-label"""
url = f'{GITHUB_API_URL}/issues/{issue_number}/labels/{label}'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
with requests.delete(url, headers=headers, timeout=TIMEOUT) as response:
logging.info("delete_label: %s response: %s", url, response)
def list_artifacts(token, run_id):
"""https://docs.github.com/en/rest/reference/actions#list-workflow-run-artifacts"""
url = f'{GITHUB_API_URL}/actions/runs/{run_id}/artifacts'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
with requests_retry_session().get(url, headers=headers, timeout=TIMEOUT) as response:
logging.info("list_artifacts: %s response: %s", url, response)
return response.json()["artifacts"]
def download_artifact(token, artifact_id, output_path):
"""https://docs.github.com/en/rest/reference/actions#download-an-artifact"""
url = f'{GITHUB_API_URL}/actions/artifacts/{artifact_id}/zip'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
with requests.get(url, headers=headers, stream=True, timeout=TIMEOUT) as response:
logging.info("download_artifact: %s response: %s", url, response)
with open(output_path, 'wb') as file:
shutil.copyfileobj(response.raw, file)
def dismiss_review(token, pull_number, review_id, message):
"""https://docs.github.com/en/rest/reference/pulls#dismiss-a-review-for-a-pull-request"""
url = f'{GITHUB_API_URL}/pulls/{pull_number}/reviews/{review_id}/dismissals'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
data = {'message': message}
with requests_retry_session().put(url, headers=headers, data=json.dumps(data),
stream=True, timeout=TIMEOUT) as response:
logging.info("dismiss_review: %s response: %s", url, response)
return response.json()
def get_reviews(token, pull_number):
"""https://docs.github.com/en/rest/reference/pulls#list-reviews-for-a-pull-request"""
url = f'{GITHUB_API_URL}/pulls/{pull_number}/reviews'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
page = 1
per_page = 100
results = []
keep_going = True
while keep_going:
params = {'per_page': per_page, 'page': page}
page = page + 1
keep_going = False
with requests_retry_session().get(url, headers=headers, params=params,
stream=True, timeout=TIMEOUT) as response:
logging.info("get_reviews: %s response: %s", url, response)
results = results + response.json()
# If exactly per_page results were retrieved, read the next page.
keep_going = (len(response.json()) == per_page)
return results
def create_workflow_dispatch(token, workflow_id, ref, inputs):
"""https://docs.github.com/en/rest/reference/actions#create-a-workflow-dispatch-event"""
url = f'{GITHUB_API_URL}/actions/workflows/{workflow_id}/dispatches'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
data = {'ref': ref, 'inputs': inputs}
with requests.post(url, headers=headers, data=json.dumps(data),
stream=True, timeout=TIMEOUT) as response:
logging.info("create_workflow_dispatch: %s response: %s", url, response)
# Response Status: 204 No Content
return True if response.status_code == 204 else False
def list_workflows(token, workflow_id, branch):
"""https://docs.github.com/en/rest/reference/actions#list-workflow-runs-for-a-repository"""
url = f'{GITHUB_API_URL}/actions/workflows/{workflow_id}/runs'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
data = {'event': 'workflow_dispatch', 'branch': branch}
with requests.get(url, headers=headers, data=json.dumps(data),
stream=True, timeout=TIMEOUT) as response:
logging.info("list_workflows: %s response: %s", url, response)
return response.json()
def create_pull_request(token, head, base, title, body, maintainer_can_modify):
"""https://docs.github.com/en/rest/reference/pulls#create-a-pull-request"""
url = f'{GITHUB_API_URL}/pulls'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
data = {'head': head, 'base': base, 'title': title, 'body': body,
'maintainer_can_modify': maintainer_can_modify}
with requests.post(url, headers=headers, data=json.dumps(data),
stream=True, timeout=TIMEOUT) as response:
logging.info("create_pull_request: %s response: %s", head, response)
return True if response.status_code == 201 else False
def list_pull_requests(token, state, head, base):
"""https://docs.github.com/en/rest/reference/pulls#list-pull-requests"""
url = f'{GITHUB_API_URL}/pulls'
headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
page = 1
per_page = 100
results = []
keep_going = True
while keep_going:
params = {'per_page': per_page, 'page': page}
if state: params.update({'state': state})
if head: params.update({'head': head})
if base: params.update({'base': base})
page = page + 1
keep_going = False
with requests_retry_session().get(url, headers=headers, params=params,
stream=True, timeout=TIMEOUT) as response:
logging.info("get_reviews: %s response: %s", url, response)
results = results + response.json()
# If exactly per_page results were retrieved, read the next page.
keep_going = (len(response.json()) == per_page)
return results
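
# --- Illustrative sketch (not part of the original script) ---
# get_reviews() and list_pull_requests() share the same pagination loop:
# fetch up to per_page items and keep requesting the next page only while a
# full page comes back. The same idea factored into a generic helper that
# takes any page-fetching callable (hypothetical name):
def paginate(fetch_page, per_page=100):
  """Collect results from fetch_page(page, per_page) until a short page."""
  page = 1
  results = []
  while True:
    batch = fetch_page(page, per_page)
    results.extend(batch)
    if len(batch) < per_page:
      break
    page += 1
  return results

# Example with an in-memory "API" returning 250 items in pages of 100:
#   items = list(range(250))
#   fetch = lambda page, n: items[(page - 1) * n:page * n]
#   paginate(fetch) == items   # True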
|
firebase/firebase-cpp-sdk
|
scripts/gha/github.py
|
Python
|
apache-2.0
| 12,287
|
"""
Records for SMART Reference EMR
Ben Adida & Josh Mandel
"""
from base import *
from django.utils import simplejson
from django.conf import settings
from smart.common.rdf_tools.rdf_ontology import ontology
from smart.common.rdf_tools.util import rdf, foaf, vcard, sp, serialize_rdf, parse_rdf, bound_graph, URIRef, Namespace, Literal
from smart.lib import utils
from smart.models.apps import *
from smart.models.accounts import *
from smart.triplestore import *
from string import Template
import re
import datetime
class Record(Object):
Meta = BaseMeta()
full_name = models.CharField(max_length=150, null=False)
def __unicode__(self):
return 'Record %s' % self.id
def generate_direct_access_token(self, account, token_secret=None):
u = RecordDirectAccessToken.objects.create(
record=self,
account=account,
token_secret=token_secret
)
u.save()
return u
@classmethod
def search_records(cls, query):
try:
c = TripleStore()
ids = parse_rdf(c.sparql(query))
except Exception, e:
return None
from smart.models.record_object import RecordObject
demographics = RecordObject[sp.Demographics]
subjects = [p[0] for p in ids.triples((None, rdf['type'],
sp.Demographics))]
ret = c.get_contexts(subjects)
return ret
@classmethod
def rdf_to_objects(cls, res):
if res is None:
return None
m = parse_rdf(res)
record_list = []
q = """
PREFIX sp:<http://smartplatforms.org/terms#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dcterms:<http://purl.org/dc/terms/>
PREFIX v:<http://www.w3.org/2006/vcard/ns#>
PREFIX foaf:<http://xmlns.com/foaf/0.1/>
SELECT ?gn ?fn ?dob ?gender ?zipcode ?d
WHERE {
?d rdf:type sp:Demographics.
?d v:n ?n.
?n v:given-name ?gn.
?n v:family-name ?fn.
optional{?d foaf:gender ?gender.}
optional{?d v:bday ?dob.}
optional{
?d v:adr ?a.
?a rdf:type v:Pref.
?a v:postal-code ?zipcode.
}
optional{
?d v:adr ?a.
?a v:postal-code ?zipcode.
}
}"""
people = list(m.query(q))
for p in people:
record = Record()
record.id = re.search(
"\/records\/(.*?)\/demographics", str(p[5])).group(1)
record.fn, record.ln, record.dob, record.gender, record.zipcode = p[:5]
record_list.append(record)
return record_list
class AccountApp(Object):
account = models.ForeignKey(Account)
app = models.ForeignKey(PHA)
# uniqueness
class Meta:
app_label = APP_LABEL
unique_together = (('account', 'app'),)
# Not an OAuth token, but an opaque token that can be used to support
# auto-login via a direct link to a smart_ui_server.
class RecordDirectAccessToken(Object):
record = models.ForeignKey(
Record, related_name='direct_access_tokens', null=False)
account = models.ForeignKey(
Account, related_name='direct_record_shares', null=False)
token = models.CharField(max_length=40, unique=True)
token_secret = models.CharField(max_length=60, null=True)
expires_at = models.DateTimeField(null=False)
def save(self, *args, **kwargs):
if not self.token:
self.token = utils.random_string(30)
if self.expires_at is None:
minutes_to_expire = 30
try:
minutes_to_expire = settings.MINUTES_TO_EXPIRE_DIRECT_ACCESS
except:
pass
self.expires_at = datetime.datetime.utcnow(
) + datetime.timedelta(minutes=minutes_to_expire)
super(RecordDirectAccessToken, self).save(*args, **kwargs)
class Meta:
app_label = APP_LABEL
class RecordAlert(Object):
record = models.ForeignKey(Record)
alert_text = models.TextField(null=False)
alert_time = models.DateTimeField(auto_now_add=True, null=False)
triggering_app = models.ForeignKey(
'OAuthApp', null=False, related_name='alerts')
acknowledged_by = models.ForeignKey('Account', null=True)
acknowledged_at = models.DateTimeField(null=True)
# uniqueness
class Meta:
app_label = APP_LABEL
@classmethod
def from_rdf(cls, rdfstring, record, app):
s = parse_rdf(rdfstring)
q = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX sp: <http://smartplatforms.org/terms#>
SELECT ?notes ?severity
WHERE {
?a rdf:type sp:Alert.
?a sp:notes ?notes.
?a sp:severity ?scv.
?scv sp:code ?severity.
}"""
r = list(s.query(q))
assert len(r) == 1, "Expected one alert in post, found %s" % len(r)
(notes, severity) = r[0]
assert type(notes) == Literal
spcodes = Namespace("http://smartplatforms.org/terms/code/alertLevel#")
assert severity in [spcodes.information, spcodes.warning,
spcodes.critical]
a = RecordAlert(
record=record,
alert_text=str(notes),
triggering_app=app
)
a.save()
return a
def acknowledge(self, account):
self.acknowledged_by = account
self.acknowledged_at = datetime.datetime.now()
self.save()
class LimitedAccount(Account):
records = models.ManyToManyField(Record, related_name="+")
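
# --- Illustrative sketch (not part of the original models) ---
# RecordDirectAccessToken.save() fills in expires_at with "now + N minutes",
# where N comes from settings.MINUTES_TO_EXPIRE_DIRECT_ACCESS and falls back
# to 30. The same default-expiry computation in isolation, with the settings
# lookup replaced by a plain optional argument:
def _example_default_expiry(minutes_to_expire=None):
    if minutes_to_expire is None:
        minutes_to_expire = 30
    return (datetime.datetime.utcnow() +
            datetime.timedelta(minutes=minutes_to_expire))

# _example_default_expiry()   -> a datetime 30 minutes from now (UTC)
# _example_default_expiry(5)  -> a datetime 5 minutes from now (UTC)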
|
smart-classic/smart_server
|
smart/models/records.py
|
Python
|
apache-2.0
| 5,560
|
from django.db import models
class Snippet(models.Model):
"""A text snippet. Not meant for use by anyone other than a designer"""
name = models.CharField(max_length=255)
snippet = models.TextField(blank=True)
class Meta:
pass
def __unicode__(self):
return self.snippet
|
callowayproject/django-snippets
|
snippets/models.py
|
Python
|
apache-2.0
| 321
|
import os
import sys
from utils.SinfonierConstants import Environment as EnvConst
SINFONIER_API_NAME = os.environ[EnvConst.SINFONIER_ENV_KEY]
if SINFONIER_API_NAME == EnvConst.DEVELOP_ENVIRONMENT:
from environmentConfig.Develop import *
elif SINFONIER_API_NAME == EnvConst.PROD_ENVIRONMENT:
from environmentConfig.Production import *
elif SINFONIER_API_NAME == EnvConst.DOCKER_ENVIRONMENT:
from environmentConfig.Docker import *
else:
sys.exit('ERROR: Environment not found: ' + EnvConst.SINFONIER_ENV_KEY)
|
telefonicaid/fiware-sinfonier
|
sinfonier-backend-api/config/config.py
|
Python
|
apache-2.0
| 525
|
#!/usr/bin/env python
print("""# ******* WARNING - AUTO GENERATED CODE - DO NOT EDIT *******
module VimSdk
module VmomiSupport
""")
import ServerObjects
import PbmObjects
import SmsObjects
print(""" end
end
""")
|
cloudfoundry-incubator/bosh-vsphere-cpi-release
|
scripts/pyvmomi_to_ruby/gen_server_objects.py
|
Python
|
apache-2.0
| 219
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
"""
Publish and subscribe to MQTT messages.
Additional information at http://mqtt.org and
http://ibmstreams.github.io/streamsx.messaging
"""
from future.builtins import *
from streamsx.topology.topology import *
from streamsx.topology import schema
import streamsx.topology.functions
class MqttStreams(object):
"""
A simple connector to a MQTT broker for publishing
string tuples to MQTT topics, and
subscribing to MQTT topics and creating streams.
A connector is for a specific MQTT Broker as specified in
    the configuration object config. Any number of publish() and subscribe()
connections may be created from a single mqtt_streams connector.
Sample use:
::
topo = Topology("An MQTT application")
# define configuration information
config = {}
config['clientID'] = "test_MQTTpublishClient"
        config['qos'] = int("1")  # (needs to be int vs long)
        config['keepAliveInterval'] = int(20)  # (needs to be int vs long)
        config['commandTimeout'] = 30000  # (needs to be int vs long)
        config['period'] = 5000  # (needs to be int vs long)
        config['messageQueueSize'] = 10  # (needs to be int vs long)
config['reconnectionBound'] = int(20)
config['retain'] = True
config['password'] = "foobar"
config['trustStore'] = "/tmp/no-such-trustStore"
config['trustStorePassword'] = "woohoo"
config['keyStore'] = "/tmp/no-such-keyStore"
config['keyStorePassword'] = "woohoo"
# create the connector's configuration property map
config['serverURI'] = "tcp://localhost:1883"
config['userID'] = "user1id"
        config['password'] = "user1passwrd"
# create the connector
mqstream = MqttStreams(topo, config)
# publish a python source stream to the topic "python.topic1"
topic = "python.topic1"
src = topo.source(test_functions.mqtt_publish)
mqs = mqstream.publish(src, topic)
# subscribe to the topic "python.topic1"
topic = ["python.topic1", ]
mqs = mqstream.subscribe(topic)
mqs.print()
Configuration properties apply to publish and
subscribe unless stated otherwise.
serverURI
Required String. URI to the MQTT server, either
        tcp://<hostid>[:<port>]
        or ssl://<hostid>[:<port>].
The port defaults to 1883 for "tcp:" and 8883 for "ssl:" URIs.
clientID
Optional String. A unique identifier for a connection
to the MQTT server.
        The MQTT broker only allows a single
        connection for a particular clientID.
By default a unique client ID is automatically
generated for each use of publish() and subscribe().
The specified clientID is used for the first
publish() or subscribe() use and
        a suffix is added for each subsequent use.
keepAliveInterval
Optional Integer. Automatically generate a MQTT
ping message to the server if a message or ping hasn't been
        sent or received in the last keepAliveInterval seconds.
Enables the client to detect if the server is no longer available
without having to wait for the TCP/IP timeout.
A value of 0 disables keepalive processing.
The default is 60.
commandTimeout
Optional Long. The maximum time in milliseconds
to wait for a MQTT connect or publish action to complete.
A value of 0 causes the client to wait indefinitely.
The default is 0.
period
Optional Long. The time in milliseconds before
attempting to reconnect to the server following a connection failure.
The default is 60000.
userID
Optional String. The identifier to use when authenticating
with a server configured to require that form of authentication.
password
Optional String. The identifier to use when authenticating
        with a server configured to require that form of authentication.
trustStore
Optional String. The pathname to a file containing the
public certificate of trusted MQTT servers. If a relative path
is specified, the path is relative to the application directory.
Required when connecting to a MQTT server with an
ssl:/... serverURI.
trustStorePassword
Required String when trustStore is used.
The password needed to access the encrypted trustStore file.
keyStore
Optional String. The pathname to a file containing the
MQTT client's public private key certificates.
If a relative path is specified, the path is relative to the
application directory.
Required when an MQTT server is configured to use SSL client authentication.
keyStorePassword
Required String when keyStore is used.
The password needed to access the encrypted keyStore file.
messageQueueSize
[subscribe] Optional Integer. The size, in number
of messages, of the subscriber's internal receive buffer. Received
messages are added to the buffer prior to being converted to a
stream tuple. The receiver blocks when the buffer is full.
The default is 50.
retain
[publish] Optional Boolean. Indicates if messages should be
retained on the MQTT server. Default is false.
qos
Optional Integer. The default
MQTT quality of service used for message handling.
The default is 0.
"""
def __init__(self, topology, config):
self.topology = topology
self.config = config.copy()
self.opCnt = 0
def publish(self, pub_stream, topic):
parms = self.config.copy()
parms['topic'] = topic
parms['dataAttributeName'] = "string"
        self.opCnt += 1
        if self.opCnt > 1:
            # each op requires its own clientID
            clientId = parms.get('clientID')
            if clientId is not None and len(clientId) > 0:
                parms['clientID'] = clientId + "-" + str(id(self)) + "-" + str(self.opCnt)
# convert pub_stream outputport schema from spl po to spl rstring type
forOp = pub_stream._map(streamsx.topology.functions.identity, schema.CommonSchema.String)
op = self.topology.graph.addOperator(kind="com.ibm.streamsx.messaging.mqtt::MQTTSink")
op.addInputPort(outputPort=forOp.oport)
op.setParameters(parms)
return None
def subscribe(self, topic):
parms = self.config.copy()
        if parms.get('retain') is not None:
del parms['retain']
parms['topics'] = topic
parms['topicOutAttrName'] = "topic"
parms['dataAttributeName'] = "string"
        self.opCnt += 1
        if self.opCnt > 1:
            # each op requires its own clientID
            clientId = parms.get('clientID')
            if clientId is not None and len(clientId) > 0:
                parms['clientID'] = clientId + "-" + str(id(self)) + "-" + str(self.opCnt)
op = self.topology.graph.addOperator(kind="com.ibm.streamsx.messaging.mqtt::MQTTSource")
oport = op.addOutputPort(schema=schema.StreamSchema("tuple<rstring topic, rstring string>"))
op.setParameters(parms)
pop = self.topology.graph.addPassThruOperator()
pop.addInputPort(outputPort=oport)
pOport = pop.addOutputPort(schema=schema.CommonSchema.String)
return Stream(self.topology, pOport)
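
# --- Illustrative sketch (not part of the original module) ---
# publish() and subscribe() derive a unique clientID per operator by
# appending the connector's id() and an operator counter to the configured
# base clientID, because the MQTT broker allows only one connection per
# clientID. The naming scheme on its own (hypothetical helper name):
def _example_unique_client_id(base_client_id, connector_id, op_count):
    """Return the per-operator clientID used after the first operator."""
    if op_count > 1 and base_client_id:
        return "%s-%s-%s" % (base_client_id, connector_id, op_count)
    return base_client_id

# _example_unique_client_id("test_MQTTpublishClient", 12345, 2)
#   -> 'test_MQTTpublishClient-12345-2'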
|
wmarshall484/streamsx.topology
|
com.ibm.streamsx.topology/opt/python/packages/streamsx/topology/mqtt.py
|
Python
|
apache-2.0
| 7,558
|
# This file contains the WSGI configuration required to serve up your
# web application at http://khasm08.pythonanywhere.com/
# It works by setting the variable 'application' to a WSGI handler of some
# description.
#
# +++++++++++ GENERAL DEBUGGING TIPS +++++++++++
# getting imports and sys.path right can be fiddly!
# We've tried to collect some general tips here:
# https://www.pythonanywhere.com/wiki/DebuggingImportError
# +++++++++++ HELLO WORLD +++++++++++
# A little pure-wsgi hello world we've cooked up, just
# to prove everything works. You should delete this
# code to get your own working.
#HELLO_WORLD = """<html>
#<head>
# <title>Python Anywhere hosted web application</title>
#</head>
#<body>
#<h1>Hello, World!</h1>
#<p>
# This is the default welcome page for a
# <a href="https://www.pythonanywhere.com/">PythonAnywhere</a>
# hosted web application.
#</p>
#<p>
# Find out more about how to configure your own web application
# by visiting the <a href="https://www.pythonanywhere.com/web_app_setup/">web app setup</a> page
#</p>
#</body>
#</html>"""
#def application(environ, start_response):
# if environ.get('PATH_INFO') == '/':
# status = '200 OK'
# content = HELLO_WORLD
# else:
# status = '404 NOT FOUND'
# content = 'Page not found.'
# response_headers = [('Content-Type', 'text/html'), ('Content-Length', str(len(content)))]
# start_response(status, response_headers)
# yield content.encode('utf8')
# Below are templates for Django and Flask. You should update the file
# appropriately for the web framework you're using, and then
# click the 'Reload /yourdomain.com/' button on the 'Web' tab to make your site
# live.
# +++++++++++ VIRTUALENV +++++++++++
# If you want to use a virtualenv, set its path on the web app setup tab.
# Then come back here and import your application object as per the
# instructions below
# +++++++++++ CUSTOM WSGI +++++++++++
# If you have a WSGI file that you want to serve using PythonAnywhere, perhaps
# in your home directory under version control, then use something like this:
#
import sys
path = '/home/khasm08/PythonSandbox/KFrame_0_01'
if path not in sys.path:
sys.path.append(path)
from index import application
# +++++++++++ DJANGO +++++++++++
# To use your own django app use code like this:
#import os
#import sys
#
## assuming your django settings file is at '/home/khasm08/mysite/mysite/settings.py'
## and your manage.py is is at '/home/khasm08/mysite/manage.py'
#path = '/home/khasm08/mysite'
#if path not in sys.path:
# sys.path.append(path)
#
#os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
#
## then, for django >=1.5:
#from django.core.wsgi import get_wsgi_application
#application = get_wsgi_application()
## or, for older django <=1.4
#import django.core.handlers.wsgi
#application = django.core.handlers.wsgi.WSGIHandler()
# +++++++++++ FLASK +++++++++++
# Flask works like any other WSGI-compatible framework, we just need
# to import the application. Often Flask apps are called "app" so we
# may need to rename it during the import:
#
#
#import sys
#
## The "/home/khasm08" below specifies your home
## directory -- the rest should be the directory you uploaded your Flask
## code to underneath the home directory. So if you just ran
## "git clone git@github.com/myusername/myproject.git"
## ...or uploaded files to the directory "myproject", then you should
## specify "/home/khasm08/myproject"
#path = '/home/khasm08/path/to/flask_app_directory'
#if path not in sys.path:
# sys.path.append(path)
#
## After you uncomment the line below, the yellow triangle on the left
## side in our in-browser editor shows a warning saying:
## 'application' imported but unused.
## You can ignore this error. The line is necessary, and the variable
## is used externally.
#from main_flask_app_file import app as application
#
# NB -- many Flask guides suggest you use a file called run.py; that's
# not necessary on PythonAnywhere. And you should make sure your code
# does *not* invoke the flask development server with app.run(), as it
# will prevent your wsgi file from working.
|
khasm08/PythonSandbox
|
wsgi.py
|
Python
|
apache-2.0
| 4,161
|
import requests
import pytest
import subprocess
# ============================================================================
class TestAuto(object):
PREFIX = 'http://localhost:8089'
USER = 'testauto'
LIST_ID = ''
AUTO_ID = ''
NUM_BROWSERS = 2
@classmethod
def setup_class(cls):
cls.session = requests.session()
@classmethod
def teardown_class(cls):
pass
def get(self, url, **kwargs):
full_url = self.PREFIX + url
return self.session.get(full_url, **kwargs)
def post(self, url, **kwargs):
full_url = self.PREFIX + url
return self.session.post(full_url, **kwargs)
    def delete(self, url, **kwargs):
full_url = self.PREFIX + url
return self.session.delete(full_url, **kwargs)
@pytest.mark.always
def test_create_user(self):
res = subprocess.run(['docker', 'exec', 'webrecorder_app_1', "python", "-m", "webrecorder.admin",
"-c", "testauto@example.com", "testauto", "TestTest123", "archivist", "Auto Test"],
stdout=subprocess.PIPE)
assert b'Created user testauto' in res.stdout or b'A user already exists' in res.stdout
assert res.returncode == 0
@pytest.mark.always
def test_login(self):
params = {'username': self.USER,
'password': 'TestTest123',
}
res = self.post('/api/v1/auth/login', json=params)
assert res.json()['user']['username'] == self.USER
def test_create_coll(self):
res = self.post('/api/v1/collections?user=testauto',
json={'title': 'Auto Test'})
assert res.json()['collection']['id'] == 'auto-test'
assert res.json()['collection']['title'] == 'Auto Test'
def test_create_auto(self):
params = {'scope_type': 'single-page',
'num_browsers': self.NUM_BROWSERS,
}
res = self.post('/api/v1/auto?user=testauto&coll=auto-test', json=params)
assert res.json()['auto']
TestAuto.AUTO_ID = res.json()['auto']
def test_add_urls(self):
params = {'urls': [
'https://twitter.com/webrecorder_io',
'https://rhizome.org/'
]}
res = self.post('/api/v1/auto/{0}/queue_urls?user=testauto&coll=auto-test'.format(self.AUTO_ID), json=params)
assert res.json()['success']
def test_start(self):
res = self.post('/api/v1/auto/{0}/start?user=testauto&coll=auto-test'.format(self.AUTO_ID))
print(res.json())
assert res.json()['success']
@pytest.mark.append
def _test_append_only(self, append, auto_id):
params = {'title': 'Add Url'}
res = self.post('/api/v1/lists?user=testauto&coll=auto-test', json=params)
list_id = res.json()['list']['id']
bookmarks = [{'url': append, 'title': append}]
res = self.post('/api/v1/list/%s/bulk_bookmarks?user=testauto&coll=auto-test' % list_id,
json=bookmarks)
assert res.json()['list']
params = {'list': list_id}
res = self.post('/api/v1/auto/{0}/queue_list?user=testauto&coll=auto-test'.format(auto_id), json=params)
assert res.json()['status']
def test_get_auto(self):
res = self.get('/api/v1/auto/{0}?user=testauto&coll=auto-test'.format(self.AUTO_ID))
auto = res.json()['auto']
assert auto['queue'] is not None
assert auto['seen'] is not None
assert auto['pending'] is not None
assert len(auto['browsers']) == self.NUM_BROWSERS
assert auto['scope_type'] == 'single-page'
@pytest.mark.delete
def _test_delete_auto(self):
res = self.delete('/api/v1/auto/{0}?user=testauto&coll=auto-test'.format(self.AUTO_ID))
assert res.json() == {'deleted_id': str(self.AUTO_ID)}
@pytest.mark.delete
def test_delete_coll(self):
res = self.delete('/api/v1/collection/auto-test?user=testauto')
assert res.json() == {'deleted_id': 'auto-test'} or res.json() == {'error': 'no_such_collection'}
|
webrecorder/webrecorder
|
webrecorder/auto_tests/runauto.py
|
Python
|
apache-2.0
| 4,145
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
import six
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import notifier
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_action
from nova.objects import instance_info_cache
from nova.objects import keypair as keypair_obj
from nova.objects import migration as migration_obj
from nova.objects import security_group as security_group_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(notifier.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
started at least once the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
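
# Illustrative usage (not part of the original module): check_instance_state
# is applied to API methods that must only run while the instance is in an
# acceptable vm_state/task_state, e.g. (hypothetical method, states chosen
# for illustration only):
#
#     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
#                           task_state=[None])
#     def example_action(self, context, instance):
#         ...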
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
def _diff_dict(orig, new):
"""
Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
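
# Illustrative usage (not part of the original module): _diff_dict() encodes
# a dict-to-dict transition as per-key instructions, with removed keys
# mapping to ['-'] and added or changed keys mapping to ['+', new_value].
def _example_diff_dict_usage():
    """Tiny self-check of _diff_dict(); illustrative only."""
    result = _diff_dict({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
    assert result == {'a': ['-'], 'b': ['+', 3], 'c': ['+', 4]}
    return result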
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = notifier.get_notifier('compute', CONF.host)
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
instance_action.InstanceAction.action_start(context,
instance['uuid'],
action,
want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
headroom = exc.kwargs['headroom']
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
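            # For example (hypothetical numbers): with headroom of 4 cores
            # and 8192 MB of RAM and a flavor with vcpus=2 and
            # memory_mb=4096, 'allowed' is capped at min(allowed, 2, 2).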
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. %(msg)s"),
params)
else:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, reservations
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if not isinstance(k, six.string_types):
msg = _("Metadata property key '%s' is not a string.") % k
raise exception.InvalidMetadata(reason=msg)
if not isinstance(v, six.string_types):
msg = (_("Metadata property value '%(v)s' for key '%(k)s' is "
"not a string.") % {'v': v, 'k': k})
raise exception.InvalidMetadata(reason=msg)
if len(k) == 0:
msg = _("Metadata property key blank")
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""
Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks,
max_count):
"""
Check if the networks requested belongs to the project
and the fixed IP address for each network provided is within
same the network block
"""
return self.network_api.validate_networks(context, requested_networks,
max_count)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
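    # For instance, assuming CONF.null_kernel is 'nokernel' (hypothetical
    # value): a request passing kernel_id='nokernel' forces both kernel_id
    # and ramdisk_id to None and skips the existence checks above.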
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
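    # Parsing examples for the legacy syntax handled above (hypothetical
    # zone/host/node names):
    #   'zone1:host1'       -> ('zone1', 'host1', None)
    #   'zone1::node1'      -> ('zone1', None, 'node1')
    #   'zone1:host1:node1' -> ('zone1', 'host1', 'node1')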
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
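    # Sketch of the renaming above, assuming a hypothetical
    # CONF.multi_instance_display_name_template of '%(name)s-%(count)d':
    # an instance named 'web' at index 0 would be renamed to 'web-1'.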
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
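    # Rough behaviour: _check_config_drive('true') returns True, a missing
    # or empty value returns '' (the string representation of False noted
    # in the FIXME above), and an unparseable value such as 'maybe' raises
    # ConfigDriveInvalidValue.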
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.FlavorDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.FlavorDiskTooSmall()
def _check_and_transform_bdm(self, base_options, image_meta, min_count,
max_count, block_device_mapping, legacy_bdm):
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
image_ref = base_options.get('image_ref', '')
# Get the block device mappings defined by the image.
image_defined_bdms = \
image_meta.get('properties', {}).get('block_device_mapping', [])
if legacy_bdm:
block_device_mapping += image_defined_bdms
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, image_ref, root_device_name)
elif image_defined_bdms:
# NOTE (ndipanov): For now assume that image mapping is legacy
block_device_mapping += block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
return block_device_mapping
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, forced_host,
user_data, metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id,
max_count):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.FlavorNotFound(flavor_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
# the database might silently truncate if its over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
# Note: max_count is the number of instances requested by the user,
# max_network_count is the maximum number of instances taking into
# account any network quotas
max_network_count = self._check_requested_networks(context,
requested_networks, max_count)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = keypair_obj.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
root_device_name = block_device.properties_root_device_name(
boot_meta.get('properties', {}))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
# return the validated options and maximum number of instances allowed
# by the network quotas
return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
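    # Illustrative result (hypothetical values): with scheduler_hints of
    # {'group': 'g1'} and forced_host='host-a', the returned dict resembles
    # {'scheduler_hints': {'group': 'g1'}, 'instance_type': <flavor dict>,
    #  'force_hosts': ['host-a']}.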
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug(_("Going to run %s instances...") % num_instances)
instances = []
try:
for i in xrange(num_instances):
instance = instance_obj.Instance()
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i)
instances.append(instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
if legacy_bdm and bdm.get('device_name') != 'vda':
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_service.show(context, image_id)
return image_meta.get('properties', {})
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif bdm.get('volume_id'):
try:
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
return volume.get('volume_image_metadata', {})
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
return {}
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
legacy_bdm=True):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = {}
boot_meta['properties'] = \
self._get_bdm_image_metadata(context,
block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(context,
availability_zone)
base_options, max_net_count = self._validate_and_build_base_options(
context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
forced_host, user_data, metadata, injected_files, access_ip_v4,
access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id,
max_count)
# max_net_count is the maximum number of instances requested by the
# user adjusted for any network quota constraints, including
        # consideration of connections to each requested network
if max_net_count == 0:
raise exception.PortLimitExceeded()
elif max_net_count < max_count:
LOG.debug(_("max count reduced from %(max_count)d to "
"%(max_net_count)d due to network port quota"),
{'max_count': max_count,
'max_net_count': max_net_count})
max_count = max_net_count
block_device_mapping = self._check_and_transform_bdm(
base_options, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
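    # For example, with a hypothetical flavor where ephemeral_gb=10, a
    # 'blank' bdm with no guest_format and no explicit volume_size gets a
    # size of 10, while an explicit volume_size is always kept as-is.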
def _prepare_image_mapping(self, instance_type, instance_uuid, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("Image bdm %s"), bdm, instance_uuid=instance_uuid)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
bdm['volume_size'] = self._volume_size(instance_type, bdm)
if bdm.get('volume_size') == 0:
continue
bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(elevated_context,
bdm,
legacy=False)
def _validate_bdm(self, context, instance, instance_type, all_mappings):
def _subsequent_list(l):
return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
# Make sure that the boot indexes make sense
boot_indexes = sorted([bdm['boot_index']
for bdm in all_mappings
if bdm.get('boot_index') is not None
and bdm.get('boot_index') >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
raise exception.InvalidBDMBootSequence()
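        # For example, boot indexes [0, 1, 2] pass the check above, while
        # [1, 2] (no index 0) or [0, 2] (a gap) raise
        # InvalidBDMBootSequence.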
for bdm in all_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
image_id = bdm.get('image_id')
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
ephemeral_size = sum(bdm.get('volume_size') or 0
for bdm in all_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = [bdm for bdm in all_mappings
if block_device.new_format_is_swap(bdm)]
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].get('volume_size') or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in all_mappings
if bdm.get('destination_type') == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
image_mapping = image_properties.get('mappings', [])
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
instance_uuid, image_mapping)
self._validate_bdm(context, instance, instance_type,
block_device_mapping + image_mapping)
for mapping in (image_mapping, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance.shutdown_terminate = False
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, instance, image,
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
if not instance.obj_attr_is_set('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = instance_info_cache.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance['image_ref'])
instance['system_metadata'].update(system_meta)
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
self._populate_instance_for_create(instance, image, index,
security_group, instance_type)
self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
self.security_group_api.ensure_default(context)
instance.create(context)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
self.db.instance_destroy(context, instance['uuid'])
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for net, ip, port in requested_networks:
if port:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
"""
Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
if requested_networks and max_count > 1 and utils.is_neutron():
self._check_multiple_instances_neutron_ports(requested_networks)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
services = service_obj.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: A reference to the updated instance
"""
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance.disable_terminate:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance.uuid))
reservations = None
if context.is_admin and context.project_id != instance.project_id:
project_id = instance.project_id
else:
project_id = context.project_id
if context.user_id != instance.user_id:
user_id = instance.user_id
else:
user_id = context.user_id
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
new_type_id = instance.instance_type_id
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
reservations = self._create_reservations(context,
instance,
new_type_id,
project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
if not host:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.start" % delete_type)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
except exception.ObjectActionError:
instance.refresh()
if instance.vm_state == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
is_up = False
try:
service = service_obj.Service.get_by_compute_host(
context.elevated(), instance.host)
if self.servicegroup_api.service_is_up(service):
is_up = True
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
mig_cls = migration_obj.Migration
migration = None
for status in ('finished', 'confirming'):
try:
migration = mig_cls.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info(_('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s') %
{'id': migration.id,
'status': migration.status},
context=context, instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_('Instance may have been confirmed during delete'),
context=context, instance=instance)
return
src_host = migration.source_compute
        # Use a synchronous call (not a cast) since this can race with
        # terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = self._downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_('Migration %s may have been confirmed during delete') %
migration.id, context=context, instance=instance)
return
downsize_reservations = self._reserve_quota_delta(context,
deltas)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration,
src_host, downsize_reservations,
cast=False)
def _create_reservations(self, context, old_instance, new_instance_type_id,
project_id, user_id):
instance_vcpus = old_instance['vcpus']
instance_memory_mb = old_instance['memory_mb']
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
Migration = migration_obj.Migration
try:
migration = Migration.get_by_instance_and_status(
context.elevated(), old_instance.uuid, 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration = None
if (migration and
new_instance_type_id ==
migration.new_instance_type_id):
old_inst_type_id = migration.old_instance_type_id
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.FlavorNotFound:
LOG.warning(_("Flavor %d not found"), old_inst_type_id)
pass
else:
instance_vcpus = old_inst_type['vcpus']
instance_memory_mb = old_inst_type['memory_mb']
LOG.debug(_("going to delete a resizing instance"))
reservations = QUOTAS.reserve(context,
project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return reservations
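    # Illustrative outcome (hypothetical flavor): deleting an instance with
    # 2 vcpus and 4096 MB of RAM reserves instances=-1, cores=-2 and
    # ram=-4096 against the project/user quotas.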
def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)
elevated = context.elevated()
if self.cell_type != 'api':
self.network_api.deallocate_for_instance(elevated,
instance)
# cleanup volumes
for bdm in bdms:
if bdm['volume_id']:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
try:
self.volume_api.terminate_connection(context,
bdm['volume_id'],
connector)
self.volume_api.detach(elevated, bdm['volume_id'])
if bdm['delete_on_termination']:
self.volume_api.delete(context, bdm['volume_id'])
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
self.db.block_device_mapping_destroy(context, bdm['id'])
cb(context, instance, bdms, local=True)
sys_meta = instance.system_metadata
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
flavor = instance.get_flavor()
num_instances, quota_reservations = self._check_num_instances_quota(
context, flavor, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance['host']:
instance = self.update(context, instance,
task_state=task_states.RESTORING,
expected_task_state=[None],
deleted_at=None)
self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=[None],
deleted_at=None)
QUOTAS.commit(context, quota_reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
#NOTE(bcwaldon): no policy check here since it should be rolled in to
# search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
"""Get instances that were continuously active over a window."""
return self.db.instance_get_active_by_window_joined(context, begin,
end, project_id)
#NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
"""Get an instance type by instance type id."""
return flavors.get_flavor(instance_type_id, ctxt=context)
def get(self, context, instance_id, want_objects=False):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
expected_attrs = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
#TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
flavor = flavor_obj.Flavor.get_by_flavor_id(context, flavor_id)
filters['instance_type_id'] = flavor.id
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
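        # e.g. a fixed_ip filter of '10.0.0.1' (hypothetical address)
        # becomes the regexp filter '^10\.0\.0\.1$'.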
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, six.string_types):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir,
limit=limit,
marker=marker)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
        props_copy = dict(extra_properties or {}, backup_type=backup_type)
image_meta = self._create_image(context, instance, name,
'backup', extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save(expected_task_state=[None])
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param extra_properties: dict of extra image properties to include
"""
if extra_properties is None:
extra_properties = {}
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
image_ref = instance.image_ref
sent_meta = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
sent_meta['name'] = name
sent_meta['is_public'] = False
# The properties set up above and in extra_properties have precedence
properties.update(extra_properties or {})
sent_meta['properties'].update(properties)
return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
# TODO(xqueralt): Use new style BDM in volume snapshots
bdms = self.get_instance_bdms(context, instance)
mapping = []
for bdm in bdms:
if bdm['no_device']:
continue
# Clean the BDM of the database related fields to prevent
# duplicates in the future (e.g. the id was being preserved)
for field in block_device.BlockDeviceDict._db_only_fields:
bdm.pop(field, None)
volume_id = bdm.get('volume_id')
if volume_id:
# create snapshot based on volume_id
volume = self.volume_api.get(context, volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in
# short time, it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
bdm['snapshot_id'] = snapshot['id']
# Clean the extra volume related fields that will be generated
# when booting from the new snapshot.
bdm.pop('volume_id')
bdm.pop('connection_info')
mapping.append(bdm)
for m in block_device.mappings_prepend_dev(properties.get('mappings',
[])):
virtual_name = m['virtual']
if virtual_name in ('ami', 'root'):
continue
assert block_device.is_swap_or_ephemeral(virtual_name)
device_name = m['device']
if device_name in [b['device_name'] for b in mapping
if not b.get('no_device', False)]:
continue
# NOTE(yamahata): swap and ephemeral devices are specified in
# AMI, but disabled for this instance by user.
# So disable those device by no_device.
mapping.append({'device_name': device_name, 'no_device': True})
if mapping:
properties['block_device_mapping'] = mapping
for attr in ('status', 'location', 'id'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_service.create(context, image_meta, data='')
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED,
vm_states.ERROR],
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
(instance['vm_state'] in [vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED,
vm_states.ERROR])):
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method='reboot')
if ((reboot_type == 'SOFT' and
instance['task_state'] == task_states.REBOOTING) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance.task_state = state
instance.save(expected_task_state=[None, task_states.REBOOTING])
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password,
files_to_inject=None, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance.image_ref or ''
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
flavor = instance.get_flavor()
self._checks_for_create_and_rebuild(context, image_id, image,
flavor, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""
Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that if
# the system_metadata for this instance is updated after
# we do the previous save() and before we update.. those
# other updates will be lost. Since this problem exists in
# a lot of other places, I think it should be addressed in
# a DB layer overhaul.
orig_sys_metadata = dict(instance.system_metadata)
# Remove the old keys
for key in instance.system_metadata.keys():
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del instance.system_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, flavor)
instance.system_metadata.update(new_sys_metadata)
instance.save()
return orig_sys_metadata
instance.task_state = task_states.REBUILDING
instance.image_ref = image_href
instance.kernel_id = kernel_id or ""
instance.ramdisk_id = ramdisk_id or ""
instance.progress = 0
instance.update(kwargs)
instance.save(expected_task_state=[None])
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context,
instance.uuid))
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
kwargs=kwargs)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration)
reservations = self._reserve_quota_delta(context, deltas)
instance.task_state = task_states.RESIZE_REVERTING
try:
instance.save(expected_task_state=[None])
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
migration.status = 'reverting'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute,
reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration is None:
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
reservations = self._reserve_quota_delta(context, deltas)
migration.status = 'confirming'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute,
reservations)
@staticmethod
def _resize_quota_delta(context, new_flavor,
old_flavor, sense, compare):
"""
Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
        :param new_flavor: the target flavor
        :param old_flavor: the original flavor
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_flavor[resource] - old_flavor[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
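    # Illustrative example (hypothetical flavor values): given
    # old_flavor = {'vcpus': 2, 'memory_mb': 4096} and
    # new_flavor = {'vcpus': 4, 'memory_mb': 8192}, an upsize check
    # (sense=1, compare=1) yields {'cores': 2, 'ram': 4096}; reversing that
    # prior adjustment (sense=-1, compare=-1) yields
    # {'cores': -2, 'ram': -4096}; a downsize check (sense=1, compare=-1)
    # yields {} because neither delta is negative.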
@staticmethod
def _upsize_quota_delta(context, new_flavor, old_flavor):
"""
Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""
Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_flavor = flavor_obj.Flavor.get_by_id(
context, migration_ref['old_instance_type_id'])
new_flavor = flavor_obj.Flavor.get_by_id(
context, migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_flavor, old_flavor, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""
Calculate deltas required to adjust quota for an instance downsize.
"""
old_flavor = instance.get_flavor('old')
new_flavor = instance.get_flavor('new')
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, project_id=None):
if not deltas:
return
return QUOTAS.reserve(context, project_id=project_id, **deltas)
@staticmethod
def _resize_cells_support(context, reservations, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
if reservations:
# With cells, the best we can do right now is commit the
# reservations immediately...
QUOTAS.commit(context, reservations,
project_id=instance.project_id)
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = migration_obj.Migration()
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.create(context.elevated())
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."),
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s"),
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id and self.cell_type != 'compute':
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
reservations = self._reserve_quota_delta(context, deltas,
project_id=instance[
'project_id'])
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
headroom = exc.kwargs['headroom']
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
# Here when flavor_id is None, the process is considered as migrate.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, reservations, instance,
current_instance_type,
new_instance_type)
reservations = []
self._record_action_start(context, instance, instance_actions.RESIZE)
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type, reservations=reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance['display_name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=[None])
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
bdms = self.get_instance_bdms(context, instance, legacy=False)
for bdm in bdms:
if bdm['volume_id']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attached(context, volume)
# TODO(ndipanov): This check can be generalized as a decorator to
# check for valid combinations of src and dests - for now check
# if it's booted from volume only
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_lock
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
self.compute_rpcapi.inject_file(context, instance=instance, path=path,
file_contents=file_contents)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
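    # Illustrative example (hypothetical values): a successful call returns
    # something like
    # {'url': 'http://proxy.example.com:6080/vnc_auto.html?token=abc123'},
    # while the same token is registered with consoleauth so the console
    # proxy can validate the connection later.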
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
# If the instance was locked by someone else, check
# that we're allowed to override the lock
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
return device
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance. This method is separated to make
it easier for cells version to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance['uuid']:
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")
raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_detach(context, old_volume)
self.volume_api.check_attach(context, new_volume, instance=instance)
self.volume_api.begin_detaching(context, old_volume['id'])
self.volume_api.reserve_volume(context, new_volume['id'])
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'])
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
self.volume_api.unreserve_volume(context, new_volume['id'])
@wrap_check_policy
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='metadata')
def get_all_system_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
def _match_any(pattern_list, string):
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and value defined -- AND
if ((keys_filter and values_filter) and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or value is defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
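        # Illustrative example (hypothetical filter and metadata values):
        # with search_filt = {'key': ['col.*'], 'value': ['red']} and
        # input_metadata = {'color': 'red', 'size': 'large'}, only
        # {'color': 'red'} survives _filter_metadata, since an item must
        # match both the key and the value patterns when both are given.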
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
metadata = instance.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
instance['metadata'] = {}
notifications.send_update(context, instance, instance)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = self.get_instance_metadata(context, instance)
if delete:
_metadata = metadata
else:
_metadata = orig.copy()
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
metadata = self.db.instance_metadata_update(context, instance['uuid'],
_metadata, True)
instance['metadata'] = metadata
notifications.send_update(context, instance, instance)
diff = _diff_dict(orig, _metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def get_instance_bdms(self, context, instance, legacy=True):
"""Get all bdm tables for specified instance."""
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance['uuid'])
if legacy:
return block_device.legacy_mapping(bdms)
return bdms
def is_volume_backed_instance(self, context, instance, bdms=None):
if not instance['image_ref']:
return True
if bdms is None:
bdms = self.get_instance_bdms(context, instance, legacy=False)
root_bdm = block_device.get_root_bdm(bdms)
if root_bdm and root_bdm.get('destination_type') == 'volume':
return True
return False
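    # Illustrative note: an instance with an empty image_ref is treated as
    # volume backed immediately; otherwise it is volume backed only when its
    # root bdm reports destination_type == 'volume' (e.g. a hypothetical
    # root bdm {'destination_type': 'volume', ...}); an image-backed root
    # disk yields False.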
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=[None])
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
"""
LOG.debug(_('vm evacuation scheduled'))
inst_host = instance['host']
service = service_obj.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceInUse(host=inst_host)
instance = self.update(context, instance, expected_task_state=[None],
task_state=task_states.REBUILDING)
self._record_action_start(context, instance, instance_actions.EVACUATE)
        # NOTE(danms): Transitional until evacuate supports objects
inst_obj = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=['metadata', 'system_metadata'])
return self.compute_rpcapi.rebuild_instance(context,
instance=inst_obj,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return migration_obj.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm['instance'],
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
@wrap_check_policy
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_delete(context, bdm['instance'],
volume_id, snapshot_id, delete_info)
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = service_obj.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
@wrap_exception()
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'enabled': enabled}
compute_utils.notify_about_host_update(context,
'set_enabled.start',
payload)
result = self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
compute_utils.notify_about_host_update(context,
'set_enabled.end',
payload)
return result
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
@wrap_exception()
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'action': action}
compute_utils.notify_about_host_update(context,
'power_action.start',
payload)
result = self.rpcapi.host_power_action(context, action=action,
host=host_name)
compute_utils.notify_about_host_update(context,
'power_action.end',
payload)
return result
@wrap_exception()
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'mode': mode}
compute_utils.notify_about_host_update(context,
'set_maintenance.start',
payload)
result = self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
compute_utils.notify_about_host_update(context,
'set_maintenance.end',
payload)
return result
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
        If specified, 'filters' should be a dictionary containing service
        attributes and matching values. For example, to get a list of
        services for the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
services = service_obj.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return service_obj.Service.get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = service_obj.Service.get_by_args(context, host_name,
binary)
service.update(params_to_update)
service.save()
return service
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return instance_action.InstanceActionList.get_by_instance_uuid(
context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return instance_action.InstanceAction.get_by_request_id(
context, instance['uuid'], request_id)
def action_events_get(self, context, instance, action_id):
return instance_action.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = aggregate_obj.Aggregate()
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create(context)
aggregate = self._reformat_aggregate_info(aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
del aggregate['metadata']
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
return self._reformat_aggregate_info(aggregate)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
aggregates = aggregate_obj.AggregateList.get_all(context)
return [self._reformat_aggregate_info(agg) for agg in aggregates]
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
if values:
aggregate.metadata = values
aggregate.save()
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.update_metadata(metadata)
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = aggregate_obj.Aggregate.get_by_id(context,
aggregate_id)
if len(aggregate.hosts) > 0:
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason='not empty')
aggregate.destroy()
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id):
# NOTE(mtreinish) The availability_zone key returns a set of
# zones so loop over each zone. However there should only
# ever be one zone in the set because an aggregate can only
# have a single availability zone set at one time.
for aggregate_az in aggregate_meta["availability_zone"]:
# NOTE(mtreinish) Ensure that the aggregate_az is not none
# if it is none then that is just a regular aggregate and
# it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
msg = _("Host already in availability zone "
"%s") % host_az
action_name = "add_host_to_aggregate"
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
reason=msg)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
host_az = availability_zones.get_host_availability_zone(context,
host_name)
if host_az and host_az != CONF.default_availability_zone:
aggregate_meta = self.db.aggregate_metadata_get_by_metadata_key(
context, aggregate_id, 'availability_zone')
if aggregate_meta.get("availability_zone"):
self._check_az_for_host(aggregate_meta, host_az, aggregate_id)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.add_host(context, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
def _reformat_aggregate_info(self, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
return dict(aggregate.iteritems())
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = notifier.get_notifier(service='api')
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
if not 0 < len(key_name) < 256:
raise exception.InvalidKeypair(
reason=_('Keypair name must be between '
'1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'import.start', key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'import.end', key_name)
return keypair
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'create.start', key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'create.end', key_name)
return keypair, private_key
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
keypair_obj.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id):
"""List key pairs."""
return keypair_obj.KeyPairList.get_by_user(context, user_id)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return keypair_obj.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""
Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""
Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
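    # Illustrative example (hypothetical 'allowed' pattern): calling
    # validate_property('web servers', 'name', r'^[a-zA-Z0-9_\- ]+$') passes,
    # validate_property('   ', 'name', None) raises via
    # raise_invalid_property because the stripped value is empty, and a
    # 300-character name fails the 255-character length check.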
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
columns_to_join = ['rules.grantee_group']
group_ref = self.db.security_group_update(context,
security_group['id'],
group,
columns_to_join=columns_to_join)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
        with the instance; if so, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
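    # Illustrative example (hypothetical rule values): if the default rule
    # list already holds {'id': 7, 'protocol': 'tcp', 'from_port': 22,
    # 'to_port': 22, 'cidr': '10.0.0.0/8'}, then default_rule_exists() with
    # the same protocol/port/cidr values returns 7, and any mismatch in
    # those four keys returns False.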
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = self.db.instance_get_by_uuid(context, instance_uuid)
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
def populate_security_groups(self, instance, security_groups):
if not security_groups:
# Make sure it's an empty list and not None
security_groups = []
instance.security_groups = security_group_obj.make_secgroup_list(
security_groups)
|
sacharya/nova
|
nova/compute/api.py
|
Python
|
apache-2.0
| 164,307
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" tests for supporting multiple NIC's in advanced zone with security groups in cloudstack 4.14.0.0
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.sshClient import SshClient
from marvin.lib.utils import (validateList,
cleanup_resources,
get_host_credentials,
get_process_status,
execute_command_in_host,
random_gen)
from marvin.lib.base import (PhysicalNetwork,
Account,
Host,
TrafficType,
Domain,
Network,
NetworkOffering,
VirtualMachine,
ServiceOffering,
Zone,
NIC,
SecurityGroup)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_virtual_machines,
list_routers,
list_hosts,
get_free_vlan)
from marvin.codes import (PASS, FAILED)
import logging
import random
import time
class TestMultipleNicSupport(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
            TestMultipleNicSupport,
cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.services = cls.testClient.getParsedTestDataConfig()
zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.zone = Zone(zone.__dict__)
cls._cleanup = []
cls.skip = False
if str(cls.zone.securitygroupsenabled) != "True":
cls.skip = True
return
        cls.logger = logging.getLogger("TestMultipleNicSupport")
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
# Get Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.services['mode'] = cls.zone.networktype
cls.template = get_template(cls.apiclient, cls.zone.id, hypervisor="KVM")
if cls.template == FAILED:
cls.skip = True
return
# Create new domain, account, network and VM
cls.user_domain = Domain.create(
cls.apiclient,
services=cls.testdata["acl"]["domain2"],
parentdomainid=cls.domain.id)
# Create account
cls.account1 = Account.create(
cls.apiclient,
cls.testdata["acl"]["accountD2"],
admin=True,
domainid=cls.user_domain.id
)
# Create small service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.testdata["service_offerings"]["small"]
)
cls._cleanup.append(cls.service_offering)
cls.services["network"]["zoneid"] = cls.zone.id
cls.network_offering = NetworkOffering.create(
cls.apiclient,
cls.services["network_offering"],
)
# Enable Network offering
cls.network_offering.update(cls.apiclient, state='Enabled')
cls._cleanup.append(cls.network_offering)
cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls.testdata["virtual_machine"]["template"] = cls.template.id
if cls.zone.securitygroupsenabled:
            # Enable networking for reaching the VM through SSH
security_group = SecurityGroup.create(
cls.apiclient,
cls.testdata["security_group"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
# Authorize Security group to SSH to VM
ingress_rule = security_group.authorize(
cls.apiclient,
cls.testdata["ingress_rule"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
# Authorize Security group to SSH to VM
ingress_rule2 = security_group.authorize(
cls.apiclient,
cls.testdata["ingress_rule_ICMP"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
cls.testdata["shared_network_offering_sg"]["specifyVlan"] = 'True'
cls.testdata["shared_network_offering_sg"]["specifyIpRanges"] = 'True'
cls.shared_network_offering = NetworkOffering.create(
cls.apiclient,
cls.testdata["shared_network_offering_sg"],
conservemode=False
)
NetworkOffering.update(
cls.shared_network_offering,
cls.apiclient,
id=cls.shared_network_offering.id,
state="enabled"
)
physical_network, vlan = get_free_vlan(cls.apiclient, cls.zone.id)
cls.testdata["shared_network_sg"]["physicalnetworkid"] = physical_network.id
random_subnet_number = random.randrange(90, 99)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
cls.network1 = Network.create(
cls.apiclient,
cls.testdata["shared_network_sg"],
networkofferingid=cls.shared_network_offering.id,
zoneid=cls.zone.id,
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
random_subnet_number = random.randrange(100, 110)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
cls.network2 = Network.create(
cls.apiclient,
cls.testdata["shared_network_sg"],
networkofferingid=cls.shared_network_offering.id,
zoneid=cls.zone.id,
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
random_subnet_number = random.randrange(111, 120)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
cls.network3 = Network.create(
cls.apiclient,
cls.testdata["shared_network_sg"],
networkofferingid=cls.shared_network_offering.id,
zoneid=cls.zone.id,
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
try:
cls.virtual_machine1 = VirtualMachine.create(
cls.apiclient,
cls.testdata["virtual_machine"],
accountid=cls.account1.name,
domainid=cls.account1.domainid,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id,
securitygroupids=[security_group.id],
networkids=cls.network1.id
)
for nic in cls.virtual_machine1.nic:
if nic.isdefault:
cls.virtual_machine1.ssh_ip = nic.ipaddress
cls.virtual_machine1.default_network_id = nic.networkid
break
except Exception as e:
cls.fail("Exception while deploying virtual machine: %s" % e)
try:
cls.virtual_machine2 = VirtualMachine.create(
cls.apiclient,
cls.testdata["virtual_machine"],
accountid=cls.account1.name,
domainid=cls.account1.domainid,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id,
securitygroupids=[security_group.id],
networkids=[str(cls.network1.id), str(cls.network2.id)]
)
for nic in cls.virtual_machine2.nic:
if nic.isdefault:
cls.virtual_machine2.ssh_ip = nic.ipaddress
cls.virtual_machine2.default_network_id = nic.networkid
break
except Exception as e:
cls.fail("Exception while deploying virtual machine: %s" % e)
cls._cleanup.append(cls.virtual_machine1)
cls._cleanup.append(cls.virtual_machine2)
cls._cleanup.append(cls.network1)
cls._cleanup.append(cls.network2)
cls._cleanup.append(cls.network3)
cls._cleanup.append(cls.shared_network_offering)
if cls.zone.securitygroupsenabled:
cls._cleanup.append(security_group)
cls._cleanup.append(cls.account1)
cls._cleanup.append(cls.user_domain)
@classmethod
    def tearDownClass(cls):
        try:
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
if self.skip:
self.skipTest("Test can be run only on advanced zone and KVM hypervisor")
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def verify_network_rules(self, vm_id):
virtual_machine = VirtualMachine.list(
self.apiclient,
id=vm_id
)
vm = virtual_machine[0]
hosts = list_hosts(
self.apiclient,
id=vm.hostid
)
host = hosts[0]
        if host.hypervisor.lower() != "kvm":
return
host.user, host.password = get_host_credentials(self.config, host.ipaddress)
for nic in vm.nic:
secips = ""
if len(nic.secondaryip) > 0:
for secip in nic.secondaryip:
secips += secip.ipaddress + ";"
command="/usr/share/cloudstack-common/scripts/vm/network/security_group.py verify_network_rules --vmname %s --vmip %s --vmmac %s --nicsecips '%s'" % (vm.instancename, nic.ipaddress, nic.macaddress, secips)
self.logger.debug("Executing command '%s' in host %s" % (command, host.ipaddress))
result=execute_command_in_host(host.ipaddress, 22,
host.user,
host.password,
command)
if len(result) > 0:
self.fail("The iptables/ebtables rules for nic %s on vm %s on host %s are not correct" %(nic.ipaddress, vm.instancename, host.name))
@attr(tags=["adeancedsg"], required_hardware="false")
def test_01_create_vm_with_multiple_nics(self):
"""Create Vm with multiple NIC's
Steps:
# 1. Create more than 1 isolated or shared network
# 2. Create a vm and select more than 1 network while deploying
# 3. Vm is deployed successfully with 1 nic from each network
# 4. All the vm's should be pingable
:return:
"""
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id
)
self.assertEqual(
len(virtual_machine), 1,
"Virtual Machine create with 2 NIC's failed")
nicIdInVm = virtual_machine[0].nic[0]
self.assertIsNotNone(nicIdInVm, "NIC 1 not found in Virtual Machine")
nicIdInVm = virtual_machine[0].nic[1]
self.assertIsNotNone(nicIdInVm, "NIC 2 not found in Virtual Machine")
self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_02_add_nic_to_vm(self):
"""Create VM with single NIC and then add additional NIC
Steps:
# 1. Create a VM by selecting one default NIC
# 2. Create few more isolated or shared networks
# 3. Add extra NIC's to the vm from the newly created networks
# 4. The deployed VM should have extra nic's added in the above
# step without any fail
# 5. The IP's of the extra NIC's should be pingable
:return:
"""
self.virtual_machine1.add_nic(self.apiclient, self.network2.id)
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine1.id
)
nicIdInVm = virtual_machine[0].nic[1]
self.assertIsNotNone(nicIdInVm, "Second NIC not found")
self.verify_network_rules(self.virtual_machine1.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_03_add_ip_to_default_nic(self):
""" Add secondary IP's to the VM
Steps:
# 1. Create a VM with more than 1 NIC
# 2) Navigate to Instances->NIC->Edit Secondary IP's
        # ->Acquire new Secondary IP
# 3) Add as many secondary Ip as possible to the VM
# 4) Configure the secondary IP's by referring to "Configure
# the secondary IP's" in the "Action Item" section
:return:
"""
ipaddress = NIC.addIp(
self.apiclient,
id=self.virtual_machine2.nic[0].id
)
self.assertIsNotNone(
ipaddress,
"Unable to add secondary IP to the default NIC")
self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_04_add_ip_to_remaining_nics(self):
""" Add secondary IP's to remaining NIC's
Steps:
# 1) Create a VM with more than 1 NIC
        # 2) Navigate to Instances->NIC's->Edit Secondary IP's
# ->Acquire new Secondary IP
# 3) Add secondary IP to all the NIC's of the VM
        # 4) Configure the secondary IP's by referring to "Configure the
# secondary IP's" in the "Action Item" section
:return:
"""
self.virtual_machine1.add_nic(self.apiclient, self.network3.id)
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine1.id
)
self.assertIsNotNone(
vms[0].nic[2],
"Third NIC is not added successfully to the VM")
vms1_nic1_id = vms[0].nic[1]['id']
vms1_nic2_id = vms[0].nic[2]['id']
ipaddress21 = NIC.addIp(
self.apiclient,
id=vms1_nic1_id
)
ipaddress22 = NIC.addIp(
self.apiclient,
id=vms1_nic1_id
)
self.assertIsNotNone(
ipaddress21,
"Unable to add first secondary IP to the second nic")
self.assertIsNotNone(
ipaddress22,
"Unable to add second secondary IP to second NIC")
ipaddress31 = NIC.addIp(
self.apiclient,
id=vms1_nic2_id
)
ipaddress32 = NIC.addIp(
self.apiclient,
id=vms1_nic2_id
)
self.assertIsNotNone(
ipaddress31,
"Unable to add first secondary IP to third NIC")
self.assertIsNotNone(
ipaddress32,
"Unable to add second secondary IP to third NIC")
self.verify_network_rules(self.virtual_machine1.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_05_stop_start_vm_with_multiple_nic(self):
""" Stop and Start a VM with Multple NIC
Steps:
# 1) Create a Vm with multiple NIC's
# 2) Configure secondary IP's on the VM
# 3) Try to stop/start the VM
# 4) Ping the IP's of the vm
# 5) Remove Secondary IP from one of the NIC
:return:
"""
ipaddress1 = NIC.addIp(
self.apiclient,
id=self.virtual_machine2.nic[0].id
)
ipaddress2 = NIC.addIp(
self.apiclient,
id=self.virtual_machine2.nic[1].id
)
# Stop the VM with multiple NIC's
self.virtual_machine2.stop(self.apiclient)
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id
)
self.assertEqual(
virtual_machine[0]['state'], 'Stopped',
"Could not stop the VM with multiple NIC's")
if virtual_machine[0]['state'] == 'Stopped':
# If stopped then try to start the VM
self.virtual_machine2.start(self.apiclient)
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id
)
self.assertEqual(
virtual_machine[0]['state'], 'Running',
"Could not start the VM with multiple NIC's")
self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_06_migrate_vm_with_multiple_nic(self):
""" Migrate a VM with Multple NIC
Steps:
# 1) Create a Vm with multiple NIC's
# 2) Configure secondary IP's on the VM
# 3) Try to stop/start the VM
# 4) Ping the IP's of the vm
:return:
"""
        # Skipping adding Secondary IP to NIC since it's already
# done in the previous test cases
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine1.id
)
old_host_id = virtual_machine[0]['hostid']
try:
hosts = Host.list(
self.apiclient,
virtualmachineid=self.virtual_machine1.id,
listall=True)
self.assertEqual(
validateList(hosts)[0],
PASS,
"hosts list validation failed")
# Get a host which is not already assigned to VM
for host in hosts:
if host.id == old_host_id:
continue
else:
host_id = host.id
break
self.virtual_machine1.migrate(self.apiclient, host_id)
except Exception as e:
self.fail("Exception occured: %s" % e)
# List the vm again
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine1.id)
new_host_id = virtual_machine[0]['hostid']
self.assertNotEqual(
old_host_id, new_host_id,
"Migration of VM to new host failed"
)
self.verify_network_rules(self.virtual_machine1.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_07_remove_secondary_ip_from_nic(self):
""" Remove secondary IP from any NIC
Steps:
# 1) Navigate to Instances
# 2) Select any vm
# 3) NIC's ->Edit secondary IP's->Release IP
# 4) The secondary IP should be successfully removed
"""
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id)
# Check which NIC is having secondary IP
secondary_ips = virtual_machine[0].nic[1].secondaryip
for secondary_ip in secondary_ips:
NIC.removeIp(self.apiclient, ipaddressid=secondary_ip['id'])
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id
)
self.assertFalse(
virtual_machine[0].nic[1].secondaryip,
'Failed to remove secondary IP')
self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_08_remove_nic_from_vm(self):
""" Remove NIC from VM
Steps:
# 1) Navigate to Instances->select any vm->NIC's->NIC 2
# ->Click on "X" button to remove the second NIC
# 2) Remove other NIC's as well from the VM
# 3) All the NIC's should be successfully removed from the VM
:return:
"""
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id)
for nic in virtual_machine[0].nic:
if nic.isdefault:
continue
self.virtual_machine2.remove_nic(self.apiclient, nic.id)
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id)
self.assertEqual(
len(virtual_machine[0].nic), 1,
"Failed to remove all the nics from the virtual machine")
self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_09_reboot_vm_with_multiple_nic(self):
""" Reboot a VM with Multple NIC
Steps:
# 1) Create a Vm with multiple NIC's
# 2) Configure secondary IP's on the VM
# 3) Try to reboot the VM
# 4) Ping the IP's of the vm
:return:
"""
        # Skipping adding Secondary IP to NIC since it's already
# done in the previous test cases
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine1.id
)
try:
self.virtual_machine1.reboot(self.apiclient)
except Exception as e:
self.fail("Exception occured: %s" % e)
self.verify_network_rules(self.virtual_machine1.id)
|
GabrielBrascher/cloudstack
|
test/integration/component/test_multiple_nic_support.py
|
Python
|
apache-2.0
| 24,109
|
# Copyright (c) 2017 Linaro Limited.
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for debugging with JLink.'''
from os import path
import os
from .core import ZephyrBinaryRunner, get_env_or_bail
DEFAULT_JLINK_GDB_PORT = 2331
class JLinkBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for the J-Link GDB server.'''
def __init__(self, device,
gdbserver='JLinkGDBServer', iface='swd', elf_name=None,
gdb=None, gdb_port=DEFAULT_JLINK_GDB_PORT, tui=None,
debug=False):
super(JLinkBinaryRunner, self).__init__(debug=debug)
self.device = device
self.gdbserver_cmd = [gdbserver]
self.iface = iface
self.elf_name = elf_name
self.gdb_cmd = [gdb] if gdb is not None else None
self.gdb_port = gdb_port
self.tui_arg = [tui] if tui is not None else []
def replaces_shell_script(shell_script, command):
return (command in {'debug', 'debugserver'} and
shell_script == 'jlink.sh')
def create_from_env(command, debug):
'''Create runner from environment.
Required:
- JLINK_DEVICE: device name
Required for 'debug':
- GDB: gdb to use
- O: build output directory
- KERNEL_ELF_NAME: zephyr kernel binary in ELF format
Optional for 'debug':
        - TUI: if present, passed to gdb to enable its TUI mode
Optional for 'debug', 'debugserver':
- JLINK_GDBSERVER: default is JLinkGDBServer
- GDB_PORT: default is 2331
- JLINK_IF: default is swd
'''
device = get_env_or_bail('JLINK_DEVICE')
gdb = os.environ.get('GDB', None)
o = os.environ.get('O', None)
elf = os.environ.get('KERNEL_ELF_NAME', None)
elf_name = None
if o is not None:
if elf is not None:
elf_name = path.join(o, elf)
tui = os.environ.get('TUI', None)
gdbserver = os.environ.get('JLINK_GDBSERVER', 'JLinkGDBServer')
gdb_port = int(os.environ.get('GDB_PORT',
str(DEFAULT_JLINK_GDB_PORT)))
iface = os.environ.get('JLINK_IF', 'swd')
return JLinkBinaryRunner(device, gdbserver=gdbserver,
iface=iface, elf_name=elf_name,
gdb=gdb, gdb_port=gdb_port, tui=tui,
debug=debug)
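    # Illustrative example (added commentary, not part of the original Zephyr
    # sources): with a hypothetical environment such as JLINK_DEVICE=nRF52832_xxAA,
    # GDB=arm-none-eabi-gdb, O=build and KERNEL_ELF_NAME=zephyr.elf,
    # create_from_env('debug', False) builds a runner whose run('debug') spawns
    # "JLinkGDBServer -port 2331 -if swd -device nRF52832_xxAA -silent -singlerun"
    # and then attaches gdb to it via "target remote :2331".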
def print_gdbserver_message(self):
print('JLink GDB server running on port {}'.format(self.gdb_port))
def run(self, command, **kwargs):
if command not in {'debug', 'debugserver'}:
raise ValueError('{} is not supported'.format(command))
server_cmd = (self.gdbserver_cmd +
['-port', str(self.gdb_port),
'-if', self.iface,
'-device', self.device,
'-silent',
'-singlerun'])
if command == 'debugserver':
self.print_gdbserver_message()
self.check_call(server_cmd)
else:
if self.gdb_cmd is None:
raise ValueError('Cannot debug; gdb is missing')
if self.elf_name is None:
raise ValueError('Cannot debug; elf is missing')
client_cmd = (self.gdb_cmd +
self.tui_arg +
[self.elf_name] +
['-ex', 'target remote :{}'.format(self.gdb_port),
'-ex', 'monitor halt',
'-ex', 'load',
'-ex', 'monitor reset'])
self.print_gdbserver_message()
self.run_server_and_client(server_cmd, client_cmd)
|
fbsder/zephyr
|
scripts/support/runner/jlink.py
|
Python
|
apache-2.0
| 3,770
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import jinja2
import os
import webapp2
import logging
from google.appengine.api import memcache
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class MainPage(webapp2.RequestHandler):
def get(self):
greetings = memcache.get('entries')
if greetings is None:
greetings = []
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(entries=greetings))
def post(self):
greeting = self.request.get('entry')
greetings = memcache.get('entries')
if greetings is not None:
greetings.append(greeting)
if not memcache.replace('entries', greetings):
logging.error('Memcache replace failed.')
else:
greetings = [greeting]
if not memcache.set('entries', greetings):
logging.error('Memcache set failed.')
self.redirect('/')
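    # Note (added commentary, not part of the original sample): the
    # get/append/replace sequence in post() is not atomic, so two concurrent
    # posts can read the same list and one greeting can be lost. A rough sketch
    # of the compare-and-set pattern memcache offers for this, using a Client
    # with gets()/cas(), might look like:
    #
    #   client = memcache.Client()
    #   while True:
    #       entries = client.gets('entries')
    #       if entries is None:
    #           if memcache.add('entries', [greeting]):
    #               break
    #       else:
    #           entries.append(greeting)
    #           if client.cas('entries', entries):
    #               break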
class Clear(webapp2.RequestHandler):
def post(self):
if not memcache.delete('entries'):
logging.error("Memcache failed to delete entries")
self.redirect('/')
application = webapp2.WSGIApplication([
('/', MainPage),
('/clear', Clear)
], debug=True)
|
GoogleCloudPlatformTraining/cp100-appengine-memcache-python
|
guestbook.py
|
Python
|
apache-2.0
| 1,998
|
# -*- coding: utf-8 -*-
"""Tests for mac notes plugin."""
from __future__ import unicode_literals
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import mac_notes
from tests.parsers.sqlite_plugins import test_lib
class MacNotesTest(test_lib.SQLitePluginTestCase):
"""Tests for mac notes database plugin."""
def testProcess(self):
"""Test the Process function on a Mac Notes file."""
plugin_object = mac_notes.MacNotesPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['NotesV7.storedata'], plugin_object)
self.assertEqual(storage_writer.number_of_events, 6)
self.assertEqual(storage_writer.number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the first note.
event = events[0]
self.CheckTimestamp(event.timestamp, '2014-02-11 02:38:27.097813')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_title = 'building 4th brandy gibs'
self.assertEqual(event_data.title, expected_title)
expected_text = (
'building 4th brandy gibs microsoft office body soul and peace '
'example.com 3015555555: plumbing and heating claim#123456 Small '
'business ')
self.assertEqual(event_data.text, expected_text)
expected_short_message = 'title:{0:s}'.format(expected_title)
expected_message = 'title:{0:s} note_text:{1:s}'.format(
expected_title, expected_text)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
|
rgayon/plaso
|
tests/parsers/sqlite_plugins/mac_notes.py
|
Python
|
apache-2.0
| 1,683
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Identity service."""
import abc
import functools
import os
import uuid
from oslo.config import cfg
import six
from keystone import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.identity.mapping_backends import mapping
from keystone import notifications
from keystone.openstack.common import importutils
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
def filter_user(user_ref):
"""Filter out private items in a user dict.
'password', 'tenants' and 'groups' are never returned.
:returns: user_ref
"""
if user_ref:
user_ref = user_ref.copy()
user_ref.pop('password', None)
user_ref.pop('tenants', None)
user_ref.pop('groups', None)
user_ref.pop('domains', None)
try:
user_ref['extra'].pop('password', None)
user_ref['extra'].pop('tenants', None)
except KeyError:
pass
return user_ref
class DomainConfigs(dict):
"""Discover, store and provide access to domain specific configs.
The setup_domain_drivers() call will be made via the wrapper from
the first call to any driver function handled by this manager. This
    setup call will scan the domain config directory for files of the form
keystone.<domain_name>.conf
For each file, the domain_name will be turned into a domain_id and then
this class will:
- Create a new config structure, adding in the specific additional options
defined in this config file
- Initialise a new instance of the required driver with this new config.
"""
configured = False
driver = None
def _load_driver(self, assignment_api, domain_id):
domain_config = self[domain_id]
domain_config['driver'] = (
importutils.import_object(
domain_config['cfg'].identity.driver, domain_config['cfg']))
domain_config['driver'].assignment_api = assignment_api
def _load_config(self, assignment_api, file_list, domain_name):
try:
domain_ref = assignment_api.get_domain_by_name(domain_name)
except exception.DomainNotFound:
LOG.warning(
_('Invalid domain name (%s) found in config file name'),
domain_name)
return
# Create a new entry in the domain config dict, which contains
# a new instance of both the conf environment and driver using
# options defined in this set of config files. Later, when we
# service calls via this Manager, we'll index via this domain
# config dict to make sure we call the right driver
domain = domain_ref['id']
self[domain] = {}
self[domain]['cfg'] = cfg.ConfigOpts()
config.configure(conf=self[domain]['cfg'])
self[domain]['cfg'](args=[], project='keystone',
default_config_files=file_list)
self._load_driver(assignment_api, domain)
def setup_domain_drivers(self, standard_driver, assignment_api):
# This is called by the api call wrapper
self.configured = True
self.driver = standard_driver
conf_dir = CONF.identity.domain_config_dir
if not os.path.exists(conf_dir):
LOG.warning(_('Unable to locate domain config directory: %s'),
conf_dir)
return
for r, d, f in os.walk(conf_dir):
for fname in f:
if (fname.startswith(DOMAIN_CONF_FHEAD) and
fname.endswith(DOMAIN_CONF_FTAIL)):
if fname.count('.') >= 2:
self._load_config(assignment_api,
[os.path.join(r, fname)],
fname[len(DOMAIN_CONF_FHEAD):
-len(DOMAIN_CONF_FTAIL)])
else:
LOG.debug(('Ignoring file (%s) while scanning domain '
'config directory'),
fname)
def get_domain_driver(self, domain_id):
if domain_id in self:
return self[domain_id]['driver']
def get_domain_conf(self, domain_id):
if domain_id in self:
return self[domain_id]['cfg']
def reload_domain_driver(self, assignment_api, domain_id):
# Only used to support unit tests that want to set
# new config values. This should only be called once
# the domains have been configured, since it relies on
# the fact that the configuration files have already been
# read.
if self.configured:
if domain_id in self:
self._load_driver(assignment_api, domain_id)
else:
# The standard driver
self.driver = self.driver()
self.driver.assignment_api = assignment_api
def domains_configured(f):
"""Wraps API calls to lazy load domain configs after init.
This is required since the assignment manager needs to be initialized
before this manager, and yet this manager's init wants to be
able to make assignment calls (to build the domain configs). So
instead, we check if the domains have been initialized on entry
    to each call and, if required, load them.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if (not self.domain_configs.configured and
CONF.identity.domain_specific_drivers_enabled):
self.domain_configs.setup_domain_drivers(
self.driver, self.assignment_api)
return f(self, *args, **kwargs)
return wrapper
def exception_translated(exception_type):
"""Wraps API calls to map to correct exception."""
def _exception_translated(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except exception.PublicIDNotFound as e:
if exception_type == 'user':
raise exception.UserNotFound(user_id=e.message)
elif exception_type == 'group':
raise exception.GroupNotFound(group_id=e.message)
elif exception_type == 'assertion':
raise AssertionError(_('Invalid user / password'))
else:
raise
return wrapper
return _exception_translated
@dependency.provider('identity_api')
@dependency.optional('revoke_api')
@dependency.requires('assignment_api', 'credential_api', 'id_mapping_api',
'token_api')
class Manager(manager.Manager):
"""Default pivot point for the Identity backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
This class also handles the support of domain specific backends, by using
the DomainConfigs class. The setup call for DomainConfigs is called
from with the @domains_configured wrapper in a lazy loading fashion
to get around the fact that we can't satisfy the assignment api it needs
from within our __init__() function since the assignment driver is not
itself yet initialized.
Each of the identity calls are pre-processed here to choose, based on
domain, which of the drivers should be called. The non-domain-specific
driver is still in place, and is used if there is no specific driver for
the domain in question (or we are not using multiple domain drivers).
Starting with Juno, in order to be able to obtain the domain from
just an ID being presented as part of an API call, a public ID to domain
and local ID mapping is maintained. This mapping also allows for the local
ID of drivers that do not provide simple UUIDs (such as LDAP) to be
referenced via a public facing ID. The mapping itself is automatically
generated as entities are accessed via the driver.
This mapping is only used when:
- the entity is being handled by anything other than the default driver, or
- the entity is being handled by the default LDAP driver and backward
compatible IDs are not required.
This means that in the standard case of a single SQL backend or the default
settings of a single LDAP backend (since backward compatible IDs is set to
True by default), no mapping is used. An alternative approach would be to
always use the mapping table, but in the cases where we don't need it to
make the public and local IDs the same. It is felt that not using the
mapping by default is a more prudent way to introduce this functionality.
"""
_USER = 'user'
_GROUP = 'group'
def __init__(self):
super(Manager, self).__init__(CONF.identity.driver)
self.domain_configs = DomainConfigs()
# Domain ID normalization methods
def _set_domain_id_and_mapping(self, ref, domain_id, driver,
entity_type):
"""Patch the domain_id/public_id into the resulting entity(ies).
:param ref: the entity or list of entities to post process
:param domain_id: the domain scope used for the call
:param driver: the driver used to execute the call
:param entity_type: whether this is a user or group
:returns: post processed entity or list or entities
Called to post-process the entity being returned, using a mapping
to substitute a public facing ID as necessary. This method must
take into account:
- If the driver is not domain aware, then we must set the domain
attribute of all entities irrespective of mapping.
- If the driver does not support UUIDs, then we always want to provide
a mapping, except for the special case of this being the default
driver and backward_compatible_ids is set to True. This is to ensure
that entity IDs do not change for an existing LDAP installation (only
single domain/driver LDAP configurations were previously supported).
- If the driver does support UUIDs, then we always create a mapping
entry, but use the local UUID as the public ID. The exception to
          this is that if we just have a single driver (i.e. not using specific
          multi-domain configs), then we don't bother with the mapping at all.
"""
conf = CONF.identity
if (driver is self.driver and driver.generates_uuids() and
driver.is_domain_aware()):
# The default driver that needs no help, e.g. SQL
return ref
LOG.debug('ID Mapping - Domain ID: %(domain)s, '
'Default Driver: %(driver)s, '
'Domains: %(aware)s, UUIDs: %(generate)s, '
'Compatible IDs: %(compat)s',
{'domain': domain_id,
'driver': (driver == self.driver),
'aware': driver.is_domain_aware(),
'generate': driver.generates_uuids(),
'compat': CONF.identity_mapping.backward_compatible_ids})
if isinstance(ref, dict):
LOG.debug('Local ID: %s', ref['id'])
ref = ref.copy()
# If the driver can't handle domains, then we need to insert the
# domain_id into the entity being returned. If the domain_id is
# None that means we are running in a single backend mode, so to
# remain backwardly compatible, we put in the default domain ID.
if not driver.is_domain_aware():
if domain_id is None:
domain_id = conf.default_domain_id
ref['domain_id'] = domain_id
# There are two situations where we must now use the mapping:
# - this isn't the default driver (i.e. multiple backends), or
# - we have a single backend that doesn't use UUIDs
# The exception to the above is that we must honor backward
# compatibility if this is the default driver (e.g. to support
# current LDAP)
if (driver is not self.driver or
(not driver.generates_uuids() and
not CONF.identity_mapping.backward_compatible_ids)):
local_entity = {'domain_id': ref['domain_id'],
'local_id': ref['id'],
'entity_type': entity_type}
public_id = self.id_mapping_api.get_public_id(local_entity)
if public_id:
ref['id'] = public_id
LOG.debug('Found existing mapping to public ID: %s',
ref['id'])
else:
# Need to create a mapping. If the driver generates UUIDs
# then pass the local UUID in as the public ID to use.
if driver.generates_uuids():
public_id = ref['id']
ref['id'] = self.id_mapping_api.create_id_mapping(
local_entity, public_id)
LOG.debug('Created new mapping to public ID: %s',
ref['id'])
return ref
elif isinstance(ref, list):
return [self._set_domain_id_and_mapping(
x, domain_id, driver, entity_type) for x in ref]
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
def _clear_domain_id_if_domain_unaware(self, driver, ref):
"""Clear domain_id details if driver is not domain aware."""
if not driver.is_domain_aware() and 'domain_id' in ref:
ref = ref.copy()
ref.pop('domain_id')
return ref
def _select_identity_driver(self, domain_id):
"""Choose a backend driver for the given domain_id.
:param domain_id: The domain_id for which we want to find a driver. If
the domain_id is specified as None, then this means
we need a driver that handles multiple domains.
:returns: chosen backend driver
If there is a specific driver defined for this domain then choose it.
If the domain is None, or there no specific backend for the given
domain is found, then we chose the default driver.
"""
if domain_id is None:
driver = self.driver
else:
driver = (self.domain_configs.get_domain_driver(domain_id) or
self.driver)
# If the driver is not domain aware (e.g. LDAP) then check to
# ensure we are not mapping multiple domains onto it - the only way
# that would happen is that the default driver is LDAP and the
# domain is anything other than None or the default domain.
if (not driver.is_domain_aware() and driver == self.driver and
domain_id != CONF.identity.default_domain_id and
domain_id is not None):
LOG.warning('Found multiple domains being mapped to a '
'driver that does not support that (e.g. '
'LDAP) - Domain ID: %(domain)s, '
'Default Driver: %(driver)s',
{'domain': domain_id,
'driver': (driver == self.driver)})
raise exception.DomainNotFound(domain_id=domain_id)
return driver
def _get_domain_driver_and_entity_id(self, public_id):
"""Look up details using the public ID.
:param public_id: the ID provided in the call
:returns: domain_id, which can be None to indicate that the driver
in question supports multiple domains
driver selected based on this domain
                  entity_id which will be understood by the driver.
Use the mapping table to look up the domain, driver and local entity
that is represented by the provided public ID. Handle the situations
        where we do not use the mapping (e.g. single driver that understands
UUIDs etc.)
"""
conf = CONF.identity
# First, since we don't know anything about the entity yet, we must
# assume it needs mapping, so long as we are using domain specific
# drivers.
if conf.domain_specific_drivers_enabled:
local_id_ref = self.id_mapping_api.get_id_mapping(public_id)
if local_id_ref:
return (
local_id_ref['domain_id'],
self._select_identity_driver(local_id_ref['domain_id']),
local_id_ref['local_id'])
# So either we are using multiple drivers but the public ID is invalid
# (and hence was not found in the mapping table), or the public ID is
# being handled by the default driver. Either way, the only place left
# to look is in that standard driver. However, we don't yet know if
# this driver also needs mapping (e.g. LDAP in non backward
# compatibility mode).
driver = self.driver
if driver.generates_uuids():
            if driver.is_domain_aware():
# No mapping required, and the driver can handle the domain
# information itself. The classic case of this is the
# current SQL driver.
return (None, driver, public_id)
else:
# Although we don't have any drivers of this type, i.e. that
# understand UUIDs but not domains, conceptually you could.
return (conf.default_domain_id, driver, public_id)
# So the only place left to find the ID is in the default driver which
# we now know doesn't generate UUIDs
if not CONF.identity_mapping.backward_compatible_ids:
# We are not running in backward compatibility mode, so we
# must use a mapping.
local_id_ref = self.id_mapping_api.get_id_mapping(public_id)
if local_id_ref:
return (
local_id_ref['domain_id'],
driver,
local_id_ref['local_id'])
else:
raise exception.PublicIDNotFound(id=public_id)
# If we reach here, this means that the default driver
# requires no mapping - but also doesn't understand domains
# (e.g. the classic single LDAP driver situation). Hence we pass
# back the public_ID unmodified and use the default domain (to
# keep backwards compatibility with existing installations).
#
# It is still possible that the public ID is just invalid in
# which case we leave this to the caller to check.
return (conf.default_domain_id, driver, public_id)
def _assert_user_and_group_in_same_backend(
self, user_entity_id, user_driver, group_entity_id, group_driver):
"""Ensures that user and group IDs are backed by the same backend.
Raise a CrossBackendNotAllowed exception if they are not from the same
backend, otherwise return None.
"""
if user_driver is not group_driver:
# Determine first if either IDs don't exist by calling
# the driver.get methods (which will raise a NotFound
# exception).
user_driver.get_user(user_entity_id)
group_driver.get_group(group_entity_id)
# If we get here, then someone is attempting to create a cross
# backend membership, which is not allowed.
raise exception.CrossBackendNotAllowed(group_id=group_entity_id,
user_id=user_entity_id)
def _mark_domain_id_filter_satisfied(self, hints):
if hints:
for filter in hints.filters:
if (filter['name'] == 'domain_id' and
filter['comparator'] == 'equals'):
hints.filters.remove(filter)
def _ensure_domain_id_in_hints(self, hints, domain_id):
if (domain_id is not None and
not hints.get_exact_filter_by_name('domain_id')):
hints.add_filter('domain_id', domain_id)
# The actual driver calls - these are pre/post processed here as
# part of the Manager layer to make sure we:
#
# - select the right driver for this domain
# - clear/set domain_ids for drivers that do not support domains
# - create any ID mapping that might be required
@notifications.emit_event('authenticate')
@domains_configured
@exception_translated('assertion')
def authenticate(self, context, user_id, password):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(user_id))
ref = driver.authenticate(entity_id, password)
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.USER)
@notifications.created(_USER, result_id_arg_attr='id')
@domains_configured
@exception_translated('user')
def create_user(self, user_ref):
user = user_ref.copy()
user['name'] = clean.user_name(user['name'])
user.setdefault('enabled', True)
user['enabled'] = clean.user_enabled(user['enabled'])
domain_id = user['domain_id']
self.assignment_api.get_domain(domain_id)
# For creating a user, the domain is in the object itself
domain_id = user_ref['domain_id']
driver = self._select_identity_driver(domain_id)
user = self._clear_domain_id_if_domain_unaware(driver, user)
# Generate a local ID - in the future this might become a function of
# the underlying driver so that it could conform to rules set down by
# that particular driver type.
user['id'] = uuid.uuid4().hex
ref = driver.create_user(user['id'], user)
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.USER)
@domains_configured
@exception_translated('user')
def get_user(self, user_id):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(user_id))
ref = driver.get_user(entity_id)
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.USER)
def assert_user_enabled(self, user_id, user=None):
"""Assert the user and the user's domain are enabled.
        :raises AssertionError: if the user or the user's domain is disabled.
"""
if user is None:
user = self.get_user(user_id)
self.assignment_api.assert_domain_enabled(user['domain_id'])
if not user.get('enabled', True):
raise AssertionError(_('User is disabled: %s') % user_id)
@domains_configured
@exception_translated('user')
def get_user_by_name(self, user_name, domain_id):
driver = self._select_identity_driver(domain_id)
ref = driver.get_user_by_name(user_name, domain_id)
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.USER)
@manager.response_truncated
@domains_configured
@exception_translated('user')
def list_users(self, domain_scope=None, hints=None):
driver = self._select_identity_driver(domain_scope)
hints = hints or driver_hints.Hints()
if driver.is_domain_aware():
# Force the domain_scope into the hint to ensure that we only get
# back domains for that scope.
self._ensure_domain_id_in_hints(hints, domain_scope)
else:
# We are effectively satisfying any domain_id filter by the above
# driver selection, so remove any such filter.
self._mark_domain_id_filter_satisfied(hints)
ref_list = driver.list_users(hints)
return self._set_domain_id_and_mapping(
ref_list, domain_scope, driver, mapping.EntityType.USER)
@notifications.updated(_USER)
@domains_configured
@exception_translated('user')
def update_user(self, user_id, user_ref):
user = user_ref.copy()
if 'name' in user:
user['name'] = clean.user_name(user['name'])
if 'enabled' in user:
user['enabled'] = clean.user_enabled(user['enabled'])
if 'domain_id' in user:
self.assignment_api.get_domain(user['domain_id'])
if 'id' in user:
if user_id != user['id']:
raise exception.ValidationError(_('Cannot change user ID'))
        # Since any ID in the user dict is now irrelevant, remove it so that
        # the driver layer won't be confused by the fact that this is the
        # public ID, not the local ID.
user.pop('id')
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(user_id))
user = self._clear_domain_id_if_domain_unaware(driver, user)
ref = driver.update_user(entity_id, user)
if user.get('enabled') is False or user.get('password') is not None:
if self.revoke_api:
self.revoke_api.revoke_by_user(user_id)
self.token_api.delete_tokens_for_user(user_id)
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.USER)
@notifications.deleted(_USER)
@domains_configured
@exception_translated('user')
def delete_user(self, user_id):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(user_id))
driver.delete_user(entity_id)
self.credential_api.delete_credentials_for_user(user_id)
self.token_api.delete_tokens_for_user(user_id)
self.id_mapping_api.delete_id_mapping(user_id)
@notifications.created(_GROUP, result_id_arg_attr='id')
@domains_configured
@exception_translated('group')
def create_group(self, group_ref):
group = group_ref.copy()
group.setdefault('description', '')
domain_id = group['domain_id']
self.assignment_api.get_domain(domain_id)
# For creating a group, the domain is in the object itself
domain_id = group_ref['domain_id']
driver = self._select_identity_driver(domain_id)
group = self._clear_domain_id_if_domain_unaware(driver, group)
# Generate a local ID - in the future this might become a function of
# the underlying driver so that it could conform to rules set down by
# that particular driver type.
group['id'] = uuid.uuid4().hex
ref = driver.create_group(group['id'], group)
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.GROUP)
@domains_configured
@exception_translated('group')
def get_group(self, group_id):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(group_id))
ref = driver.get_group(entity_id)
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.GROUP)
@notifications.updated(_GROUP)
@domains_configured
@exception_translated('group')
def update_group(self, group_id, group):
if 'domain_id' in group:
self.assignment_api.get_domain(group['domain_id'])
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(group_id))
group = self._clear_domain_id_if_domain_unaware(driver, group)
ref = driver.update_group(entity_id, group)
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.GROUP)
def revoke_tokens_for_group(self, group_id):
# We get the list of users before we attempt the group
# deletion, so that we can remove these tokens after we know
# the group deletion succeeded.
# TODO(ayoung): revoke based on group and roleids instead
user_ids = []
for u in self.list_users_in_group(group_id):
user_ids.append(u['id'])
if self.revoke_api:
self.revoke_api.revoke_by_user(u['id'])
self.token_api.delete_tokens_for_users(user_ids)
@notifications.deleted(_GROUP)
@domains_configured
@exception_translated('group')
def delete_group(self, group_id):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(group_id))
# As well as deleting the group, we need to invalidate
# any tokens for the users who are members of the group.
self.revoke_tokens_for_group(group_id)
driver.delete_group(entity_id)
self.id_mapping_api.delete_id_mapping(group_id)
@domains_configured
@exception_translated('group')
def add_user_to_group(self, user_id, group_id):
@exception_translated('user')
def get_entity_info_for_user(public_id):
return self._get_domain_driver_and_entity_id(public_id)
_domain_id, group_driver, group_entity_id = (
self._get_domain_driver_and_entity_id(group_id))
# Get the same info for the user_id, taking care to map any
# exceptions correctly
_domain_id, user_driver, user_entity_id = (
get_entity_info_for_user(user_id))
self._assert_user_and_group_in_same_backend(
user_entity_id, user_driver, group_entity_id, group_driver)
group_driver.add_user_to_group(user_entity_id, group_entity_id)
self.token_api.delete_tokens_for_user(user_id)
@domains_configured
@exception_translated('group')
def remove_user_from_group(self, user_id, group_id):
@exception_translated('user')
def get_entity_info_for_user(public_id):
return self._get_domain_driver_and_entity_id(public_id)
_domain_id, group_driver, group_entity_id = (
self._get_domain_driver_and_entity_id(group_id))
# Get the same info for the user_id, taking care to map any
# exceptions correctly
_domain_id, user_driver, user_entity_id = (
get_entity_info_for_user(user_id))
self._assert_user_and_group_in_same_backend(
user_entity_id, user_driver, group_entity_id, group_driver)
group_driver.remove_user_from_group(user_entity_id, group_entity_id)
# TODO(ayoung) revoking all tokens for a user based on group
# membership is overkill, as we only would need to revoke tokens
# that had role assignments via the group. Calculating those
# assignments would have to be done by the assignment backend.
if self.revoke_api:
self.revoke_api.revoke_by_user(user_id)
self.token_api.delete_tokens_for_user(user_id)
@manager.response_truncated
@domains_configured
@exception_translated('user')
def list_groups_for_user(self, user_id, hints=None):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(user_id))
hints = hints or driver_hints.Hints()
if not driver.is_domain_aware():
# We are effectively satisfying any domain_id filter by the above
# driver selection, so remove any such filter
self._mark_domain_id_filter_satisfied(hints)
ref_list = driver.list_groups_for_user(entity_id, hints)
return self._set_domain_id_and_mapping(
ref_list, domain_id, driver, mapping.EntityType.GROUP)
@manager.response_truncated
@domains_configured
@exception_translated('group')
def list_groups(self, domain_scope=None, hints=None):
driver = self._select_identity_driver(domain_scope)
hints = hints or driver_hints.Hints()
if driver.is_domain_aware():
# Force the domain_scope into the hint to ensure that we only get
# back domains for that scope.
self._ensure_domain_id_in_hints(hints, domain_scope)
else:
# We are effectively satisfying any domain_id filter by the above
# driver selection, so remove any such filter.
self._mark_domain_id_filter_satisfied(hints)
ref_list = driver.list_groups(hints)
return self._set_domain_id_and_mapping(
ref_list, domain_scope, driver, mapping.EntityType.GROUP)
@manager.response_truncated
@domains_configured
@exception_translated('group')
def list_users_in_group(self, group_id, hints=None):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(group_id))
hints = hints or driver_hints.Hints()
if not driver.is_domain_aware():
# We are effectively satisfying any domain_id filter by the above
# driver selection, so remove any such filter
self._mark_domain_id_filter_satisfied(hints)
ref_list = driver.list_users_in_group(entity_id, hints)
return self._set_domain_id_and_mapping(
ref_list, domain_id, driver, mapping.EntityType.USER)
@domains_configured
@exception_translated('group')
def check_user_in_group(self, user_id, group_id):
@exception_translated('user')
def get_entity_info_for_user(public_id):
return self._get_domain_driver_and_entity_id(public_id)
_domain_id, group_driver, group_entity_id = (
self._get_domain_driver_and_entity_id(group_id))
# Get the same info for the user_id, taking care to map any
# exceptions correctly
_domain_id, user_driver, user_entity_id = (
get_entity_info_for_user(user_id))
self._assert_user_and_group_in_same_backend(
user_entity_id, user_driver, group_entity_id, group_driver)
return group_driver.check_user_in_group(user_entity_id,
group_entity_id)
@domains_configured
def change_password(self, context, user_id, original_password,
new_password):
# authenticate() will raise an AssertionError if authentication fails
self.authenticate(context, user_id, original_password)
update_dict = {'password': new_password}
self.update_user(user_id, update_dict)
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
"""Interface description for an Identity driver."""
def _get_list_limit(self):
return CONF.identity.list_limit or CONF.list_limit
def is_domain_aware(self):
"""Indicates if Driver supports domains."""
return True
def generates_uuids(self):
"""Indicates if Driver generates UUIDs as the local entity ID."""
return True
@abc.abstractmethod
def authenticate(self, user_id, password):
"""Authenticate a given user and password.
:returns: user_ref
:raises: AssertionError
"""
raise exception.NotImplemented() # pragma: no cover
# user crud
@abc.abstractmethod
def create_user(self, user_id, user):
"""Creates a new user.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_users(self, hints):
"""List users in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of user_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_users_in_group(self, group_id, hints):
"""List users in a group.
:param group_id: the group in question
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of user_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_user(self, user_id):
"""Get a user by ID.
:returns: user_ref
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_user(self, user_id, user):
"""Updates an existing user.
:raises: keystone.exception.UserNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def add_user_to_group(self, user_id, group_id):
"""Adds a user to a group.
:raises: keystone.exception.UserNotFound,
keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def check_user_in_group(self, user_id, group_id):
"""Checks if a user is a member of a group.
:raises: keystone.exception.UserNotFound,
keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def remove_user_from_group(self, user_id, group_id):
"""Removes a user from a group.
:raises: keystone.exception.NotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_user(self, user_id):
"""Deletes an existing user.
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_user_by_name(self, user_name, domain_id):
"""Get a user by name.
:returns: user_ref
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented() # pragma: no cover
# group crud
@abc.abstractmethod
def create_group(self, group_id, group):
"""Creates a new group.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_groups(self, hints):
"""List groups in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of group_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_groups_for_user(self, user_id, hints):
"""List groups a user is in
:param user_id: the user in question
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of group_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_group(self, group_id):
"""Get a group by ID.
:returns: group_ref
:raises: keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_group(self, group_id, group):
"""Updates an existing group.
        :raises: keystone.exception.GroupNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_group(self, group_id):
"""Deletes an existing group.
:raises: keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
# end of identity
@dependency.provider('id_mapping_api')
class MappingManager(manager.Manager):
"""Default pivot point for the ID Mapping backend."""
def __init__(self):
super(MappingManager, self).__init__(CONF.identity_mapping.driver)
@six.add_metaclass(abc.ABCMeta)
class MappingDriver(object):
"""Interface description for an ID Mapping driver."""
@abc.abstractmethod
def get_public_id(self, local_entity):
"""Returns the public ID for the given local entity.
:param dict local_entity: Containing the entity domain, local ID and
type ('user' or 'group').
:returns: public ID, or None if no mapping is found.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_id_mapping(self, public_id):
"""Returns the local mapping.
:param public_id: The public ID for the mapping required.
:returns dict: Containing the entity domain, local ID and type. If no
mapping is found, it returns None.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def create_id_mapping(self, local_entity, public_id=None):
"""Create and store a mapping to a public_id.
:param dict local_entity: Containing the entity domain, local ID and
type ('user' or 'group').
:param public_id: If specified, this will be the public ID. If this
is not specified, a public ID will be generated.
:returns: public ID
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_id_mapping(self, public_id):
"""Deletes an entry for the given public_id.
:param public_id: The public ID for the mapping to be deleted.
The method is silent if no mapping is found.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def purge_mappings(self, purge_filter):
"""Purge selected identity mappings.
:param dict purge_filter: Containing the attributes of the filter that
defines which entries to purge. An empty
filter means purge all mappings.
"""
raise exception.NotImplemented() # pragma: no cover
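# --- Added illustrative sketch (not part of Keystone) ----------------------
# A hypothetical, dict-backed reading of the MappingDriver contract above: a
# local entity (described by its domain, local ID and type, per the
# docstrings) is mapped to an opaque public ID.  The dict key names used
# below ('domain_id', 'local_id', 'entity_type') are assumptions made for the
# sketch; a real driver persists the mapping instead of holding it in memory.
import uuid
class _InMemoryIdMappingSketch(object):
    def __init__(self):
        self._by_local = {}   # (domain_id, local_id, entity_type) -> public_id
        self._by_public = {}  # public_id -> local entity dict
    @staticmethod
    def _key(local_entity):
        return (local_entity['domain_id'], local_entity['local_id'],
                local_entity['entity_type'])
    def get_public_id(self, local_entity):
        return self._by_local.get(self._key(local_entity))
    def get_id_mapping(self, public_id):
        return self._by_public.get(public_id)
    def create_id_mapping(self, local_entity, public_id=None):
        public_id = public_id or uuid.uuid4().hex
        self._by_local[self._key(local_entity)] = public_id
        self._by_public[public_id] = dict(local_entity)
        return public_id
    def delete_id_mapping(self, public_id):
        local = self._by_public.pop(public_id, None)
        if local is not None:
            self._by_local.pop(self._key(local), None)
    def purge_mappings(self, purge_filter):
        # Empty filter purges everything; a fuller sketch would match the
        # filter attributes against each stored local entity.
        if not purge_filter:
            self._by_local.clear()
            self._by_public.clear()
# ---------------------------------------------------------------------------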
|
reeshupatel/demo
|
keystone/identity/core.py
|
Python
|
apache-2.0
| 42,880
|
import osgtest.library.core as core
import osgtest.library.files as files
import osgtest.library.osgunittest as osgunittest
class TestLcMaps(osgunittest.OSGTestCase):
required_rpms = ['lcmaps', 'lcmaps-db-templates', 'vo-client', 'vo-client-lcmaps-voms']
def test_01_configure(self):
core.config['lcmaps.db'] = '/etc/lcmaps.db'
core.config['lcmaps.gsi-authz'] = '/etc/grid-security/gsi-authz.conf'
core.skip_ok_unless_installed(*self.required_rpms)
template = files.read('/usr/share/lcmaps/templates/lcmaps.db.vomsmap',
as_single_string=True)
files.write(core.config['lcmaps.db'], template, owner='lcmaps')
files.write(core.config['lcmaps.gsi-authz'],
"globus_mapping liblcas_lcmaps_gt4_mapping.so lcmaps_callout\n",
owner='lcmaps')
def test_02_old_xrootd_policy(self):
core.skip_ok_unless_installed('xrootd-lcmaps', *self.required_rpms)
self.skip_ok_if(core.PackageVersion('xrootd-lcmaps') >= '1.4.0')
files.append(core.config['lcmaps.db'],
'''xrootd_policy:
verifyproxynokey -> banfile
banfile -> banvomsfile | bad
banvomsfile -> gridmapfile | bad
gridmapfile -> good | vomsmapfile
vomsmapfile -> good | defaultmapfile
defaultmapfile -> good | bad
''',
backup=False)
|
efajardo/osg-test
|
osgtest/tests/test_140_lcmaps.py
|
Python
|
apache-2.0
| 1,373
|
import cv2
import numpy as np
template = np.zeros((500,500,3),np.uint8)
template[:,:,0] = 255
template[:,:,1] = 255
template[:,:,2] = 255
x = [50,250,250,50,50]
y = [50,50,250,250,50]
cnt = np.asarray(zip(x,y))
cv2.drawContours(template,[cnt],0,0,1)
x = [100,200,200,100,100]
y = [300,300,150,150,300]
cnt = np.asarray(zip(x,y))
cv2.drawContours(template,[cnt],0,0,1)
x = [150,400,400,150,150]
y = [200,200,400,400,200]
cnt = np.asarray(zip(x,y))
cv2.drawContours(template,[cnt],0,0,1)
x = [150,200,200,150,150]
y = [250,250,200,200,250]
cnt = np.asarray(zip(x,y))
cv2.drawContours(template,[cnt],0,(255,0,0),-1)
cv2.imwrite("/home/ggdhines/github/aggregation/docs/images/rectangle_overlap.jpg",template)
|
zooniverse/aggregation
|
docs/source/images/rectangle_overlap.py
|
Python
|
apache-2.0
| 713
|
from vt_manager_kvm.models import *
from vt_manager_kvm.communication.utils.XmlUtils import *
import xmlrpclib
am = xmlrpclib.Server('https://expedient:expedient@192.168.254.193:8445/xmlrpc/agent')
xml = xmlFileToString('communication/utils/queryDelete.xml')
am.send(xml)
|
ict-felix/stack
|
vt_manager_kvm/src/python/vt_manager_kvm/tests/testdelete.py
|
Python
|
apache-2.0
| 278
|
# Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import utils_tests
import trappy
sys.path.append(os.path.join(utils_tests.TESTS_DIRECTORY, "..", "trappy"))
class BaseTestSched(utils_tests.SetupDirectory):
def __init__(self, *args, **kwargs):
super(BaseTestSched, self).__init__(
[("trace_sched.txt", "trace.txt")],
*args,
**kwargs)
class TestSchedLoadAvgSchedGroup(BaseTestSched):
def test_get_dataframe(self):
"""Test that SchedLoadAvgSchedGroup creates a proper data_frame"""
dfr = trappy.Run().sched_load_avg_sched_group.data_frame
self.assertTrue(len(dfr) == 1)
self.assertEquals(dfr["cpus"].iloc[0], "00000002")
self.assertEquals(dfr["load"].iloc[0], 0)
self.assertEquals(dfr["utilization"].iloc[0], 0)
class TestSchedLoadAvgTask(BaseTestSched):
def test_get_dataframe(self):
"""Test that SchedLoadAvgTask creates a proper data_frame"""
dfr = trappy.Run().sched_load_avg_task.data_frame
self.assertTrue(len(dfr) == 1)
self.assertEquals(dfr["comm"].iloc[0], "sshd")
self.assertEquals(dfr["pid"].iloc[0], 2962)
self.assertEquals(dfr["load"].iloc[0], 0)
self.assertEquals(dfr["utilization"].iloc[0], 0)
self.assertEquals(dfr["runnable_avg_sum"].iloc[0], 0)
self.assertEquals(dfr["running_avg_sum"].iloc[0], 0)
self.assertEquals(dfr["avg_period"].iloc[0], 48595)
class TestSchedLoadAvgCpu(BaseTestSched):
def test_get_dataframe(self):
"""Test that SchedLoadAvgCpu creates a proper data_frame"""
dfr = trappy.Run().sched_load_avg_cpu.data_frame
self.assertTrue(len(dfr) == 1)
self.assertEquals(dfr["cpu"].iloc[0], 0)
self.assertEquals(dfr["load"].iloc[0], 13)
self.assertEquals(dfr["utilization"].iloc[0], 18)
class TestSchedContribScaleFactor(BaseTestSched):
def test_get_dataframe(self):
"""Test that SchedContribScaleFactor creates a proper data_frame"""
dfr = trappy.Run().sched_contrib_scale_factor.data_frame
self.assertTrue(len(dfr) == 1)
self.assertEquals(dfr["cpu"].iloc[0], 0)
self.assertEquals(dfr["freq_scale_factor"].iloc[0], 426)
self.assertEquals(dfr["cpu_scale_factor"].iloc[0], 1024)
class TestSchedCpuCapacity(BaseTestSched):
def test_get_dataframe(self):
"""Test that SchedCpuCapacity creates a proper data_frame"""
dfr = trappy.Run().sched_cpu_capacity.data_frame
self.assertTrue(len(dfr) == 1)
self.assertEquals(dfr["cpu"].iloc[0], 3)
self.assertEquals(dfr["capacity"].iloc[0], 430)
self.assertEquals(dfr["rt_capacity"].iloc[0], 1024)
class TestSchedCpuFrequency(BaseTestSched):
def test_get_dataframe(self):
"""Test that CpuFrequency creates a proper data_frame"""
dfr = trappy.Run().sched_cpu_frequency.data_frame
self.assertTrue(len(dfr) == 1)
self.assertEquals(dfr["cpu"].iloc[0], 0)
self.assertEquals(dfr["state"].iloc[0], 600000)
self.assertFalse("cpu_id" in dfr.columns)
class TestGetFilters(BaseTestSched):
def test_get_filters(self):
"""Test that Run::get_filters returns correct list of filters"""
run = trappy.Run()
classes = run.class_definitions
filters = run.get_filters()
self.assertTrue(len(classes) == len(filters))
self.assertTrue(sorted(classes) == sorted(filters))
sched_classes = run.sched_classes
sched_filters = run.get_filters("sched")
self.assertTrue(len(sched_classes) == len(sched_filters))
self.assertTrue(sorted(sched_classes) == sorted(sched_filters))
class TestSpacedValueAttributes(BaseTestSched):
def test_spaced_value_attr(self):
"""Test that Run object parses spaced value attributes correctly"""
with open("trace.txt", "a") as fout:
fout.write(" <...>-2971 [004] 6550.056871: sched_load_avg_task: comm=AsyncTask #2 pid=6163 ")
dfr = trappy.Run().sched_load_avg_task.data_frame
self.assertTrue(len(dfr) == 2)
self.assertEquals(dfr["comm"].iloc[1], "AsyncTask #2")
self.assertEquals(dfr["pid"].iloc[1], 6163)
class TestNoSchedTraces(utils_tests.SetupDirectory):
def __init__(self, *args, **kwargs):
super(TestNoSchedTraces, self).__init__(
[("trace_empty.txt", "trace.txt")],
*args,
**kwargs)
def test_empty_trace_txt(self):
"""Test that empty objects are created with empty trace file"""
run = trappy.Run()
for attr in run.sched_classes.iterkeys():
self.assertTrue(len(getattr(run, attr).data_frame) == 0)
|
derkling/trappy
|
tests/test_sched.py
|
Python
|
apache-2.0
| 5,299
|
def pytest_addoption(parser):
    parser.addoption('--jenkins-docker', action='store',
                     default='jenkins/jenkins',
                     help='The Jenkins Docker container to launch')
|
OddBloke/jenkins-job-linter
|
integration_tests/conftest.py
|
Python
|
apache-2.0
| 203
|
from ftw import ruleset, testrunner, http, errors
import pytest
import re
import random
import threading
def test_logcontains(ruleset, test):
    runner = testrunner.TestRunner()
    for stage in test.stages:
        runner.run_stage(stage)
# Should return a test error because it searches the response before any request has been sent
def test_search1():
    runner = testrunner.TestRunner()
    x = ruleset.Input(dest_addr="example.com",headers={"Host":"example.com"})
    http_ua = http.HttpUA()
    with pytest.raises(errors.TestError):
        runner.test_response(http_ua.response_object,re.compile('dog'))
# Should return a failure because it is searching for a word that is not there
def test_search2():
    runner = testrunner.TestRunner()
    x = ruleset.Input(dest_addr="example.com",headers={"Host":"example.com"})
    http_ua = http.HttpUA()
    http_ua.send_request(x)
    with pytest.raises(AssertionError):
        runner.test_response(http_ua.response_object,re.compile('dog'))
# Should return a success because it is searching for a phrase that is present in the response
def test_search3():
    runner = testrunner.TestRunner()
    x = ruleset.Input(dest_addr="example.com",headers={"Host":"example.com"})
    http_ua = http.HttpUA()
    http_ua.send_request(x)
    runner.test_response(http_ua.response_object,re.compile('established to be used for'))
# Should return a success because the catch-all regex matches the response
def test_search4():
    runner = testrunner.TestRunner()
    x = ruleset.Input(dest_addr="example.com",headers={"Host":"example.com"})
    http_ua = http.HttpUA()
    http_ua.send_request(x)
    runner.test_response(http_ua.response_object,re.compile('.*'))
|
fastly/ftw
|
test/integration/test_htmlcontains.py
|
Python
|
apache-2.0
| 1,658
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import math
import scipy.special as sps
mean = 0
variance = 1
sigma = math.sqrt(variance)
def drawSampleNormal(sampleSize):
    samples = np.random.normal(mean, sigma, sampleSize)
    count, bins, ignored = plt.hist(samples, 80, normed=True)
    plt.plot(bins, mlab.normpdf(bins, mean, sigma))
    plt.show()
    plt.savefig("normal_" + str(sampleSize) + "_samples.png")
    plt.clf()
drawSampleNormal(20)
drawSampleNormal(50)
drawSampleNormal(100)
drawSampleNormal(500)
alpha = 7.5
beta = 10
def drawSampleGamma(sampleSize):
    samples = np.random.gamma(alpha, beta, sampleSize)
    count, bins, ignored = plt.hist(samples, 80, normed=True)
    pdf = bins**(alpha-1)*(np.exp(-bins/beta) / (sps.gamma(alpha)*beta**alpha))
    plt.plot(bins, pdf, linewidth=2, color='r')
    plt.show()
    plt.savefig("gamma_" + str(sampleSize) + "_samples.png")
    plt.clf()
drawSampleGamma(20)
drawSampleGamma(50)
drawSampleGamma(100)
drawSampleGamma(500)
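# Note added for clarity: the curve overlaid in drawSampleGamma above is the
# Gamma density in shape/scale form,
#     f(x) = x**(alpha - 1) * exp(-x / beta) / (Gamma(alpha) * beta**alpha),
# where Gamma is the gamma function (sps.gamma).  This matches the
# parameterization of np.random.gamma(shape=alpha, scale=beta) used for the
# samples, so the histogram and the plotted pdf are directly comparable.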
|
PredictionIO/open-academy
|
KairatAshim/pio_assignment2/problem2/problem2.py
|
Python
|
apache-2.0
| 1,046
|
from django.contrib import admin
from widgy.admin import WidgyAdmin
from widgy.contrib.page_builder.models import Callout
admin.site.register(Callout, WidgyAdmin)
|
j00bar/django-widgy
|
widgy/contrib/page_builder/admin.py
|
Python
|
apache-2.0
| 165
|
# The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
# http://initd.org/psycopg/docs/
import psycopg2
import select
import logger
import adm
import re
import threading
from wh import xlt, modPath
from Crypto.PublicKey._slowmath import rsa_construct
sqlKeywords=[]
moreKeywords=['serial', 'bigserial']
colKeywords=[]
def getSqlKeywords():
global colKeywords
global sqlKeywords
if not sqlKeywords:
f=open(modPath("kwlist.h", __name__))
lines=f.read()
f.close()
for line in lines.splitlines():
if line.startswith("PG_KEYWORD("):
tokens=line.split(',')
keyword=tokens[0][12:-1].lower()
# RESERVED, UNRESERVED, TYPE_FUNC_NAME, COL_NAME
if tokens[2].lstrip().startswith('COL_NAME'):
colKeywords.append(keyword)
else:
sqlKeywords.append(keyword)
colKeywords.extend(moreKeywords)
return sqlKeywords
identMatchPattern=re.compile("^[a-z][a-z0-9_]+$")
def quoteIdent(ident):
if identMatchPattern.match(ident) and ident not in getSqlKeywords():
return ident
return '"%s"' % ident.replace('"', '""')
def quoteValue(val, conn=None):
if isinstance(val, unicode): # psycopg2 quoting has some problems with unicode
return "'%s'" % val.replace("'", "''").replace("\\", "\\\\")
adapter=psycopg2.extensions.adapt(val)
if conn and hasattr(adapter, 'prepare'):
if isinstance(conn, pgConnection):
conn=conn.conn
elif isinstance(conn, pgCursor):
conn=conn.conn.conn
adapter.prepare(conn)
return adapter.getquoted()
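# Illustrative usage of the quoting helpers above (hypothetical values,
# Python 2 semantics as in the rest of this module):
#   quoteIdent('my_table')  -> 'my_table'      plain identifier, left unquoted
#   quoteIdent('select')    -> '"select"'      SQL keyword gets double-quoted
#   quoteValue(u"O'Brien")  -> "'O''Brien'"    embedded single quote is doubled
# Passing a pgConnection or pgCursor as the second argument of quoteValue lets
# psycopg2 prepare the adapter against the live connection where needed.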
class SqlException(adm.ServerException):
def __init__(self, sql, error):
logger.querylog(sql, error=error)
self.error=error
self.sql=sql
Exception.__init__(self, sql, error)
def __str__(self):
return self.error
######################################################################
class pgType:
def __init__(self, row):
self.oid=row['oid']
self.name=row['typname']
self.namespace=row['nspname']
self.category=row['typcategory']
def IsNumeric(self):
return self.category == 'N'
class pgTypeCache:
def __init__(self, rowset):
self.cache={}
self.Add(rowset)
def Add(self, rowset):
if not isinstance(rowset, pgRowset):
rowset=[rowset]
typ=None
for row in rowset:
typ=pgType(row)
self.cache[typ.oid] = typ
return typ
def Get(self, oid):
return self.cache.get(oid)
######################################################################
class pgCursorResult:
def __init__(self, cursor, colNames=None):
self.cursor=cursor
if colNames:
self.colNames=colNames
else:
self.colNames=[]
for d in cursor.GetDescription():
self.colNames.append(d.name)
class pgRow(pgCursorResult):
def __init__(self, cursor, row, colNames=None):
pgCursorResult.__init__(self, cursor, colNames)
self.row=row
def getTuple(self):
return tuple(self.getList())
def getList(self):
l=[]
for i in range(len(self.colNames)):
l.append(self.getItem(i))
return l
def getDict(self):
d={}
for i in range(len(self.colNames)):
item=self.getItem(i)
# aggregate functions deliver [None] with empty left joins; we want []
if isinstance(item, list) and len(item) == 1 and item[0] == None:
item=[]
d[self.colNames[i]] = item
return d
def __str__(self):
cols=[]
for i in range(len(self.colNames)):
val=unicode(self.getItem(i))
cols.append("%s=%s" % (self.colNames[i], val))
return "( %s )" % ",".join(cols)
def hasAttr(self, colName):
try:
self.colNames.index(colName)
return True
except:
return False
def getItem(self, i):
val=self.row[i]
if isinstance(val, str):
return val.decode('utf8')
return val
def __getitem__(self, colName):
try:
if isinstance(colName, (str, unicode)):
i=self.colNames.index(colName)
else:
i=colName
return self.getItem(i)
except Exception as _e:
logger.debug("Column %s not found" % colName)
return None
class pgRowset(pgCursorResult):
def __init__(self, cursor):
pgCursorResult.__init__(self, cursor)
self.__fetchone()
def GetRowcount(self):
return self.cursor.GetRowcount()
def __fetchone(self):
if self.cursor.GetRowcount() > 0:
row = self.cursor.FetchOne()
else:
row=None
if row:
self.curRow = pgRow(self.cursor, row, self.colNames)
else:
self.curRow=None
def HasMore(self):
return self.curRow != None
def Next(self):
row=self.curRow
if row:
self.__fetchone()
return row
def getDict(self):
d={}
for row in self:
d[row[0]] = row.getDict()
return d
def getDictList(self):
d=[]
for row in self:
d.append(row.getDict())
return d
def getList(self):
d=[]
for row in self:
d.append(row[0])
return d
def __iter__(self):
class RowsetIterator:
def __init__(self, outer):
self.outer=outer
def __iter__(self):
return self
def next(self):
row=self.outer.Next()
if row:
return row
else:
raise StopIteration()
return RowsetIterator(self)
######################################################################
class pgConnection:
def __init__(self, dsn, pool=None):
self.pool=pool
self.conn=None
self.cursor=None
self.inUse=False
self.lastError=None
self.trapSqlException=True
self.conn=psycopg2.connect(dsn, async=True)
self.wait("Connect")
self.cursor=self.conn.cursor()
def disconnect(self):
self.cursor=None
if self.conn:
self.conn.close()
self.conn=None
if self.pool:
self.pool.RemoveConnection(self)
def wait(self, spot=""):
if self.conn.async:
while self.conn.isexecuting():
try:
state = self.conn.poll()
except Exception as e:
self._handleException(e)
return False
if state == psycopg2.extensions.POLL_OK:
return True
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [self.conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
select.select([self.conn.fileno()], [], [])
else:
raise adm.ConnectionException(self.node, xlt("WAIT %s" % spot), self.lastError)
return False
def _handleException(self, exception):
if self.cursor and self.cursor.query:
cmd=self.cursor.query
else:
cmd=None
exception.message=errlines=exception.message.decode('utf8')
logger.querylog(cmd, error=errlines)
if self.trapSqlException:
self.lastError=errlines
if self.pool:
self.pool.lastError=errlines
adm.StopWaiting(adm.mainframe)
if self.conn and self.conn.closed:
self.disconnect()
if self.trapSqlException:
raise SqlException(cmd, errlines)
else:
raise exception
def isRunning(self):
return self.conn.poll() != psycopg2.extensions.POLL_OK
def GetCursor(self):
return pgCursor(self)
######################################################################
class pgCursor():
def __init__(self, conn):
conn.trapSqlException=True
self.conn=conn
self.cursor=self.conn.cursor
def __del__(self):
self.Close()
def SetThrowSqlException(self, how):
"""
SetThrowSqlException(bool)
If set to False, the underlying psycopg exception is raised instead of SqlException.
Use this to catch expected exceptions without GUI display.
"""
self.conn.trapSqlException=how
def Close(self):
if self.conn:
# logger.trace(2, 4, "RELEASING %s", str(self.conn))
self.conn.inUse=False
self.conn=None
self.cursor=None
def GetPid(self):
return self.conn.conn.get_backend_pid()
def Quote(self, val):
return quoteValue(val, self)
def GetDescription(self):
if self.cursor.description:
return self.cursor.description
return []
def GetRowcount(self):
return self.cursor.rowcount
def FetchOne(self):
row=self.cursor.fetchone()
return row
# def Rollback(self):
# self.cursor.execute("ROLLBACK")
# self.cursor.wait("ROLLBACK")
#
# def Commit(self):
# self.cursor.execute("COMMIT")
# self.cursor.wait("COMMIT")
def execute(self, cmd, args=None):
if args:
if isinstance(args, list):
args=tuple(args)
elif isinstance(args, tuple):
pass
else:
args=(args,)
try:
self.cursor.execute(cmd, args)
except Exception as e:
print "EXcept", e, unicode(e)
self.conn._handleException(e)
def wait(self, spot=""):
return self.conn.wait(spot)
def ExecuteSet(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSet")
rowset=pgRowset(self)
logger.querylog(self.cursor.query, result="%d rows" % rowset.GetRowcount())
adm.StopWaiting(frame)
return rowset
except Exception as e:
adm.StopWaiting(frame, e.error)
raise e
def ExecuteList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getList()
return None
def ExecuteDictList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getDictList()
return None
def ExecuteRow(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteRow")
row=self.cursor.fetchone()
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
row=pgRow(self, row)
logger.querylog(self.cursor.query, result=unicode(row))
return row
return None
def Execute(self, cmd, args=None, spot=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("Execute")
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
rc=self.GetRowcount()
if spot: spot += " "
else: spot=""
logger.querylog(self.cursor.query, result=spot+ xlt("%d rows") % rc)
return rc
def ExecuteSingle(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSingle")
try:
row=self.cursor.fetchone()
except Exception as _e:
#print e
row=None
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
result=row[0]
logger.querylog(self.cursor.query, result="%s" % result)
return result
else:
logger.querylog(self.cursor.query, result=xlt("no result"))
return None
def Insert(self, cmd, returning=None):
if returning:
cmd += "\nRETURNING %s" % returning
rowset=self.ExecuteSet(cmd)
if not self.GetRowcount():
return None
result=[]
for row in rowset:
line=row.getTuple()
if len(line) > 1:
result.append(line)
else:
result.append(line[0])
if len(result) > 1:
return result
else:
return result[0]
else:
self.ExecuteSingle(cmd)
return self.cursor.lastrowid
def ExecuteDict(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
d={}
for row in rowset:
d[row[0]] = row[1]
return d
def ExecuteAsync(self, cmd, args=None):
worker=QueryWorker(self, cmd, args)
return worker
#############################################################################
class pgConnectionPool:
def __init__(self, node, dsn):
self.node=node
self.lastError=None
self.connections=[]
self.lock=threading.Lock()
self.dsn=dsn
# create first connection to make sure params are ok
conn=self.CreateConnection()
with self.lock:
self.connections.append(conn)
def __del__(self):
self.Disconnect()
def ServerVersion(self):
if not self.connections:
return None
v=self.connections[0].conn.server_version
return int(v/10000) + ((v%10000)/100)*0.1
def HasFailed(self):
return len(self.connections) == 0
def Disconnect(self):
for conn in self.connections:
conn.disconnect()
self.connections=[]
def RemoveConnection(self, conn):
try: self.connections.remove(conn)
except: pass
def GetCursor(self):
conn=None
with self.lock:
for c in self.connections:
if not c.inUse:
conn=c
# logger.trace(2, 4, "USING %s", str(c))
c.inUse=True
break
if not conn:
conn=self.CreateConnection()
# logger.trace(2, 4, "CREATING %s", str(c))
return conn.GetCursor()
def CreateConnection(self):
try:
conn=pgConnection(self.dsn, self)
return conn
except Exception as e:
self.lastError = unicode(e)
raise adm.ConnectionException(self.node, xlt("Connect"), self.lastError)
##########################################################
class QueryWorker(threading.Thread):
def __init__(self, cursor, cmd, args):
threading.Thread.__init__(self)
self.cursor=cursor
self.cmd=cmd
self.args=args
self.running=True
def __del__(self):
self.cancel()
self.cursor=None
def run(self):
self.cancelled=False
self.error=None
try:
self.cursor.execute(self.cmd, self.args)
self.cursor.wait("AsyncWorker")
except Exception as e:
self.error=e
self.running=False
def cancel(self):
if self.running:
self.cancelled=True
self.running=False
self.cursor.conn.conn.cancel()
def GetRowcount(self):
return self.cursor.GetRowcount()
def GetResult(self):
rs=None
try:
rs=pgRowset(self.cursor)
except:
pass
self.cursor=None
return rs
def IsRunning(self):
return self.running
def Cancel(self):
if self.running:
self.cancel()
#######################################################################
class pgQuery:
def __init__(self, tab=None, cursor=None):
self.columns=[]
self.vals=[]
self.tables=[]
self.where=[]
self.order=[]
self.group=[]
self.cursor=cursor
if tab:
self.tables.append(tab)
def quoteIdent(self, identifier):
return quoteIdent(identifier)
def SetCursor(self, cursor):
self.cursor=cursor
def AddCol(self, name, quoted=False):
if name:
if isinstance(name, list):
map(lambda x: self.AddCol(x, quoted), name)
else:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
def AddColVal(self, name, val, quoted=False):
if name:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
self.vals.append(val)
def AddJoin(self, tab):
if tab:
self.tables.append("JOIN %s" % tab)
def AddLeft(self, tab):
if tab:
self.tables.append("LEFT OUTER JOIN %s" % tab)
def AddWhere(self, where, val=None):
if where:
if val:
where="%s=%s" % (quoteIdent(where), quoteValue(val))
self.where.append(where)
def AddOrder(self, order, quoted=False):
if order:
if quoted:
order=quoteIdent(order)
self.order.append(order)
def AddGroup(self, group):
if group:
self.group.append(group)
def groupJoin(self, partList, sep=', ', breakLen=80):
result=[]
line=""
for part in partList:
if line: line += "%s%s" % (sep, part)
else: line=part
if len(line) > breakLen:
result.append(line)
line=""
if line:
result.append(line)
return ",\n ".join(result)
def SelectQueryString(self):
sql=["SELECT %s" % self.groupJoin(self.columns),
" FROM %s" % "\n ".join(self.tables) ]
if self.where:
sql.append(" WHERE %s" % "\n AND ".join(self.where))
if self.group:
sql.append(" GROUP BY %s" % ", ".join(self.group))
if self.order:
sql.append(" ORDER BY %s" % ", ".join(self.order))
return "\n".join(sql)
def Select(self):
return self.cursor.ExecuteSet(self.SelectQueryString())
def Insert(self, returning=None):
if len(self.tables) != 1:
raise Exception("pgQuery: INSERT with single table only")
sql=["INSERT INTO %s (%s)" % (self.tables[0], ",".join(self.columns))]
values=[]
for col in range(len(self.columns)):
values.append("%s" % quoteValue(self.vals[col], self.cursor))
sql.append(" VALUES (%s)" % self.groupJoin(values))
return self.cursor.Insert("\n".join(sql), returning)
def Update(self):
if len(self.tables) != 1:
raise Exception("pgQuery: UPDATE with single table only")
sql=["UPDATE %s" % self.tables[0]]
cols=[]
for col in range(len(self.columns)):
val=quoteValue(self.vals[col], self.cursor)
cols.append( "%s=%s" % ( self.columns[col], val ))
sql.append(" SET %s" % self.groupJoin(cols))
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="UPDATE")
def Delete(self):
if len(self.tables) != 1:
raise Exception("pgQuery: DELETE with single table only")
sql=["DELETE FROM %s" % self.tables[0]]
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="DELETE")
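# Illustrative usage of pgQuery (hypothetical table and column names; a real
# caller would obtain the cursor from pgConnectionPool.GetCursor()):
#   q = pgQuery("pg_class c", cursor)
#   q.AddCol("c.relname")
#   q.AddLeft("pg_namespace n ON n.oid=c.relnamespace")
#   q.AddWhere("n.nspname='public'")
#   q.AddOrder("c.relname")
#   print q.SelectQueryString()   # shows the assembled SELECT ... FROM ... WHERE ... ORDER BY text
#   rowset = q.Select()           # executes it through the cursor and returns a pgRowset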
|
andreas-p/admin4
|
modPg/_pgsql.py
|
Python
|
apache-2.0
| 17,772
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class FromConstructor(test_util.TensorFlowTestCase):
# Tests invalid constructors using a dummy value for the GraphDef.
def testInvalidConstructor(self):
message = ('If input_tensors and output_tensors are None, both '
'input_arrays_with_shape and output_arrays must be defined.')
# `output_arrays` is not defined.
with self.assertRaises(ValueError) as error:
lite.TocoConverter(
None, None, [], input_arrays_with_shape=[('input', [3, 9])])
self.assertEqual(message, str(error.exception))
# `input_arrays_with_shape` is not defined.
with self.assertRaises(ValueError) as error:
lite.TocoConverter(None, [], None, output_arrays=['output'])
self.assertEqual(message, str(error.exception))
# Tests valid constructors using a dummy value for the GraphDef.
def testValidConstructor(self):
converter = lite.TocoConverter(
None,
None,
None,
input_arrays_with_shape=[('input', [3, 9])],
output_arrays=['output'])
self.assertFalse(converter._has_valid_tensors())
self.assertEqual(converter.get_input_arrays(), ['input'])
with self.assertRaises(ValueError) as error:
converter._set_batch_size(1)
self.assertEqual(
'The batch size cannot be set for this model. Please use '
'input_shapes parameter.', str(error.exception))
converter = lite.TocoConverter(None, ['input_tensor'], ['output_tensor'])
self.assertTrue(converter._has_valid_tensors())
class FromSessionTest(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testQuantization(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testQuantizationInvalid(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testSizeNoneInvalid(self):
in_tensor = array_ops.placeholder(dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
str(error.exception))
def testBatchSizeInvalid(self):
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
sess.run(_global_variables_initializer())
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# TODO(nupurgarg): Verify value of contents in GraphViz.
def testGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertTrue(graphviz_output)
# TODO(nupurgarg): Verify value of contents in GraphViz.
def testDumpGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure interpreter is able to allocate and check graphviz data.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
num_items_graphviz = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz)
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
converter.dump_graphviz_video = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure graphviz folder has more data after using video flag.
num_items_graphviz_video = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz_video > num_items_graphviz)
def testInferenceInputType(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.inference_input_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testDefaultRangesStats(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testPostTrainingQuantize(self):
np.random.seed(0)
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Convert float model.
float_converter = lite.TocoConverter.from_session(sess, [in_tensor_1],
[out_tensor])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TocoConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.post_training_quantize = True
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# Ensure that the quantized weights tflite model is smaller.
self.assertTrue(len(quantized_tflite) < len(float_tflite))
def testExtendedMode(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.converter_mode = lite.ConverterMode.TOCO_EXTENDED_ALL
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Eager delegate before inference.',
str(error.exception))
class FromFrozenGraphFile(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py.',
str(error.exception))
def testPbtxt(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFileNotFound(self):
with self.assertRaises(IOError) as error:
lite.TocoConverter.from_frozen_graph('invalid_file', ['Placeholder'],
['add'])
self.assertEqual('File \'invalid_file\' does not exist.',
str(error.exception))
def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(IOError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
# TODO(nupurgarg): Test model loading in open source.
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
self._graph_def_file = resource_loader.get_path_to_datafile(
'testdata/tflite_graph.pb')
self._input_arrays = ['normalized_input_image_tensor']
self._output_arrays = [
'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
]
self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}
def testTFLiteGraphDef(self):
# Tests the object detection model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
converter = lite.TocoConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays,
self._input_shapes)
converter.allow_custom_ops = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('TFLite_Detection_PostProcess:1',
output_details[1]['name'])
self.assertTrue(([1, 10] == output_details[1]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:2',
output_details[2]['name'])
self.assertTrue(([1, 10] == output_details[2]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:3',
output_details[3]['name'])
self.assertTrue(([1] == output_details[3]['shape']).all())
def testTFLiteGraphDefMissingShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# Missing `input_shapes`.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays)
self.assertEqual('input_shapes must be defined for this model.',
str(error.exception))
def testTFLiteGraphDefInvalidShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# `input_shapes` does not contain the names in `input_arrays`.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(
self._graph_def_file,
self._input_arrays,
self._output_arrays,
input_shapes={'invalid-value': [1, 19]})
self.assertEqual(
'input_shapes must contain a value for each item in input_array.',
str(error.exception))
class FromSavedModelTest(test_util.TensorFlowTestCase):
def _createSavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def testSimpleModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testNoneBatchSize(self):
"""Test a SavedModel, with None in input tensor's shape."""
saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testOrderInputArrays(self):
"""Test a SavedModel ordering of input arrays."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TocoConverter.from_saved_model(
saved_model_dir, input_arrays=['inputB', 'inputA'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Check case where input shape is given.
converter = lite.TocoConverter.from_saved_model(
saved_model_dir,
input_arrays=['inputA'],
input_shapes={'inputA': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check case where input shape is None.
converter = lite.TocoConverter.from_saved_model(
saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
class FromKerasFile(test_util.TensorFlowTestCase):
def setUp(self):
keras.backend.clear_session()
def _getSequentialModel(self):
with session.Session().as_default():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
try:
fd, keras_file = tempfile.mkstemp('.h5')
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
return keras_file
def testSequentialModel(self):
"""Test a Sequential tf.keras model with default inputs."""
keras_file = self._getSequentialModel()
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
def testSequentialModelInputArray(self):
"""Test a Sequential tf.keras model testing input arrays argument."""
keras_file = self._getSequentialModel()
# Invalid input array raises error.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_keras_model_file(
keras_file, input_arrays=['invalid-input'])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Valid input array.
converter = lite.TocoConverter.from_keras_model_file(
keras_file, input_arrays=['dense_input'])
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
def testSequentialModelInputShape(self):
"""Test a Sequential tf.keras model testing input shapes argument."""
keras_file = self._getSequentialModel()
    # Passing in the shape of an invalid input array has no impact as long as
    # all input arrays have a shape.
converter = lite.TocoConverter.from_keras_model_file(
keras_file, input_shapes={'invalid-input': [2, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Passing in shape of valid input array.
converter = lite.TocoConverter.from_keras_model_file(
keras_file, input_shapes={'dense_input': [2, 3]})
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
# Check input shape from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertTrue(([2, 3] == input_details[0]['shape']).all())
def testSequentialModelOutputArray(self):
"""Test a Sequential tf.keras model testing output arrays argument."""
keras_file = self._getSequentialModel()
# Invalid output array raises error.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_keras_model_file(
keras_file, output_arrays=['invalid-output'])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
# Valid output array.
converter = lite.TocoConverter.from_keras_model_file(
keras_file, output_arrays=['time_distributed/Reshape_1'])
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
def testFunctionalModel(self):
"""Test a Functional tf.keras model with default inputs."""
with session.Session().as_default():
inputs = keras.layers.Input(shape=(3,), name='input')
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
model.predict(x)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
def testFunctionalModelMultipleInputs(self):
"""Test a Functional tf.keras model with multiple inputs and outputs."""
with session.Session().as_default():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.mae],
loss_weights=[1., 0.5])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
model.predict([input_a_np, input_b_np], batch_size=5)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
os.remove(keras_file)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('input_a', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('input_b', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(2, len(output_details))
self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('dropout/Identity', output_details[1]['name'])
self.assertEqual(np.float32, output_details[1]['dtype'])
self.assertTrue(([1, 4] == output_details[1]['shape']).all())
self.assertEqual((0., 0.), output_details[1]['quantization'])
def testFunctionalSequentialModel(self):
"""Test a Functional tf.keras model containing a Sequential model."""
with session.Session().as_default():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model = keras.models.Model(model.input, model.output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
model.predict(x)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
if __name__ == '__main__':
test.main()
|
xodus7/tensorflow
|
tensorflow/contrib/lite/python/lite_test.py
|
Python
|
apache-2.0
| 45,063
|
from sqlite3 import connect
class DB:
def __init__(self,path="people.db"):
self.conn = connect(path)
self.c = self.conn.cursor()
def getList(self):
self.c.execute('SELECT * FROM people')
return self.c.fetchall()
def close(self):
self.conn.close()
class Date:
def __init__(self, y, m, d):
self.y = int(y)
self.m = int(m) if m else None
self.d = int(d) if d else None
def check (self, other, attr):
if getattr(self,attr) == None or getattr(other,attr) == None:
return 1
if getattr(self,attr) < getattr(other,attr):
return 1
if getattr(self,attr) > getattr(other,attr):
return -1
return 0
__le__ = lambda self, other: True if 1 == (self.check(other,"y") or self.check(other,"m") or self.check(other,"d") or 1) else False
__str__ = lambda self: str(self.y) + "-" + str(self.m) + "-" + str(self.d)
class OrcID:
def __init__(self, id, start, stop):
self.id = id
self.start = Date(*start.split("-"))
self.stop = Date(*stop.split("-"))
getID = lambda self: "-".join([self.id[4 * i : 4 * (i + 1)] for i in range(4)])
__str__ = lambda self: self.getID() + ": " + str(self.start) + " - " + str(self.stop)
if __name__ == "__main__":
db = DB()
orcs = [OrcID(*t) for t in db.getList()]
db.close()
for orc in orcs:
print("Do something with",orc)
class WorkSummary:
def __init__(self, path, title, date):
self.path = path
self.title = title
self.date = date
__lt__ = lambda self, other: self.date.y < other.date.y or (self.date.y == other.date.y and self.title < other.title)
__eq__ = lambda self, other: self.date.y == other.date.y and self.title == other.title
__str__ = lambda self: self.title + ": " + str(self.date)
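# Illustrative sketch (not part of the original tutorial file): Date.check
# treats a missing month or day as a wildcard, so partially specified dates
# still compare; OrcID formats the 16-character identifier into the usual
# four-block form.
if __name__ == "__main__":
    assert Date(2015, None, None) <= Date(2015, 6, 1)    # unknown month matches
    assert not (Date(2016, 1, 1) <= Date(2015, 12, 31))  # later year never matches
    print(OrcID("0000000212345678", "2014-01-01", "2015-12-31"))
    # -> 0000-0002-1234-5678: 2014-1-1 - 2015-12-31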
|
ScaDS/ORC-Schlange
|
Tutorial/02 DATE, ORCID and WorkSummary Class(Filter 1)/__main__.py
|
Python
|
apache-2.0
| 1,679
|
import os
import time
import const
import pickle
'''
Simulator class that manages the loop needed to train a
Q-learning agent for as long as the agent's decaying epsilon
is larger than the tolerance.
'''
class Simulator(object):
    # Set up game constants
const.MAX_FLIPS = 4
const.NUM_LOOK = 3
def __init__(self, game,debug=False):
self._game = game
const.DEBUG = debug
# stats dict records the w/l of each player
self.stats = {}
self.stats['results'] = [0,0,0]
def run(self, tolerance=0.05, n_test=0):
"""
Run a simulation of the environment.
        'tolerance' is the epsilon threshold below which training stops and testing begins (if enabled)
'n_test' is the number of testing trials simulated
Note that the minimum number of training trials is always 20.
"""
a = self.getQlearnAgent(self._game)
total_trials = 1
testing = False
trial = 1
while True:
# flip testing when more than 20 trials and
# epsilon less than tolerance
if not testing:
if total_trials > 20:
if a.learning:
if a.epsilon < tolerance:
#if total_trials > 3000:
testing = True
trial = 1
self.stats['results'] = [0,0,0]
else:
testing = True
trial = 1
else:
if trial > n_test:
break
while not self._game.checkWin():
if testing:
self._game.drawScreen()
mv = self._game.getCurPlayer().choose_move(self._game.getState())
self._game.update(mv)
            # Determine and record the outcome of the finished game
if self._game._winner is None:
msg = "The game was a draw!"
self.stats['results'][2] += 1
else:
msg = str(self._game._winner) + " wins!"
self.stats['results'][self._game._player_pair.index(self._game._winner)] += 1
print (msg)
self._game.drawScreen()
self._game.reset(testing=testing)
#a.reset(testing=testing)
print ("/-------------------------")
if testing:
print ("| Testing trial {}".format(trial))
print ('Epsilon: ', a.epsilon)
else:
print ("| Training trial {}".format(trial))
print ('Epsilon: ', a.epsilon)
print ("\-------------------------")
self.printResults()
total_trials = total_trials + 1
trial = trial + 1
print (len(a.Q))
print ('Training size: ', total_trials - trial)
self.saveGeneratedDict(a)
def getQlearnAgent(self, game):
'''
        Return the player agent declared as a Q-learning (or minimax-Q) agent.
        Exit the simulation if neither player is declared as such.
'''
if game._player_pair[0]._name == 'QLEARN1':
a = game._player_pair[0]
return a
elif game._player_pair[1]._name == 'QLEARN2':
a = game._player_pair[1]
return a
elif game._player_pair[0]._name == 'MINIMAXQ1':
a = game._player_pair[0]
return a
elif game._player_pair[1]._name == 'MINIMAXQ2':
a = game._player_pair[1]
return a
else:
print('One of the player agents need to be Qlearn')
os.sys.exit()
def printResults(self):
'''
        Print the game results accumulated in the stats dictionary.
'''
p1 = self._game._player_pair[0]
p2 = self._game._player_pair[1]
results = self.stats['results']
        # results = [p1 wins, p2 wins, draws]
        if results[0] > results[1]:
            msg = str(p1)
        elif results[0] < results[1]:
            msg = str(p2)
        else:
            msg = 'It was a tie'
msg = '\n' + msg + '\n'
msg += str(p1) + ': ' + '/'.join(map(str, results))
msg += '\n'
msg += str(p2) + ': ' + '/'.join(map(str, (results[1], results[0], results[2])))
msg += '\n'
print(msg)
def saveGeneratedDict(self, player):
'''
Save dictionary object generated during the training in order
to be able to use the player without training it again.
'''
save_dir = './reinforcement_dict/' + self.getGameNames() + '.pickle'
with open(save_dir, 'wb') as handle:
pickle.dump(player.Q, handle, protocol=pickle.HIGHEST_PROTOCOL)
def getGameNames(self):
'''
        Return the game name, shared by player one and player two.
'''
return const.GAME_NAME
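# Illustrative usage (not part of the original module; assumes a `game` object
# constructed elsewhere that exposes the methods used above - checkWin,
# drawScreen, getCurPlayer, update, reset - and whose player pair contains an
# agent named 'QLEARN1' or 'QLEARN2'):
#
#   sim = Simulator(game, debug=False)
#   sim.run(tolerance=0.05, n_test=100)  # train until epsilon < 0.05, then run 100 test games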
|
armandosrz/UdacityNanoMachine
|
capstone_project/Conniption/src/simulator.py
|
Python
|
apache-2.0
| 4,855
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x021000
_REQUEST_MESSAGE_TYPE = 135168
# hex: 0x021001
_RESPONSE_MESSAGE_TYPE = 135169
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_TTL_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_REFERENCE_ID_OFFSET = _REQUEST_TTL_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_REFERENCE_ID_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, key, thread_id, ttl, reference_id):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_TTL_OFFSET, ttl)
FixSizedTypesCodec.encode_long(buf, _REQUEST_REFERENCE_ID_OFFSET, reference_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, key, True)
return OutboundMessage(buf, True)
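# Illustrative sketch (not part of the generated codec): the multimap proxy is
# expected to serialize the key to a Data object before calling encode_request.
# The names below are assumptions for illustration, not the actual proxy code:
#
#   key_data = serialization_service.to_data("my-key")
#   request = encode_request("my-multimap", key_data,
#                            thread_id=1, ttl=-1, reference_id=1)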
|
hazelcast/hazelcast-python-client
|
hazelcast/protocol/codec/multi_map_lock_codec.py
|
Python
|
apache-2.0
| 1,157
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from pumphouse.tasks import base
class Tenant(base.Resource):
@base.task
def create(self):
self.data = self.env.cloud.keystone.tenants.create(self.data)
@base.task
def delete(self):
self.env.cloud.keystone.tenants.delete(self.data["id"])
class Server(base.Resource):
@classmethod
def get_id_for(cls, data):
try:
tenant_id = data["tenant_id"]
except KeyError:
tenant_id = Tenant.get_id_for(data["tenant"])
return (tenant_id, super(Server, cls).get_id_for(data))
@Tenant()
def tenant(self):
if "tenant_id" in self.data:
return {"id": self.data["tenant_id"]}
elif "tenant" in self.data:
return self.data["tenant"]
else:
assert False
@base.task(requires=[tenant.create])
def create(self):
server = self.data.copy()
server.pop("tenant")
server["tenant_id"] = self.tenant["id"]
self.data = self.env.cloud.nova.servers.create(server)
@base.task(before=[tenant.delete])
def delete(self):
self.env.cloud.nova.servers.delete(self.data)
class TenantWorkload(base.Resource):
@Tenant()
def tenant(self):
return self.data
@base.Collection(Server)
def servers(self):
return self.env.cloud.nova.servers.list(search_opts={
"all_tenants": 1,
"tenant_id": self.tenant["id"],
})
delete = base.task(name="delete",
requires=[tenant.delete, servers.each().delete])
create = base.task(requires=[tenant.create, servers.each().create])
class TasksBaseTestCase(unittest.TestCase):
def test_create_tasks(self):
tenant = {"name": "tenant1"}
created_tenant = dict(tenant, id="tenid1")
servers = [
{"name": "server1", "tenant": tenant},
{"name": "server2", "tenant": tenant},
]
env = mock.Mock(plugins={})
env.cloud.keystone.tenants.create.return_value = created_tenant
runner = base.TaskflowRunner(env)
workload = runner.get_resource(TenantWorkload, tenant)
workload.servers = servers
runner.add(workload.create)
runner.run()
self.assertEqual(
env.cloud.keystone.tenants.create.call_args_list,
[mock.call(tenant)],
)
self.assertItemsEqual(
env.cloud.nova.servers.create.call_args_list,
map(mock.call, [
{"tenant_id": created_tenant["id"], "name": server["name"]}
for server in servers
]),
)
self.assertEqual(len(env.method_calls), 1 + len(servers))
def test_delete_tasks(self):
tenant = {"id": "tenid1", "name": "tenant1"}
servers = [
{"id": "servid1", "name": "server1", "tenant_id": tenant["id"]},
{"id": "servid2", "name": "server2", "tenant_id": tenant["id"]},
]
env = mock.Mock(plugins={})
env.cloud.nova.servers.list.return_value = servers
env.cloud.keystone.tenants.get.return_value = tenant
runner = base.TaskflowRunner(env)
workload = runner.get_resource(TenantWorkload, tenant)
runner.add(workload.delete)
runner.run()
self.assertEqual(
env.cloud.nova.servers.list.call_args_list,
[mock.call(search_opts={
"all_tenants": 1,
"tenant_id": tenant["id"],
})],
)
self.assertEqual(
env.cloud.keystone.tenants.delete.call_args_list,
[mock.call(tenant["id"])],
)
self.assertItemsEqual(
env.cloud.nova.servers.delete.call_args_list,
map(mock.call, servers),
)
self.assertEqual(len(env.method_calls), 2 + len(servers))
|
Mirantis/pumphouse
|
tests/unit/test_tasks_base.py
|
Python
|
apache-2.0
| 4,450
|
import serial
from threading import Thread
from menu import Menu
from time import sleep
import settings as s
import pygame
from pygame import *
ser = serial.Serial('/dev/ttyACM0', 9600, timeout=.025)
current_mark = None
def worker():
global current_mark
while True:
read_serial = ser.readline().strip()
if len(read_serial) > 0:
current_mark = str(read_serial,'utf-8')
print('inside worker thread: ' + str(current_mark))
t = Thread(target=worker)
t.daemon = True
class SceneMananger:
def __init__(self):
self.go_to(Menu())
def go_to(self, scene):
self.scene = scene
self.scene.manager = self
def main():
pygame.mixer.pre_init(44100, -16, 2, 512)
pygame.init()
pygame.mixer.quit()
pygame.mixer.init(44100, -16, 2, 512)
screen = pygame.display.set_mode((s.DISPLAY_WIDTH, s.DISPLAY_HEIGHT), pygame.FULLSCREEN)
timer = pygame.time.Clock()
running = True
manager = SceneMananger()
t.start()
global current_mark
while running:
if pygame.event.get(QUIT):
running = False
return
if current_mark is not None:
manager.scene.on_gpio(current_mark)
manager.scene.render_all()
current_mark = None
manager.scene.on_event(pygame.event.get())
manager.scene.render_all()
timer.tick(60)
if __name__ == "__main__":
main()
|
DrewMcCarthy/dartboard
|
scenemanager.py
|
Python
|
apache-2.0
| 1,324
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import mock
from keystone.common import dependency
from keystone import config
from keystone.contrib.revoke import model
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import test_backend_sql
CONF = config.CONF
def _new_id():
return uuid.uuid4().hex
def _future_time():
expire_delta = datetime.timedelta(seconds=1000)
future_time = timeutils.utcnow() + expire_delta
return future_time
def _past_time():
expire_delta = datetime.timedelta(days=-1000)
past_time = timeutils.utcnow() + expire_delta
return past_time
def _sample_blank_token():
issued_delta = datetime.timedelta(minutes=-2)
issued_at = timeutils.utcnow() + issued_delta
token_data = model.blank_token_data(issued_at)
return token_data
def _matches(event, token_values):
"""See if the token matches the revocation event.
Used as a secondary check on the logic to Check
By Tree Below: This is abrute force approach to checking.
Compare each attribute from the event with the corresponding
value from the token. If the event does not have a value for
the attribute, a match is still possible. If the event has a
value for the attribute, and it does not match the token, no match
is possible, so skip the remaining checks.
:param event one revocation event to match
:param token_values dictionary with set of values taken from the
token
:returns if the token matches the revocation event, indicating the
token has been revoked
"""
# The token has three attributes that can match the user_id
if event.user_id is not None:
for attribute_name in ['user_id', 'trustor_id', 'trustee_id']:
if event.user_id == token_values[attribute_name]:
break
else:
return False
# The token has two attributes that can match the domain_id
if event.domain_id is not None:
for attribute_name in ['user_domain_id', 'project_domain_id']:
if event.domain_id == token_values[attribute_name]:
break
else:
return False
    # If any one check does not match, the whole token does not match
    # the event. The numerous `return False` statements indicate that
    # the token is still valid and short-circuit the rest of the logic.
attribute_names = ['project_id',
'expires_at', 'trust_id', 'consumer_id',
'access_token_id']
for attribute_name in attribute_names:
if getattr(event, attribute_name) is not None:
if (getattr(event, attribute_name) !=
token_values[attribute_name]):
return False
if event.role_id is not None:
roles = token_values['roles']
for role in roles:
if event.role_id == role:
break
else:
return False
if token_values['issued_at'] > event.issued_before:
return False
return True
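# Illustrative sketch (not part of the original module): attributes left as
# None on an event act as wildcards, so an event carrying only a user_id
# matches any token issued to, trusted by, or trusted to that user.
#
#   event = model.RevokeEvent(user_id='u1')
#   token = _sample_blank_token()
#   token['user_id'] = 'u1'
#   _matches(event, token)  # True, provided issued_before covers issued_at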
@dependency.requires('revoke_api')
class RevokeTests(object):
def test_list(self):
self.revoke_api.revoke_by_user(user_id=1)
self.assertEqual(1, len(self.revoke_api.get_events()))
self.revoke_api.revoke_by_user(user_id=2)
self.assertEqual(2, len(self.revoke_api.get_events()))
def test_list_since(self):
self.revoke_api.revoke_by_user(user_id=1)
self.revoke_api.revoke_by_user(user_id=2)
past = timeutils.utcnow() - datetime.timedelta(seconds=1000)
self.assertEqual(2, len(self.revoke_api.get_events(past)))
future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
self.assertEqual(0, len(self.revoke_api.get_events(future)))
def test_past_expiry_are_removed(self):
user_id = 1
self.revoke_api.revoke_by_expiration(user_id, _future_time())
self.assertEqual(1, len(self.revoke_api.get_events()))
event = model.RevokeEvent()
event.revoked_at = _past_time()
self.revoke_api.revoke(event)
self.assertEqual(1, len(self.revoke_api.get_events()))
@mock.patch.object(timeutils, 'utcnow')
def test_expired_events_removed_validate_token_success(self, mock_utcnow):
def _sample_token_values():
token = _sample_blank_token()
token['expires_at'] = timeutils.isotime(_future_time(),
subsecond=True)
return token
now = datetime.datetime.utcnow()
now_plus_2h = now + datetime.timedelta(hours=2)
mock_utcnow.return_value = now
# Build a token and validate it. This will seed the cache for the
# future 'synchronize' call.
token_values = _sample_token_values()
user_id = _new_id()
self.revoke_api.revoke_by_user(user_id)
token_values['user_id'] = user_id
self.assertRaises(exception.TokenNotFound,
self.revoke_api.check_token,
token_values)
# Move our clock forward by 2h, build a new token and validate it.
# 'synchronize' should now be exercised and remove old expired events
mock_utcnow.return_value = now_plus_2h
self.revoke_api.revoke_by_expiration(_new_id(), now_plus_2h)
# should no longer throw an exception
self.revoke_api.check_token(token_values)
class SqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
def config_overrides(self):
super(SqlRevokeTests, self).config_overrides()
self.config_fixture.config(
group='revoke',
driver='keystone.contrib.revoke.backends.sql.Revoke')
self.config_fixture.config(
group='token',
provider='keystone.token.providers.pki.Provider',
revoke_by_id=False)
class KvsRevokeTests(tests.TestCase, RevokeTests):
def config_overrides(self):
super(KvsRevokeTests, self).config_overrides()
self.config_fixture.config(
group='revoke',
driver='keystone.contrib.revoke.backends.kvs.Revoke')
self.config_fixture.config(
group='token',
provider='keystone.token.providers.pki.Provider',
revoke_by_id=False)
def setUp(self):
super(KvsRevokeTests, self).setUp()
self.load_backends()
class RevokeTreeTests(tests.TestCase):
def setUp(self):
super(RevokeTreeTests, self).setUp()
self.events = []
self.tree = model.RevokeTree()
self._sample_data()
def _sample_data(self):
user_ids = []
project_ids = []
role_ids = []
for i in range(0, 3):
user_ids.append(_new_id())
project_ids.append(_new_id())
role_ids.append(_new_id())
project_tokens = []
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[0]
project_tokens[i]['project_id'] = project_ids[0]
project_tokens[i]['roles'] = [role_ids[1]]
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[1]
project_tokens[i]['project_id'] = project_ids[0]
project_tokens[i]['roles'] = [role_ids[0]]
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[0]
project_tokens[i]['project_id'] = project_ids[1]
project_tokens[i]['roles'] = [role_ids[0]]
token_to_revoke = _sample_blank_token()
token_to_revoke['user_id'] = user_ids[0]
token_to_revoke['project_id'] = project_ids[0]
token_to_revoke['roles'] = [role_ids[0]]
self.project_tokens = project_tokens
self.user_ids = user_ids
self.project_ids = project_ids
self.role_ids = role_ids
self.token_to_revoke = token_to_revoke
def _assertTokenRevoked(self, token_data):
self.assertTrue(any([_matches(e, token_data) for e in self.events]))
return self.assertTrue(self.tree.is_revoked(token_data),
'Token should be revoked')
def _assertTokenNotRevoked(self, token_data):
self.assertFalse(any([_matches(e, token_data) for e in self.events]))
return self.assertFalse(self.tree.is_revoked(token_data),
'Token should not be revoked')
def _revoke_by_user(self, user_id):
return self.tree.add_event(
model.RevokeEvent(user_id=user_id))
def _revoke_by_expiration(self, user_id, expires_at):
event = self.tree.add_event(
model.RevokeEvent(user_id=user_id,
expires_at=expires_at))
self.events.append(event)
return event
def _revoke_by_grant(self, role_id, user_id=None,
domain_id=None, project_id=None):
event = self.tree.add_event(
model.RevokeEvent(user_id=user_id,
role_id=role_id,
domain_id=domain_id,
project_id=project_id))
self.events.append(event)
return event
def _revoke_by_user_and_project(self, user_id, project_id):
event = self.tree.add_event(
model.RevokeEvent(project_id=project_id,
user_id=user_id))
self.events.append(event)
return event
def _revoke_by_project_role_assignment(self, project_id, role_id):
event = self.tree.add_event(
model.RevokeEvent(project_id=project_id,
role_id=role_id))
self.events.append(event)
return event
def _revoke_by_domain_role_assignment(self, domain_id, role_id):
event = self.tree.add_event(
model.RevokeEvent(domain_id=domain_id,
role_id=role_id))
self.events.append(event)
return event
def _user_field_test(self, field_name):
user_id = _new_id()
event = self._revoke_by_user(user_id)
self.events.append(event)
token_data_u1 = _sample_blank_token()
token_data_u1[field_name] = user_id
self._assertTokenRevoked(token_data_u1)
token_data_u2 = _sample_blank_token()
token_data_u2[field_name] = _new_id()
self._assertTokenNotRevoked(token_data_u2)
self.tree.remove_event(event)
self.events.remove(event)
self._assertTokenNotRevoked(token_data_u1)
def test_revoke_by_user(self):
self._user_field_test('user_id')
def test_revoke_by_user_matches_trustee(self):
self._user_field_test('trustee_id')
def test_revoke_by_user_matches_trustor(self):
self._user_field_test('trustor_id')
def test_by_user_expiration(self):
future_time = _future_time()
user_id = 1
event = self._revoke_by_expiration(user_id, future_time)
token_data_1 = _sample_blank_token()
token_data_1['user_id'] = user_id
token_data_1['expires_at'] = future_time
self._assertTokenRevoked(token_data_1)
token_data_2 = _sample_blank_token()
token_data_2['user_id'] = user_id
expire_delta = datetime.timedelta(seconds=2000)
future_time = timeutils.utcnow() + expire_delta
token_data_2['expires_at'] = future_time
self._assertTokenNotRevoked(token_data_2)
self.removeEvent(event)
self._assertTokenNotRevoked(token_data_1)
def removeEvent(self, event):
self.events.remove(event)
self.tree.remove_event(event)
def test_by_project_grant(self):
token_to_revoke = self.token_to_revoke
tokens = self.project_tokens
self._assertTokenNotRevoked(token_to_revoke)
for token in tokens:
self._assertTokenNotRevoked(token)
event = self._revoke_by_grant(role_id=self.role_ids[0],
user_id=self.user_ids[0],
project_id=self.project_ids[0])
self._assertTokenRevoked(token_to_revoke)
for token in tokens:
self._assertTokenNotRevoked(token)
self.removeEvent(event)
self._assertTokenNotRevoked(token_to_revoke)
for token in tokens:
self._assertTokenNotRevoked(token)
token_to_revoke['roles'] = [self.role_ids[0],
self.role_ids[1],
self.role_ids[2]]
event = self._revoke_by_grant(role_id=self.role_ids[0],
user_id=self.user_ids[0],
project_id=self.project_ids[0])
self._assertTokenRevoked(token_to_revoke)
self.removeEvent(event)
self._assertTokenNotRevoked(token_to_revoke)
event = self._revoke_by_grant(role_id=self.role_ids[1],
user_id=self.user_ids[0],
project_id=self.project_ids[0])
self._assertTokenRevoked(token_to_revoke)
self.removeEvent(event)
self._assertTokenNotRevoked(token_to_revoke)
self._revoke_by_grant(role_id=self.role_ids[0],
user_id=self.user_ids[0],
project_id=self.project_ids[0])
self._revoke_by_grant(role_id=self.role_ids[1],
user_id=self.user_ids[0],
project_id=self.project_ids[0])
self._revoke_by_grant(role_id=self.role_ids[2],
user_id=self.user_ids[0],
project_id=self.project_ids[0])
self._assertTokenRevoked(token_to_revoke)
def test_by_project_and_user_and_role(self):
user_id1 = _new_id()
user_id2 = _new_id()
project_id = _new_id()
self.events.append(self._revoke_by_user(user_id1))
self.events.append(
self._revoke_by_user_and_project(user_id2, project_id))
token_data = _sample_blank_token()
token_data['user_id'] = user_id2
token_data['project_id'] = project_id
self._assertTokenRevoked(token_data)
def _assertEmpty(self, collection):
return self.assertEqual(0, len(collection), "collection not empty")
def _assertEventsMatchIteration(self, turn):
self.assertEqual(1, len(self.tree.revoke_map))
self.assertEqual(turn + 1, len(self.tree.revoke_map
['trust_id=*']
['consumer_id=*']
['access_token_id=*']))
# two different functions add domain_ids, +1 for None
self.assertEqual(2 * turn + 1, len(self.tree.revoke_map
['trust_id=*']
['consumer_id=*']
['access_token_id=*']
['expires_at=*']))
# two different functions add project_ids, +1 for None
self.assertEqual(2 * turn + 1, len(self.tree.revoke_map
['trust_id=*']
['consumer_id=*']
['access_token_id=*']
['expires_at=*']
['domain_id=*']))
# 10 users added
self.assertEqual(turn, len(self.tree.revoke_map
['trust_id=*']
['consumer_id=*']
['access_token_id=*']
['expires_at=*']
['domain_id=*']
['project_id=*']))
def test_cleanup(self):
events = self.events
self._assertEmpty(self.tree.revoke_map)
expiry_base_time = _future_time()
for i in range(0, 10):
events.append(
self._revoke_by_user(_new_id()))
args = (_new_id(),
expiry_base_time + datetime.timedelta(seconds=i))
events.append(
self._revoke_by_expiration(*args))
self.assertEqual(i + 2, len(self.tree.revoke_map
['trust_id=*']
['consumer_id=*']
['access_token_id=*']),
'adding %s to %s' % (args,
self.tree.revoke_map))
events.append(
self._revoke_by_project_role_assignment(_new_id(), _new_id()))
events.append(
self._revoke_by_domain_role_assignment(_new_id(), _new_id()))
events.append(
self._revoke_by_domain_role_assignment(_new_id(), _new_id()))
events.append(
self._revoke_by_user_and_project(_new_id(), _new_id()))
self._assertEventsMatchIteration(i + 1)
for event in self.events:
self.tree.remove_event(event)
self._assertEmpty(self.tree.revoke_map)
|
reeshupatel/demo
|
keystone/tests/test_revoke.py
|
Python
|
apache-2.0
| 17,990
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from datetime import datetime, timedelta
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from desktop.lib.paths import get_apps_root
from hbased.ttypes import AlreadyExists
from hbase.api import HbaseApi
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
help = 'Create and fill some demo tables in the first configured cluster.'
def handle_noargs(self, **options):
api = HbaseApi()
cluster_name = api.getClusters()[0]['name'] # Currently pick first configured cluster
# Check connectivity
api.connectCluster(cluster_name)
self.create_analytics_table(api, cluster_name)
self.load_analytics_table(api, cluster_name)
self.create_binary_table(api, cluster_name)
self.load_binary_table(api, cluster_name)
def create_analytics_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'analytics_demo', [{'properties': {'name': 'hour'}}, {'properties': {'name': 'day'}}, {'properties': {'name': 'total'}}])
except AlreadyExists:
pass
def load_analytics_table(self, api, cluster_name):
table_data = os.path.join(get_apps_root(), 'hbase', 'example', 'analytics', 'hbase-analytics.tsv')
api.bulkUpload(cluster_name, 'analytics_demo', open(table_data))
def create_binary_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'document_demo', [{'properties': {'name': 'doc'}}])
except AlreadyExists:
pass
def load_binary_table(self, api, cluster_name):
today = datetime.now().strftime('%Y%m%d')
tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y%m%d')
api.putRow(cluster_name, 'document_demo', today, {'doc:txt': 'Hue is awesome!'})
api.putRow(cluster_name, 'document_demo', today, {'doc:json': '{"user": "hue", "coolness": "extra"}'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I like HBase</xml>'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I LOVE HBase</xml>'})
root = os.path.join(get_apps_root(), 'hbase', 'example', 'documents')
api.putRow(cluster_name, 'document_demo', today, {'doc:img': open(root + '/hue-logo.png', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:html': open(root + '/gethue.com.html', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:pdf': open(root + '/gethue.pdf', "rb").read()})
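# Illustrative invocation (assuming a standard Hue checkout; as with any Django
# management command, the command name comes from this module's filename):
#
#   build/env/bin/hue hbase_setup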
|
yongshengwang/builthue
|
apps/hbase/src/hbase/management/commands/hbase_setup.py
|
Python
|
apache-2.0
| 3,274
|
response.title = settings.title
response.subtitle = settings.subtitle
response.meta.author = '%(author)s <%(author_email)s>' % settings
response.meta.keywords = settings.keywords
response.meta.description = settings.description
response.menu = [
(T('Index'),URL('default','index')==URL(),URL('default','index'),[]),
(T('Video'),URL('default','video')==URL(),URL('default','video'),[]),
(T('Info'), False, "http://www.oarstack.com/2015/04/oarstack-analysis/", []),
]
response.google_analytics_id="UA-52135133-2"
|
sloe/analyseapp
|
models/menu.py
|
Python
|
apache-2.0
| 523
|
from .fixtures import * # noqa, pylint: disable=wildcard-import
|
efiop/dvc
|
dvc/testing/conftest.py
|
Python
|
apache-2.0
| 65
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Appcelerator Titanium Mobile
# Copyright (c) 2011 by Appcelerator, Inc. All Rights Reserved.
# Licensed under the terms of the Apache Public License
# Please see the LICENSE included with this distribution for details.
#
# General builder script for staging, packaging, deploying,
# and debugging Titanium Mobile applications on Android
#
import os, sys, subprocess, shutil, time, signal, string, platform, re, glob, hashlib, imp, inspect
import run, avd, prereq, zipfile, tempfile, fnmatch, codecs, traceback, simplejson
from mako.template import Template
from os.path import splitext
from compiler import Compiler
from os.path import join, splitext, split, exists
from shutil import copyfile
from xml.dom.minidom import parseString
from tilogger import *
from datetime import datetime, timedelta
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
top_support_dir = os.path.dirname(template_dir)
sys.path.append(top_support_dir)
sys.path.append(os.path.join(top_support_dir, 'common'))
sys.path.append(os.path.join(top_support_dir, 'module'))
from tiapp import *
from android import Android
from androidsdk import AndroidSDK
from deltafy import Deltafy, Delta
from css import csscompiler
from module import ModuleDetector
import localecompiler
import fastdev
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];
ignoreDirs = ['.git','.svn','_svn', 'CVS'];
android_avd_hw = {'hw.camera': 'yes', 'hw.gps':'yes'}
res_skips = ['style']
log = None
# Copied from frameworks/base/tools/aapt/Package.cpp
uncompressed_types = [
".jpg", ".jpeg", ".png", ".gif",
".wav", ".mp2", ".mp3", ".ogg", ".aac",
".mpg", ".mpeg", ".mid", ".midi", ".smf", ".jet",
".rtttl", ".imy", ".xmf", ".mp4", ".m4a",
".m4v", ".3gp", ".3gpp", ".3g2", ".3gpp2",
".amr", ".awb", ".wma", ".wmv"
]
MIN_API_LEVEL = 7
def render_template_with_tiapp(template_text, tiapp_obj):
t = Template(template_text)
return t.render(tiapp=tiapp_obj)
def remove_ignored_dirs(dirs):
for d in dirs:
if d in ignoreDirs:
dirs.remove(d)
# ZipFile.extractall was introduced in Python 2.6, so this is a workaround for
# earlier versions
def zip_extractall(zfile, target_dir):
file_infos = zfile.infolist()
for info in file_infos:
if info.file_size > 0:
file_path = os.path.join(target_dir, os.path.normpath(info.filename))
parent_path = os.path.dirname(file_path)
if not os.path.exists(parent_path):
os.makedirs(parent_path)
out_file = open(file_path, "wb")
out_file.write(zfile.read(info.filename))
out_file.close()
def dequote(s):
if s[0:1] == '"':
return s[1:-1]
return s
def pipe(args1,args2):
p1 = subprocess.Popen(args1, stdout=subprocess.PIPE)
p2 = subprocess.Popen(args2, stdin=p1.stdout, stdout=subprocess.PIPE)
return p2.communicate()[0]
def read_properties(propFile, separator=":= "):
propDict = dict()
for propLine in propFile:
propDef = propLine.strip()
if len(propDef) == 0:
continue
if propDef[0] in ( '!', '#' ):
continue
punctuation= [ propDef.find(c) for c in separator ] + [ len(propDef) ]
found= min( [ pos for pos in punctuation if pos != -1 ] )
name= propDef[:found].rstrip()
value= propDef[found:].lstrip(separator).rstrip()
propDict[name]= value
propFile.close()
return propDict
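# Illustrative sketch (not part of the original builder): read_properties
# parses simple Java-style property lines, skipping blanks and '#'/'!' comments.
#
#   props = read_properties(open('local.properties'))
#   props['sdk.dir']  # e.g. '/opt/android-sdk' (file name and value are assumptions)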
def info(msg):
log.info(msg)
def debug(msg):
log.debug(msg)
def warn(msg):
log.warn(msg)
def trace(msg):
log.trace(msg)
def error(msg):
log.error(msg)
def copy_all(source_folder, dest_folder, ignore_dirs=[], ignore_files=[], ignore_exts=[], one_time_msg=""):
msg_shown = False
for root, dirs, files in os.walk(source_folder):
for d in dirs:
if d in ignore_dirs:
dirs.remove(d)
for f in files:
if f in ignore_files:
continue
ext = os.path.splitext(f)[1]
if ext in ignore_exts:
continue
if one_time_msg and not msg_shown:
info(one_time_msg)
msg_shown = True
from_ = os.path.join(root, f)
to_ = from_.replace(source_folder, dest_folder, 1)
to_directory = os.path.split(to_)[0]
if not os.path.exists(to_directory):
os.makedirs(to_directory)
shutil.copyfile(from_, to_)
def remove_orphaned_files(source_folder, target_folder):
is_res = source_folder.endswith('Resources') or source_folder.endswith('Resources' + os.sep)
for root, dirs, files in os.walk(target_folder):
for f in files:
full = os.path.join(root, f)
rel = full.replace(target_folder, '')
if rel[0] == os.sep:
rel = rel[1:]
is_orphan = False
if not os.path.exists(os.path.join(source_folder, rel)):
is_orphan = True
# But it could be under android/... too (platform-specific)
if is_orphan and is_res:
if os.path.exists(os.path.join(source_folder, 'android', rel)):
is_orphan = False
if is_orphan:
os.remove(full)
def is_resource_drawable(path):
if re.search("android/images/(high|medium|low|res-[^/]+)/", path.replace(os.sep, "/")):
return True
else:
return False
def resource_drawable_folder(path):
if not is_resource_drawable(path):
return None
else:
pattern = r'/android/images/(high|medium|low|res-[^/]+)/'
match = re.search(pattern, path.replace(os.sep, "/"))
if not match.groups():
return None
folder = match.groups()[0]
if re.match('high|medium|low', folder):
return 'drawable-%sdpi' % folder[0]
else:
return 'drawable-%s' % folder.replace('res-', '')
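# Illustrative mapping (not part of the original builder): how Titanium's
# density image folders translate to Android resource drawable folders.
#
#   resource_drawable_folder('Resources/android/images/high/icon.png')
#       -> 'drawable-hdpi'
#   resource_drawable_folder('Resources/android/images/res-long-land-hdpi/bg.png')
#       -> 'drawable-long-land-hdpi'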
class Builder(object):
def __init__(self, name, sdk, project_dir, support_dir, app_id):
self.top_dir = project_dir
self.project_tiappxml = os.path.join(self.top_dir,'tiapp.xml')
self.project_dir = os.path.join(project_dir,'build','android')
self.res_dir = os.path.join(self.project_dir,'res')
self.platform_dir = os.path.join(project_dir, 'platform', 'android')
self.project_src_dir = os.path.join(self.project_dir, 'src')
self.project_gen_dir = os.path.join(self.project_dir, 'gen')
self.name = name
self.app_id = app_id
self.support_dir = support_dir
self.compiled_files = []
self.force_rebuild = False
self.debugger_host = None
self.debugger_port = -1
self.fastdev_port = -1
self.fastdev = False
temp_tiapp = TiAppXML(self.project_tiappxml)
if temp_tiapp and temp_tiapp.android and 'tool-api-level' in temp_tiapp.android:
self.tool_api_level = int(temp_tiapp.android['tool-api-level'])
else:
self.tool_api_level = MIN_API_LEVEL
self.sdk = AndroidSDK(sdk, self.tool_api_level)
self.tiappxml = temp_tiapp
self.set_java_commands()
        # starting in 1.4, the build/android directory is no longer required;
# if missing, we'll create it on the fly
if not os.path.exists(self.project_dir) or not os.path.exists(os.path.join(self.project_dir,'AndroidManifest.xml')):
android_creator = Android(name, app_id, self.sdk, None, self.java)
parent_dir = os.path.dirname(self.top_dir)
if os.path.exists(self.top_dir):
android_creator.create(parent_dir, project_dir=self.top_dir, build_time=True)
else:
android_creator.create(parent_dir)
self.force_rebuild = True
sys.stdout.flush()
        # we place some files in the user's home directory
if platform.system() == "Windows":
self.home_dir = os.path.join(os.environ['USERPROFILE'], '.titanium')
self.android_home_dir = os.path.join(os.environ['USERPROFILE'], '.android')
else:
self.home_dir = os.path.join(os.path.expanduser('~'), '.titanium')
self.android_home_dir = os.path.join(os.path.expanduser('~'), '.android')
if not os.path.exists(self.home_dir):
os.makedirs(self.home_dir)
self.sdcard = os.path.join(self.home_dir,'android2.sdcard')
self.classname = Android.strip_classname(self.name)
def set_java_commands(self):
self.jarsigner = "jarsigner"
self.javac = "javac"
self.java = "java"
if platform.system() == "Windows":
if os.environ.has_key("JAVA_HOME"):
home_jarsigner = os.path.join(os.environ["JAVA_HOME"], "bin", "jarsigner.exe")
home_javac = os.path.join(os.environ["JAVA_HOME"], "bin", "javac.exe")
home_java = os.path.join(os.environ["JAVA_HOME"], "bin", "java.exe")
found = True
# TODO Document this path and test properly under windows
if os.path.exists(home_jarsigner):
self.jarsigner = home_jarsigner
else:
# Expected but not found
found = False
error("Required jarsigner not found")
if os.path.exists(home_javac):
self.javac = home_javac
else:
error("Required javac not found")
found = False
if os.path.exists(home_java):
self.java = home_java
else:
error("Required java not found")
found = False
if found == False:
error("One or more required files not found - please check your JAVA_HOME environment variable")
sys.exit(1)
else:
found = False
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, 'jarsigner.exe')) and os.path.exists(os.path.join(path, 'javac.exe')):
self.jarsigner = os.path.join(path, 'jarsigner.exe')
self.javac = os.path.join(path, 'javac.exe')
self.java = os.path.join(path, 'java.exe')
found = True
break
if not found:
error("Error locating JDK: set $JAVA_HOME or put javac and jarsigner on your $PATH")
sys.exit(1)
def wait_for_home(self, type):
max_wait = 20
attempts = 0
while True:
processes = self.sdk.list_processes(['-%s' % type])
found_home = False
for process in processes:
if process["name"] == "android.process.acore":
found_home = True
break
if found_home:
break
attempts += 1
if attempts == max_wait:
error("Timed out waiting for android.process.acore")
return False
time.sleep(1)
return True
def wait_for_device(self, type):
debug("Waiting for device to be ready ...")
t = time.time()
max_wait = 30
max_zero = 6
attempts = 0
zero_attempts = 0
timed_out = True
no_devices = False
while True:
devices = self.sdk.list_devices()
trace("adb devices returned %s devices/emulators" % len(devices))
if len(devices) > 0:
found = False
for device in devices:
if type == "e" and device.is_emulator() and not device.is_offline(): found = True
elif type == "d" and device.is_device(): found = True
if found:
timed_out = False
break
else: zero_attempts += 1
try: time.sleep(5) # for some reason KeyboardInterrupts get caught here from time to time
except KeyboardInterrupt: pass
attempts += 1
if attempts == max_wait:
break
elif zero_attempts == max_zero:
no_devices = True
break
if timed_out:
if type == "e":
device = "emulator"
extra_message = "you may need to close the emulator and try again"
else:
device = "device"
extra_message = "you may try reconnecting the USB cable"
error("Timed out waiting for %s to be ready, %s" % (device, extra_message))
if no_devices:
sys.exit(1)
return False
debug("Device connected... (waited %d seconds)" % (attempts*5))
duration = time.time() - t
debug("waited %f seconds on emulator to get ready" % duration)
if duration > 1.0:
info("Waiting for the Android Emulator to become available")
return self.wait_for_home(type)
#time.sleep(20) # give it a little more time to get installed
return True
def create_avd(self,avd_id,avd_skin):
name = "titanium_%s_%s" % (avd_id,avd_skin)
name = name.replace(' ', '_')
if not os.path.exists(self.home_dir):
os.makedirs(self.home_dir)
avd_path = os.path.join(self.android_home_dir, 'avd')
my_avd = os.path.join(avd_path,"%s.avd" % name)
own_sdcard = os.path.join(self.home_dir, '%s.sdcard' % name)
if not os.path.exists(my_avd) or os.path.exists(own_sdcard):
# starting with 1.7.2, when we create a new avd, give it its own
# SDCard as well.
self.sdcard = own_sdcard
if not os.path.exists(self.sdcard):
info("Creating 64M SD card for use in Android emulator")
run.run([self.sdk.get_mksdcard(), '64M', self.sdcard])
if not os.path.exists(my_avd):
info("Creating new Android Virtual Device (%s %s)" % (avd_id,avd_skin))
inputgen = os.path.join(template_dir,'input.py')
pipe([sys.executable, inputgen], [self.sdk.get_android(), '--verbose', 'create', 'avd', '--name', name, '--target', avd_id, '-s', avd_skin, '--force', '--sdcard', self.sdcard])
inifile = os.path.join(my_avd,'config.ini')
inifilec = open(inifile,'r').read()
inifiledata = open(inifile,'w')
inifiledata.write(inifilec)
# TODO - Document options
for hw_option in android_avd_hw.keys():
inifiledata.write("%s=%s\n" % (hw_option, android_avd_hw[hw_option]))
inifiledata.close()
return name
def run_emulator(self,avd_id,avd_skin):
info("Launching Android emulator...one moment")
debug("From: " + self.sdk.get_emulator())
debug("SDCard: " + self.sdcard)
debug("AVD ID: " + avd_id)
debug("AVD Skin: " + avd_skin)
debug("SDK: " + sdk_dir)
        # make sure adb is running on Windows, else XP can lock up the python
        # process when adb runs for the first time
if platform.system() == "Windows":
run.run([self.sdk.get_adb(), "start-server"], True, ignore_output=True)
devices = self.sdk.list_devices()
for device in devices:
if device.is_emulator() and device.get_port() == 5560:
info("Emulator is running.")
sys.exit()
# this will create an AVD on demand or re-use existing one if already created
avd_name = self.create_avd(avd_id,avd_skin)
# start the emulator
emulator_cmd = [
self.sdk.get_emulator(),
'-avd',
avd_name,
'-port',
'5560',
'-sdcard',
self.sdcard,
'-logcat',
'*:d,*',
'-no-boot-anim',
'-partition-size',
'128' # in between nexusone and droid
]
debug(' '.join(emulator_cmd))
p = subprocess.Popen(emulator_cmd)
def handler(signum, frame):
debug("signal caught: %d" % signum)
if not p == None:
debug("calling emulator kill on %d" % p.pid)
if platform.system() == "Windows":
os.system("taskkill /F /T /PID %i" % p.pid)
else:
os.kill(p.pid, signal.SIGTERM)
if platform.system() != "Windows":
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGABRT, handler)
signal.signal(signal.SIGTERM, handler)
# give it some time to exit prematurely
time.sleep(1)
rc = p.poll()
if rc != None:
handler(3,None)
sys.exit(rc)
# wait for the emulator to finish
try:
rc = p.wait()
except OSError:
handler(3,None)
info("Android Emulator has exited")
sys.exit(rc)
def check_file_exists(self, path):
output = self.run_adb('shell', 'ls', path)
if output != None:
if output.find("No such file or directory") == -1 \
and output.find("error: device offline") == -1:
return True
return False
def is_app_installed(self):
return self.check_file_exists('/data/app/%s*.apk' % self.app_id)
def are_resources_installed(self):
return self.check_file_exists(self.sdcard_resources+'/app.js')
def include_path(self, path, isfile):
if not isfile and os.path.basename(path) in ignoreDirs: return False
elif isfile and os.path.basename(path) in ignoreFiles: return False
return True
def warn_dupe_drawable_folders(self):
tocheck = ('high', 'medium', 'low')
image_parent = os.path.join(self.top_dir, 'Resources', 'android', 'images')
for check in tocheck:
if os.path.exists(os.path.join(image_parent, check)) and os.path.exists(os.path.join(image_parent, 'res-%sdpi' % check[0])):
warn('You have both an android/images/%s folder and an android/images/res-%sdpi folder. Files from both of these folders will end up in res/drawable-%sdpi. If two files are named the same, there is no guarantee which one will be copied last and therefore be the one the application uses. You should use just one of these folders to avoid conflicts.' % (check, check[0], check[0]))
def copy_module_platform_folders(self):
for module in self.modules:
platform_folder = os.path.join(module.path, 'platform', 'android')
if os.path.exists(platform_folder):
copy_all(platform_folder, self.project_dir, one_time_msg="Copying platform-specific files for '%s' module" % module.manifest.name)
def copy_project_platform_folder(self, ignore_dirs=[], ignore_files=[]):
if not os.path.exists(self.platform_dir):
return
copy_all(self.platform_dir, self.project_dir, ignore_dirs, ignore_files, one_time_msg="Copying platform-specific files ...")
def copy_resource_drawables(self):
debug('Processing Android resource drawables')
def make_resource_drawable_filename(orig):
normalized = orig.replace(os.sep, "/")
matches = re.search("/android/images/(high|medium|low|res-[^/]+)/(?P<chopped>.*$)", normalized)
if matches and matches.groupdict() and 'chopped' in matches.groupdict():
chopped = matches.groupdict()['chopped'].lower()
for_hash = chopped
if for_hash.endswith('.9.png'):
for_hash = for_hash[:-6] + '.png'
extension = ""
without_extension = chopped
if re.search("\\..*$", chopped):
if chopped.endswith('.9.png'):
extension = '9.png'
without_extension = chopped[:-6]
else:
extension = chopped.split(".")[-1]
without_extension = chopped[:-(len(extension)+1)]
cleaned_without_extension = re.sub(r'[^a-z0-9_]', '_', without_extension)
cleaned_extension = re.sub(r'[^a-z0-9\._]', '_', extension)
result = cleaned_without_extension[:80] + "_" + hashlib.md5(for_hash).hexdigest()[:10]
if extension:
result += "." + extension
return result
else:
trace("Regexp for resource drawable file %s failed" % orig)
return None
def delete_resource_drawable(orig):
folder = resource_drawable_folder(orig)
res_file = os.path.join(self.res_dir, folder, make_resource_drawable_filename(orig))
if os.path.exists(res_file):
try:
trace("DELETING FILE: %s" % res_file)
os.remove(res_file)
except:
warn('Unable to delete %s: %s. Execution will continue.' % (res_file, sys.exc_info()[0]))
def copy_resource_drawable(orig):
partial_folder = resource_drawable_folder(orig)
if not partial_folder:
trace("Could not copy %s; resource folder not determined" % orig)
return
dest_folder = os.path.join(self.res_dir, partial_folder)
dest_filename = make_resource_drawable_filename(orig)
if dest_filename is None:
return
dest = os.path.join(dest_folder, dest_filename)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
trace("COPYING FILE: %s => %s" % (orig, dest))
shutil.copy(orig, dest)
fileset = []
if self.force_rebuild or self.deploy_type == 'production' or \
(self.js_changed and not self.fastdev):
for root, dirs, files in os.walk(os.path.join(self.top_dir, "Resources")):
remove_ignored_dirs(dirs)
for f in files:
if f in ignoreFiles:
continue
path = os.path.join(root, f)
if is_resource_drawable(path) and f != 'default.png':
fileset.append(path)
else:
if self.project_deltas:
for delta in self.project_deltas:
path = delta.get_path()
if is_resource_drawable(path):
if delta.get_status() == Delta.DELETED:
delete_resource_drawable(path)
else:
fileset.append(path)
if len(fileset) == 0:
return False
for f in fileset:
copy_resource_drawable(f)
return True
def copy_project_resources(self):
info("Copying project resources..")
resources_dir = os.path.join(self.top_dir, 'Resources')
android_resources_dir = os.path.join(resources_dir, 'android')
self.project_deltafy = Deltafy(resources_dir, include_callback=self.include_path)
self.project_deltas = self.project_deltafy.scan()
self.js_changed = False
tiapp_delta = self.project_deltafy.scan_single_file(self.project_tiappxml)
self.tiapp_changed = tiapp_delta is not None
if self.tiapp_changed or self.force_rebuild:
info("Detected tiapp.xml change, forcing full re-build...")
# force a clean scan/copy when the tiapp.xml has changed
self.project_deltafy.clear_state()
self.project_deltas = self.project_deltafy.scan()
# rescan tiapp.xml so it doesn't show up as created next time around
self.project_deltafy.scan_single_file(self.project_tiappxml)
def strip_slash(s):
if s[0:1]=='/' or s[0:1]=='\\': return s[1:]
return s
def make_relative(path, relative_to, prefix=None):
relative_path = strip_slash(path[len(relative_to):])
if prefix is not None:
return os.path.join(prefix, relative_path)
return relative_path
for delta in self.project_deltas:
path = delta.get_path()
if re.search("android/images/(high|medium|low|res-[^/]+)/", path.replace(os.sep, "/")):
continue # density images are handled later
if delta.get_status() == Delta.DELETED and path.startswith(android_resources_dir):
shared_path = path.replace(android_resources_dir, resources_dir, 1)
if os.path.exists(shared_path):
dest = make_relative(shared_path, resources_dir, self.assets_resources_dir)
trace("COPYING FILE: %s => %s (platform-specific file was removed)" % (shared_path, dest))
shutil.copy(shared_path, dest)
if delta.get_status() != Delta.DELETED:
if path.startswith(android_resources_dir):
dest = make_relative(path, android_resources_dir, self.assets_resources_dir)
else:
# don't copy it if there is an android-specific file
if os.path.exists(path.replace(resources_dir, android_resources_dir, 1)):
continue
dest = make_relative(path, resources_dir, self.assets_resources_dir)
# check to see if this is a compiled file and if so, don't copy
if dest in self.compiled_files: continue
if path.startswith(os.path.join(resources_dir, "iphone")) or path.startswith(os.path.join(resources_dir, "blackberry")):
continue
parent = os.path.dirname(dest)
if not os.path.exists(parent):
os.makedirs(parent)
trace("COPYING %s FILE: %s => %s" % (delta.get_status_str(), path, dest))
shutil.copy(path, dest)
if (path.startswith(resources_dir) or path.startswith(android_resources_dir)) and path.endswith(".js"):
self.js_changed = True
# copy to the sdcard in development mode
if self.sdcard_copy and self.app_installed and (self.deploy_type == 'development' or self.deploy_type == 'test'):
if path.startswith(android_resources_dir):
relative_path = make_relative(delta.get_path(), android_resources_dir)
else:
relative_path = make_relative(delta.get_path(), resources_dir)
relative_path = relative_path.replace("\\", "/")
self.run_adb('push', delta.get_path(), "%s/%s" % (self.sdcard_resources, relative_path))
def generate_android_manifest(self,compiler):
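		# Generate AndroidManifest.xml: derive the required permissions and activities from
		# the API calls the compiler detected, merge manifest fragments from tiapp.xml and
		# modules, honor a custom manifest from platform/android, and re-run aapt to
		# regenerate R.java whenever resources or the manifest changed.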
self.generate_localizations()
# NOTE: these are built-in permissions we need -- we probably need to refine when these are needed too
permissions_required = ['INTERNET','ACCESS_WIFI_STATE','ACCESS_NETWORK_STATE', 'WRITE_EXTERNAL_STORAGE']
GEO_PERMISSION = [ 'ACCESS_COARSE_LOCATION', 'ACCESS_FINE_LOCATION', 'ACCESS_MOCK_LOCATION']
CONTACTS_PERMISSION = ['READ_CONTACTS']
VIBRATE_PERMISSION = ['VIBRATE']
CAMERA_PERMISSION = ['CAMERA']
WALLPAPER_PERMISSION = ['SET_WALLPAPER']
# this is our module method to permission(s) trigger - for each method on the left, require the permission(s) on the right
permission_mapping = {
# GEO
'Geolocation.watchPosition' : GEO_PERMISSION,
'Geolocation.getCurrentPosition' : GEO_PERMISSION,
'Geolocation.watchHeading' : GEO_PERMISSION,
'Geolocation.getCurrentHeading' : GEO_PERMISSION,
# MEDIA
'Media.vibrate' : VIBRATE_PERMISSION,
'Media.showCamera' : CAMERA_PERMISSION,
# CONTACTS
'Contacts.createContact' : CONTACTS_PERMISSION,
'Contacts.saveContact' : CONTACTS_PERMISSION,
'Contacts.removeContact' : CONTACTS_PERMISSION,
'Contacts.addContact' : CONTACTS_PERMISSION,
'Contacts.getAllContacts' : CONTACTS_PERMISSION,
'Contacts.showContactPicker' : CONTACTS_PERMISSION,
'Contacts.showContacts' : CONTACTS_PERMISSION,
'Contacts.getPersonByID' : CONTACTS_PERMISSION,
'Contacts.getPeopleWithName' : CONTACTS_PERMISSION,
'Contacts.getAllPeople' : CONTACTS_PERMISSION,
'Contacts.getAllGroups' : CONTACTS_PERMISSION,
'Contacts.getGroupByID' : CONTACTS_PERMISSION,
# WALLPAPER
'Media.Android.setSystemWallpaper' : WALLPAPER_PERMISSION,
}
VIDEO_ACTIVITY = """<activity
android:name="ti.modules.titanium.media.TiVideoActivity"
android:configChanges="keyboardHidden|orientation"
android:theme="@android:style/Theme.NoTitleBar.Fullscreen"
android:launchMode="singleTask"
/>"""
MAP_ACTIVITY = """<activity
android:name="ti.modules.titanium.map.TiMapActivity"
android:configChanges="keyboardHidden|orientation"
android:launchMode="singleTask"
/>
<uses-library android:name="com.google.android.maps" />"""
FACEBOOK_ACTIVITY = """<activity
android:name="ti.modules.titanium.facebook.FBActivity"
android:theme="@android:style/Theme.Translucent.NoTitleBar"
/>"""
CAMERA_ACTIVITY = """<activity
android:name="ti.modules.titanium.media.TiCameraActivity"
android:configChanges="keyboardHidden|orientation"
android:theme="@android:style/Theme.Translucent.NoTitleBar.Fullscreen"
/>"""
activity_mapping = {
# MEDIA
'Media.createVideoPlayer' : VIDEO_ACTIVITY,
'Media.showCamera' : CAMERA_ACTIVITY,
# MAPS
'Map.createView' : MAP_ACTIVITY,
# FACEBOOK
'Facebook.setup' : FACEBOOK_ACTIVITY,
'Facebook.login' : FACEBOOK_ACTIVITY,
'Facebook.createLoginButton' : FACEBOOK_ACTIVITY,
}
# this is a map of our APIs to ones that require Google APIs to be available on the device
google_apis = {
"Map.createView" : True
}
activities = []
# figure out which permissions we need based on the used module methods
for mn in compiler.module_methods:
try:
perms = permission_mapping[mn]
if perms:
for perm in perms:
try:
permissions_required.index(perm)
except:
permissions_required.append(perm)
except:
pass
try:
mappings = activity_mapping[mn]
try:
if google_apis[mn] and not self.google_apis_supported:
warn("Google APIs detected but a device has been selected that doesn't support them. The API call to Titanium.%s will fail using '%s'" % (mn,my_avd['name']))
continue
except:
pass
try:
activities.index(mappings)
except:
activities.append(mappings)
except:
pass
# Javascript-based activities defined in tiapp.xml
if self.tiapp and self.tiapp.android and 'activities' in self.tiapp.android:
tiapp_activities = self.tiapp.android['activities']
for key in tiapp_activities:
activity = tiapp_activities[key]
if not 'url' in activity:
continue
activity_name = self.app_id + '.' + activity['classname']
activity_str = '<activity \n\t\t\tandroid:name="%s"' % activity_name
for subkey in activity:
if subkey not in ('nodes', 'name', 'url', 'options', 'classname', 'android:name'):
activity_str += '\n\t\t\t%s="%s"' % (subkey, activity[subkey])
if 'android:config' not in activity:
activity_str += '\n\t\t\tandroid:configChanges="keyboardHidden|orientation"'
if 'nodes' in activity:
activity_str += '>'
for node in activity['nodes']:
activity_str += '\n\t\t\t\t' + node.toxml()
activities.append(activity_str + '\n\t\t</activity>\n')
else:
activities.append(activity_str + '\n\t\t/>\n')
activities = set(activities)
services = []
# Javascript-based services defined in tiapp.xml
if self.tiapp and self.tiapp.android and 'services' in self.tiapp.android:
tiapp_services = self.tiapp.android['services']
for key in tiapp_services:
service = tiapp_services[key]
if not 'url' in service:
continue
service_name = self.app_id + '.' + service['classname']
service_str = '<service \n\t\t\tandroid:name="%s"' % service_name
for subkey in service:
if subkey not in ('nodes', 'service_type', 'type', 'name', 'url', 'options', 'classname', 'android:name'):
service_str += '\n\t\t\t%s="%s"' % (subkey, service[subkey])
if 'nodes' in service:
service_str += '>'
for node in service['nodes']:
service_str += '\n\t\t\t\t' + node.toxml()
services.append(service_str + '\n\t\t</service>\n')
else:
services.append(service_str + '\n\t\t/>\n')
self.use_maps = False
self.res_changed = False
iconname = self.tiapp.properties['icon']
iconpath = os.path.join(self.assets_resources_dir, iconname)
iconext = os.path.splitext(iconpath)[1]
res_drawable_dest = os.path.join(self.project_dir, 'res','drawable')
if not os.path.exists(res_drawable_dest):
os.makedirs(res_drawable_dest)
default_icon = os.path.join(self.support_resources_dir, 'default.png')
dest_icon = os.path.join(res_drawable_dest, 'appicon%s' % iconext)
if Deltafy.needs_update(iconpath, dest_icon):
self.res_changed = True
debug("copying app icon: %s" % iconpath)
shutil.copy(iconpath, dest_icon)
elif Deltafy.needs_update(default_icon, dest_icon):
self.res_changed = True
debug("copying default app icon")
shutil.copy(default_icon, dest_icon)
# make our Titanium theme for our icon
res_values_dir = os.path.join(self.project_dir, 'res','values')
if not os.path.exists(res_values_dir):
os.makedirs(res_values_dir)
theme_xml = os.path.join(res_values_dir,'theme.xml')
if not os.path.exists(theme_xml):
self.res_changed = True
debug('generating theme.xml')
theme_file = open(theme_xml, 'w')
theme_flags = "Theme"
		# We need to treat the default values for fullscreen and
# navbar-hidden the same as android.py does -- false for both.
theme_fullscreen = False
theme_navbarhidden = False
if (self.tiapp.properties.get("fullscreen") == "true" or
self.tiapp.properties.get("statusbar-hidden") == "true"):
theme_fullscreen = True
elif self.tiapp.properties.get("navbar-hidden") == "true":
theme_navbarhidden = True
if theme_fullscreen:
theme_flags += ".NoTitleBar.Fullscreen"
elif theme_navbarhidden:
theme_flags += ".NoTitleBar"
# Wait, one exception. If you want the notification area (very
# top of screen) hidden, but want the title bar in the app,
# there's no theme for that. So we have to use the default theme (no flags)
# and when the application code starts running, the adjustments are then made.
# Only do this when the properties are explicitly set, so as to avoid changing
# old default behavior.
if theme_flags.endswith('.Fullscreen') and \
self.tiapp.properties.get("navbar-hidden") == 'false' and \
('fullscreen' in self.tiapp.explicit_properties or \
'statusbar-hidden' in self.tiapp.explicit_properties) and \
'navbar-hidden' in self.tiapp.explicit_properties:
theme_flags = 'Theme'
TITANIUM_THEME="""<?xml version="1.0" encoding="utf-8"?>
<resources>
<style name="Theme.Titanium" parent="android:%s">
<item name="android:windowBackground">@drawable/background</item>
</style>
</resources>
""" % theme_flags
theme_file.write(TITANIUM_THEME)
theme_file.close()
# create our background image which acts as splash screen during load
resources_dir = os.path.join(self.top_dir, 'Resources')
android_images_dir = os.path.join(resources_dir, 'android', 'images')
# look for density-specific default.png's first
if os.path.exists(android_images_dir):
pattern = r'/android/images/(high|medium|low|res-[^/]+)/default.png'
for root, dirs, files in os.walk(android_images_dir):
remove_ignored_dirs(dirs)
for f in files:
if f in ignoreFiles:
continue
path = os.path.join(root, f)
if re.search(pattern, path.replace(os.sep, "/")):
res_folder = resource_drawable_folder(path)
debug('found %s splash screen at %s' % (res_folder, path))
dest_path = os.path.join(self.res_dir, res_folder)
dest_file = os.path.join(dest_path, 'background.png')
if not os.path.exists(dest_path):
os.makedirs(dest_path)
if Deltafy.needs_update(path, dest_file):
self.res_changed = True
debug('copying %s splash screen to %s' % (path, dest_file))
shutil.copy(path, dest_file)
default_png = os.path.join(self.assets_resources_dir, 'default.png')
support_default_png = os.path.join(self.support_resources_dir, 'default.png')
background_png = os.path.join(self.project_dir, 'res','drawable','background.png')
if os.path.exists(default_png) and Deltafy.needs_update(default_png, background_png):
self.res_changed = True
debug("found splash screen at %s" % os.path.abspath(default_png))
shutil.copy(default_png, background_png)
elif Deltafy.needs_update(support_default_png, background_png):
self.res_changed = True
debug("copying default splash screen")
shutil.copy(support_default_png, background_png)
android_manifest = os.path.join(self.project_dir, 'AndroidManifest.xml')
android_manifest_to_read = android_manifest
# NOTE: allow the user to use their own custom AndroidManifest if they put a file named
# AndroidManifest.xml in platform/android, in which case all bets are off
is_custom = False
# Catch people who may have it in project root (un-released 1.4.x android_native_refactor branch users)
if os.path.exists(os.path.join(self.top_dir, 'AndroidManifest.xml')):
warn('AndroidManifest.xml file in the project root is ignored. Move it to platform/android if you want it to be your custom manifest.')
android_custom_manifest = os.path.join(self.project_dir, 'AndroidManifest.custom.xml')
if not os.path.exists(android_custom_manifest):
android_custom_manifest = os.path.join(self.platform_dir, 'AndroidManifest.xml')
else:
warn('Use of AndroidManifest.custom.xml is deprecated. Please put your custom manifest as "AndroidManifest.xml" in the "platform/android" directory if you do not need to compile for versions < 1.5')
if os.path.exists(android_custom_manifest):
android_manifest_to_read = android_custom_manifest
is_custom = True
info("Detected custom ApplicationManifest.xml -- no Titanium version migration supported")
default_manifest_contents = self.android.render_android_manifest()
custom_manifest_contents = None
if is_custom:
custom_manifest_contents = open(android_manifest_to_read,'r').read()
manifest_xml = ''
def get_manifest_xml(tiapp, template_obj=None):
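			# Collect raw <manifest> child elements from tiapp.xml (or a module manifest):
			# <uses-permission> entries are folded into permissions_required, uses-sdk and
			# supports-screens are skipped, and everything else is returned as XML text.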
xml = ''
if 'manifest' in tiapp.android_manifest:
for manifest_el in tiapp.android_manifest['manifest']:
					# since we already track permissions in another way, go ahead and use that
if manifest_el.nodeName == 'uses-permission' and manifest_el.hasAttribute('android:name'):
if manifest_el.getAttribute('android:name').split('.')[-1] not in permissions_required:
perm_val = manifest_el.getAttribute('android:name')
if template_obj is not None and "${" in perm_val:
perm_val = render_template_with_tiapp(perm_val, template_obj)
permissions_required.append(perm_val)
elif manifest_el.nodeName not in ('supports-screens', 'uses-sdk'):
this_xml = manifest_el.toprettyxml()
if template_obj is not None and "${" in this_xml:
this_xml = render_template_with_tiapp(this_xml, template_obj)
xml += this_xml
return xml
application_xml = ''
def get_application_xml(tiapp, template_obj=None):
xml = ''
if 'application' in tiapp.android_manifest:
for app_el in tiapp.android_manifest['application']:
this_xml = app_el.toxml()
if template_obj is not None and "${" in this_xml:
this_xml = render_template_with_tiapp(this_xml, template_obj)
xml += this_xml
return xml
# add manifest / application entries from tiapp.xml
manifest_xml += get_manifest_xml(self.tiapp)
application_xml += get_application_xml(self.tiapp)
# add manifest / application entries from modules
for module in self.modules:
if module.xml == None: continue
manifest_xml += get_manifest_xml(module.xml, self.tiapp)
application_xml += get_application_xml(module.xml, self.tiapp)
# build the permissions XML based on the permissions detected
permissions_required = set(permissions_required)
permissions_required_xml = ""
for p in permissions_required:
if '.' not in p:
permissions_required_xml+="<uses-permission android:name=\"android.permission.%s\"/>\n\t" % p
else:
permissions_required_xml+="<uses-permission android:name=\"%s\"/>\n\t" % p
def fill_manifest(manifest_source):
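			# Substitute the generated activities, services, permissions and any extra
			# manifest/application XML into the TI_* placeholder comments of a manifest template.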
ti_activities = '<!-- TI_ACTIVITIES -->'
ti_permissions = '<!-- TI_PERMISSIONS -->'
ti_manifest = '<!-- TI_MANIFEST -->'
ti_application = '<!-- TI_APPLICATION -->'
ti_services = '<!-- TI_SERVICES -->'
manifest_source = manifest_source.replace(ti_activities,"\n\n\t\t".join(activities))
manifest_source = manifest_source.replace(ti_services,"\n\n\t\t".join(services))
manifest_source = manifest_source.replace(ti_permissions,permissions_required_xml)
if len(manifest_xml) > 0:
manifest_source = manifest_source.replace(ti_manifest, manifest_xml)
if len(application_xml) > 0:
manifest_source = manifest_source.replace(ti_application, application_xml)
return manifest_source
default_manifest_contents = fill_manifest(default_manifest_contents)
# if a custom uses-sdk or supports-screens has been specified via tiapp.xml
# <android><manifest>..., we need to replace the ones in the generated
# default manifest
supports_screens_node = None
uses_sdk_node = None
if 'manifest' in self.tiapp.android_manifest:
for node in self.tiapp.android_manifest['manifest']:
if node.nodeName == 'uses-sdk':
uses_sdk_node = node
elif node.nodeName == 'supports-screens':
supports_screens_node = node
if supports_screens_node or uses_sdk_node or ('manifest-attributes' in self.tiapp.android_manifest and self.tiapp.android_manifest['manifest-attributes'].length) or ('application-attributes' in self.tiapp.android_manifest and self.tiapp.android_manifest['application-attributes'].length):
dom = parseString(default_manifest_contents)
def replace_node(olddom, newnode):
nodes = olddom.getElementsByTagName(newnode.nodeName)
retval = False
if nodes:
olddom.documentElement.replaceChild(newnode, nodes[0])
retval = True
return retval
if supports_screens_node:
if not replace_node(dom, supports_screens_node):
dom.documentElement.insertBefore(supports_screens_node, dom.documentElement.firstChild.nextSibling)
if uses_sdk_node:
replace_node(dom, uses_sdk_node)
def set_attrs(element, new_attr_set):
for k in new_attr_set.keys():
if element.hasAttribute(k):
element.removeAttribute(k)
element.setAttribute(k, new_attr_set.get(k).value)
if 'manifest-attributes' in self.tiapp.android_manifest and self.tiapp.android_manifest['manifest-attributes'].length:
set_attrs(dom.documentElement, self.tiapp.android_manifest['manifest-attributes'])
if 'application-attributes' in self.tiapp.android_manifest and self.tiapp.android_manifest['application-attributes'].length:
set_attrs(dom.getElementsByTagName('application')[0], self.tiapp.android_manifest['application-attributes'])
default_manifest_contents = dom.toxml()
if application_xml:
# If the tiapp.xml <manifest><application> section was not empty, it could be
# that user put in <activity> entries that duplicate our own,
# such as if they want a custom theme on TiActivity. So we should delete any dupes.
dom = parseString(default_manifest_contents)
package_name = dom.documentElement.getAttribute('package')
manifest_activities = dom.getElementsByTagName('activity')
activity_names = []
nodes_to_delete = []
for manifest_activity in manifest_activities:
if manifest_activity.hasAttribute('android:name'):
activity_name = manifest_activity.getAttribute('android:name')
if activity_name.startswith('.'):
activity_name = package_name + activity_name
if activity_name in activity_names:
nodes_to_delete.append(manifest_activity)
else:
activity_names.append(activity_name)
if nodes_to_delete:
for node_to_delete in nodes_to_delete:
node_to_delete.parentNode.removeChild(node_to_delete)
default_manifest_contents = dom.toxml()
if custom_manifest_contents:
custom_manifest_contents = fill_manifest(custom_manifest_contents)
new_manifest_contents = None
android_manifest_gen = android_manifest + '.gen'
if custom_manifest_contents:
new_manifest_contents = custom_manifest_contents
# Write the would-be default as well so user can see
# some of the auto-gen'd insides of it if they need/want.
amf = open(android_manifest + '.gen', 'w')
amf.write(default_manifest_contents)
amf.close()
else:
new_manifest_contents = default_manifest_contents
if os.path.exists(android_manifest_gen):
os.remove(android_manifest_gen)
manifest_changed = False
old_contents = None
if os.path.exists(android_manifest):
old_contents = open(android_manifest, 'r').read()
if new_manifest_contents != old_contents:
trace("Writing out AndroidManifest.xml")
amf = open(android_manifest,'w')
amf.write(new_manifest_contents)
amf.close()
manifest_changed = True
if self.res_changed or manifest_changed:
res_dir = os.path.join(self.project_dir, 'res')
output = run.run([self.aapt, 'package', '-m', '-J', self.project_gen_dir, '-M', android_manifest, '-S', res_dir, '-I', self.android_jar],
warning_regex=r'skipping')
r_file = os.path.join(self.project_gen_dir, self.app_id.replace('.', os.sep), 'R.java')
if not os.path.exists(r_file) or (self.res_changed and output == None):
error("Error generating R.java from manifest")
sys.exit(1)
return manifest_changed
def generate_stylesheet(self):
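		# Regenerate ApplicationStylesheet.java from the project's .jss files whenever the
		# generated file is missing or any stylesheet is newer than it.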
update_stylesheet = False
resources_dir = os.path.join(self.top_dir, 'Resources')
project_gen_pkg_dir = os.path.join(self.project_gen_dir, self.app_id.replace('.', os.sep))
app_stylesheet = os.path.join(project_gen_pkg_dir, 'ApplicationStylesheet.java')
if not os.path.exists(app_stylesheet):
update_stylesheet = True
else:
for root, dirs, files in os.walk(resources_dir):
remove_ignored_dirs(dirs)
for f in files:
if f in ignoreFiles:
continue
if f.endswith(".jss"):
absolute_path = os.path.join(root, f)
if Deltafy.needs_update(absolute_path, app_stylesheet):
update_stylesheet = True
break
if not update_stylesheet:
return
cssc = csscompiler.CSSCompiler(resources_dir, 'android', self.app_id)
if not os.path.exists(project_gen_pkg_dir):
os.makedirs(project_gen_pkg_dir)
debug("app stylesheet => %s" % app_stylesheet)
asf = codecs.open(app_stylesheet, 'w', 'utf-8')
asf.write(cssc.code)
asf.close()
def generate_localizations(self):
# compile localization files
localecompiler.LocaleCompiler(self.name,self.top_dir,'android',sys.argv[1]).compile()
# fix un-escaped single-quotes and full-quotes
offending_pattern = '[^\\\\][\'"]'
for root, dirs, files in os.walk(self.res_dir):
remove_ignored_dirs(dirs)
for filename in files:
if filename in ignoreFiles or not filename.endswith('.xml'):
continue
full_path = os.path.join(root, filename)
f = codecs.open(full_path, 'r', 'utf-8')
contents = f.read()
f.close()
if not re.search(r"<string ", contents):
continue
doc = parseString(contents.encode("utf-8"))
string_nodes = doc.getElementsByTagName('string')
if len(string_nodes) == 0:
continue
made_change = False
for string_node in string_nodes:
if not string_node.hasChildNodes():
continue
string_child = string_node.firstChild
if string_child.nodeType == string_child.CDATA_SECTION_NODE or string_child.nodeType == string_child.TEXT_NODE:
string_value = string_child.nodeValue
if not re.search(offending_pattern, string_value):
continue
offenders = re.findall(offending_pattern, string_value)
if offenders:
for offender in offenders:
string_value = string_value.replace(offender, offender[0] + "\\" + offender[-1:])
made_change = True
string_child.nodeValue = string_value
if made_change:
new_contents = doc.toxml()
f = codecs.open(full_path, 'w', 'utf-8')
f.write(new_contents)
f.close()
def recurse(self, paths, file_glob=None):
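		# Yield every non-ignored file underneath the given path(s), optionally filtered
		# with an fnmatch-style glob.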
if paths == None: yield None
if not isinstance(paths, list): paths = [paths]
for path in paths:
for root, dirs, files in os.walk(path):
remove_ignored_dirs(dirs)
for filename in files:
if filename in ignoreFiles:
continue
if file_glob != None:
if not fnmatch.fnmatch(filename, file_glob): continue
yield os.path.join(root, filename)
def generate_aidl(self):
# support for android remote interfaces in platform/android/src
framework_aidl = self.sdk.platform_path('framework.aidl')
aidl_args = [self.sdk.get_aidl(), '-p' + framework_aidl, '-I' + self.project_src_dir, '-o' + self.project_gen_dir]
for aidl_file in self.recurse(self.project_src_dir, '*.aidl'):
run.run(aidl_args + [aidl_file])
def build_generated_classes(self):
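		# Compile only the .java sources that are newer than their .class counterparts,
		# building the javac classpath from the Android SDK jars, module jars and (for
		# non-production builds) the Titanium verify/debug jars; returns True if javac ran.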
src_list = []
self.module_jars = []
class_delta = timedelta(seconds=1)
for java_file in self.recurse([self.project_src_dir, self.project_gen_dir], '*.java'):
if self.project_src_dir in java_file:
relative_path = java_file[len(self.project_src_dir)+1:]
else:
relative_path = java_file[len(self.project_gen_dir)+1:]
class_file = os.path.join(self.classes_dir, relative_path.replace('.java', '.class'))
if Deltafy.needs_update(java_file, class_file) > 0:
# the file list file still needs each file escaped apparently
debug("adding %s to javac build list" % java_file)
src_list.append('"%s"' % java_file.replace("\\", "\\\\"))
if len(src_list) == 0:
# No sources are older than their classfile counterparts, we can skip javac / dex
return False
classpath = os.pathsep.join([self.android_jar, os.pathsep.join(self.android_jars)])
project_module_dir = os.path.join(self.top_dir,'modules','android')
for module in self.modules:
if module.jar == None: continue
self.module_jars.append(module.jar)
classpath = os.pathsep.join([classpath, module.jar])
module_lib = module.get_resource('lib')
for jar in glob.glob(os.path.join(module_lib, '*.jar')):
self.module_jars.append(jar)
classpath = os.pathsep.join([classpath, jar])
if len(self.module_jars) > 0:
# kroll-apt.jar is needed for modules
classpath = os.pathsep.join([classpath, self.kroll_apt_jar])
if self.deploy_type != 'production':
classpath = os.pathsep.join([classpath,
os.path.join(self.support_dir, 'lib', 'titanium-verify.jar'),
os.path.join(self.support_dir, 'lib', 'titanium-debug.jar')])
debug("Building Java Sources: " + " ".join(src_list))
javac_command = [self.javac, '-encoding', 'utf8',
'-classpath', classpath, '-d', self.classes_dir, '-proc:none',
'-sourcepath', self.project_src_dir,
'-sourcepath', self.project_gen_dir]
(src_list_osfile, src_list_filename) = tempfile.mkstemp()
src_list_file = os.fdopen(src_list_osfile, 'w')
src_list_file.write("\n".join(src_list))
src_list_file.close()
javac_command.append('@' + src_list_filename)
(out, err, javac_process) = run.run(javac_command, ignore_error=True, return_error=True, return_process=True)
os.remove(src_list_filename)
if javac_process.returncode != 0:
error("Error(s) compiling generated Java code")
error(str(err))
sys.exit(1)
return True
def create_unsigned_apk(self, resources_zip_file):
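		# Assemble bin/app-unsigned.apk from the aapt resource package, classes.dex, any
		# non-Java files under src, module/runtime jars and native .so libraries, marking
		# the APK as updated whenever an input is newer than the existing package.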
unsigned_apk = os.path.join(self.project_dir, 'bin', 'app-unsigned.apk')
self.apk_updated = False
apk_modified = None
if os.path.exists(unsigned_apk):
apk_modified = Deltafy.get_modified_datetime(unsigned_apk)
debug("creating unsigned apk: " + unsigned_apk)
# copy existing resources into the APK
apk_zip = zipfile.ZipFile(unsigned_apk, 'w', zipfile.ZIP_DEFLATED)
def skip_jar_path(path):
ext = os.path.splitext(path)[1]
if path.endswith('/'): return True
if path.startswith('META-INF/'): return True
if path.split('/')[-1].startswith('.'): return True
if ext == '.class': return True
if 'org/appcelerator/titanium/bindings' in path and ext == '.json': return True
def compression_type(path):
ext = os.path.splitext(path)[1]
if ext in uncompressed_types:
return zipfile.ZIP_STORED
return zipfile.ZIP_DEFLATED
def zipinfo(path):
info = zipfile.ZipInfo(path)
info.compress_type = compression_type(path)
return info
def is_modified(path):
return apk_modified is None or Deltafy.needs_update_timestamp(path, apk_modified)
def zip_contains(zip, entry):
try:
zip.getinfo(entry)
except:
return False
return True
if is_modified(resources_zip_file):
self.apk_updated = True
resources_zip = zipfile.ZipFile(resources_zip_file)
for path in resources_zip.namelist():
if skip_jar_path(path): continue
debug("from resource zip => " + path)
apk_zip.writestr(zipinfo(path), resources_zip.read(path))
resources_zip.close()
# add classes.dex
if is_modified(self.classes_dex) or not zip_contains(apk_zip, 'classes.dex'):
apk_zip.write(self.classes_dex, 'classes.dex')
# add all resource files from the project
for root, dirs, files in os.walk(self.project_src_dir):
remove_ignored_dirs(dirs)
for f in files:
if f in ignoreFiles:
continue
if os.path.splitext(f)[1] != '.java':
absolute_path = os.path.join(root, f)
relative_path = os.path.join(root[len(self.project_src_dir)+1:], f)
if is_modified(absolute_path):
self.apk_updated = True
debug("resource file => " + relative_path)
apk_zip.write(os.path.join(root, f), relative_path, compression_type(f))
def add_resource_jar(jar_file):
jar = zipfile.ZipFile(jar_file)
for path in jar.namelist():
if skip_jar_path(path): continue
debug("from JAR %s => %s" % (jar_file, path))
apk_zip.writestr(zipinfo(path), jar.read(path))
jar.close()
for jar_file in self.module_jars:
add_resource_jar(jar_file)
for jar_file in self.android_jars:
add_resource_jar(jar_file)
def add_native_libs(libs_dir):
if os.path.exists(libs_dir):
for abi_dir in os.listdir(libs_dir):
libs_abi_dir = os.path.join(libs_dir, abi_dir)
if not os.path.isdir(libs_abi_dir): continue
for file in os.listdir(libs_abi_dir):
if file.endswith('.so'):
native_lib = os.path.join(libs_abi_dir, file)
if is_modified(native_lib):
self.apk_updated = True
debug("installing native lib: %s" % native_lib)
apk_zip.write(native_lib, '/'.join(['lib', abi_dir, file]))
# add any native libraries : libs/**/*.so -> lib/**/*.so
add_native_libs(os.path.join(self.project_dir, 'libs'))
# add module native libraries
for module in self.modules:
add_native_libs(module.get_resource('libs'))
apk_zip.close()
return unsigned_apk
def run_adb(self, *args):
command = [self.sdk.get_adb()]
command.extend(self.device_args)
command.extend(args)
return run.run(command)
def package_and_deploy(self):
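		# Package assets with aapt, build and sign the APK, zipalign it and, unless this is
		# a build-only or distribution run, install it onto the emulator or device over adb
		# (retrying a few times while the device finishes booting).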
ap_ = os.path.join(self.project_dir, 'bin', 'app.ap_')
rhino_jar = os.path.join(self.support_dir, 'js.jar')
# This is only to check if this has been overridden in production
has_compile_js = self.tiappxml.has_app_property("ti.android.compilejs")
compile_js = not has_compile_js or (has_compile_js and \
self.tiappxml.to_bool(self.tiappxml.get_app_property('ti.android.compilejs')))
pkg_assets_dir = self.assets_dir
if self.deploy_type == "production" and compile_js:
non_js_assets = os.path.join(self.project_dir, 'bin', 'non-js-assets')
if not os.path.exists(non_js_assets):
os.mkdir(non_js_assets)
copy_all(self.assets_dir, non_js_assets, ignore_exts=[".js"])
pkg_assets_dir = non_js_assets
run.run([self.aapt, 'package', '-f', '-M', 'AndroidManifest.xml', '-A', pkg_assets_dir,
'-S', 'res', '-I', self.android_jar, '-I', self.titanium_jar, '-F', ap_], warning_regex=r'skipping')
unsigned_apk = self.create_unsigned_apk(ap_)
if self.dist_dir:
app_apk = os.path.join(self.dist_dir, self.name + '.apk')
else:
app_apk = os.path.join(self.project_dir, 'bin', 'app.apk')
output = run.run([self.jarsigner, '-storepass', self.keystore_pass, '-keystore', self.keystore, '-signedjar', app_apk, unsigned_apk, self.keystore_alias])
run.check_output_for_error(output, r'RuntimeException: (.*)', True)
run.check_output_for_error(output, r'^jarsigner: (.*)', True)
# TODO Document Exit message
#success = re.findall(r'RuntimeException: (.*)', output)
#if len(success) > 0:
# error(success[0])
# sys.exit(1)
# zipalign to align byte boundaries
zipalign = self.sdk.get_zipalign()
if os.path.exists(app_apk+'z'):
os.remove(app_apk+'z')
ALIGN_32_BIT = 4
output = run.run([zipalign, '-v', str(ALIGN_32_BIT), app_apk, app_apk+'z'])
# TODO - Document Exit message
if output == None:
error("System Error while compiling Android classes.dex")
sys.exit(1)
else:
os.unlink(app_apk)
os.rename(app_apk+'z',app_apk)
if self.dist_dir:
self.post_build()
sys.exit()
if self.build_only:
return (False, False)
out = self.run_adb('get-state')
#out = subprocess.Popen([self.sdk.get_adb(), self.device_type_arg, 'get-state'], stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[0]
out = str(out).strip()
# try a few times as sometimes it fails waiting on boot
attempts = 0
launched = False
launch_failed = False
while attempts < 5:
try:
				if self.install:
					self.wait_for_device('d')
					info("Installing application on device")
				else:
					self.wait_for_device('e')
					info("Installing application on emulator")
output = self.run_adb('install', '-r', app_apk)
#output = run.run(cmd)
if output == None:
launch_failed = True
elif "Failure" in output:
error("Failed installing %s: %s" % (self.app_id, output))
launch_failed = True
elif not self.install:
launched = True
break
except Exception, e:
error(e)
time.sleep(3)
attempts+=1
return (launched, launch_failed)
def run_app(self):
info("Launching application ... %s" % self.name)
output = self.run_adb('shell', 'am', 'start',
'-a', 'android.intent.action.MAIN',
'-c','android.intent.category.LAUNCHER',
'-n', '%s/.%sActivity' % (self.app_id , self.classname))
trace("Launch output: %s" % output)
def wait_for_sdcard(self):
info("Waiting for SDCard to become available..")
waited = 0
max_wait = 60
while waited < max_wait:
output = self.run_adb('shell', 'mount')
if output != None:
mount_points = output.splitlines()
for mount_point in mount_points:
tokens = mount_point.split()
if len(tokens) < 2: continue
mount_path = tokens[1]
if mount_path in ['/sdcard', '/mnt/sdcard']:
return True
else:
error("Error checking for SDCard using 'mount'")
return False
time.sleep(1)
waited += 1
error("Timed out waiting for SDCard to become available (%ds)" % max_wait)
return False
def push_deploy_json(self):
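		# Write debugger/fastdev connection details to deploy.json and push it to the app's
		# directory on the SD card so the runtime can read it at startup.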
deploy_data = {
"debuggerEnabled": self.debugger_host != None,
"debuggerPort": self.debugger_port,
"fastdevPort": self.fastdev_port
}
deploy_json = os.path.join(self.project_dir, 'bin', 'deploy.json')
open(deploy_json, 'w+').write(simplejson.dumps(deploy_data))
sdcard_available = self.wait_for_sdcard()
if sdcard_available:
self.run_adb('shell', 'mkdir /sdcard/%s || echo' % self.app_id)
self.run_adb('push', deploy_json, '/sdcard/%s/deploy.json' % self.app_id)
os.unlink(deploy_json)
def verify_fastdev(self):
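		# Check whether a fastdev server is running for this project: read its port from
		# .fastdev.lock when it is, otherwise remove a stale lock file and return False.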
lock_file = os.path.join(self.top_dir, '.fastdev.lock')
if not fastdev.is_running(self.top_dir):
if os.path.exists(lock_file):
os.unlink(lock_file)
return False
else:
data = simplejson.loads(open(lock_file, 'r').read())
self.fastdev_port = data["port"]
return True
def fastdev_kill_app(self):
lock_file = os.path.join(self.top_dir, ".fastdev.lock")
if os.path.exists(lock_file):
class Options(object): pass
options = Options()
options.lock_file = lock_file
try:
return fastdev.kill_app(self.top_dir, options)
except Exception, e:
return False
def merge_internal_module_resources(self):
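		# Extract the .res.zip that ships next to each internal module jar into the project
		# directory so its Android resources get compiled into the app.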
if not self.android_jars:
return
for jar in self.android_jars:
if not os.path.exists(jar):
continue
res_zip = jar[:-4] + '.res.zip'
if not os.path.exists(res_zip):
continue
res_zip_file = zipfile.ZipFile(res_zip, "r")
try:
zip_extractall(res_zip_file, self.project_dir)
except:
raise
finally:
res_zip_file.close()
def build_and_run(self, install, avd_id, keystore=None, keystore_pass='tirocks', keystore_alias='tidev', dist_dir=None, build_only=False, device_args=None, debugger_host=None):
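		# Main build pipeline: pick the deploy type, run any compiler plugins, generate the
		# Java sources and AndroidManifest.xml, compile and dex the classes, then package,
		# sign and deploy (or just relaunch the app when nothing needed rebuilding).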
deploy_type = 'development'
self.build_only = build_only
self.device_args = device_args
self.postbuild_modules = []
if install:
if self.device_args == None:
self.device_args = ['-d']
if keystore == None:
deploy_type = 'test'
else:
deploy_type = 'production'
if self.device_args == None:
self.device_args = ['-e']
self.deploy_type = deploy_type
(java_failed, java_status) = prereq.check_java()
if java_failed:
error(java_status)
sys.exit(1)
# attempt to load any compiler plugins
if len(self.tiappxml.properties['plugins']) > 0:
titanium_dir = os.path.abspath(os.path.join(template_dir,'..','..','..','..'))
local_compiler_dir = os.path.abspath(os.path.join(self.top_dir,'plugins'))
tp_compiler_dir = os.path.abspath(os.path.join(titanium_dir,'plugins'))
if not os.path.exists(tp_compiler_dir) and not os.path.exists(local_compiler_dir):
error("Build Failed (Missing plugins directory)")
sys.exit(1)
compiler_config = {
'platform':'android',
'tiapp':self.tiappxml,
'project_dir':self.top_dir,
'titanium_dir':titanium_dir,
'appid':self.app_id,
'template_dir':template_dir,
'project_name':self.name,
'command':self.command,
				'build_dir':self.project_dir,
'app_name':self.name,
'android_builder':self,
'deploy_type':deploy_type,
'dist_dir':dist_dir,
'logger':log
}
for plugin in self.tiappxml.properties['plugins']:
local_plugin_file = os.path.join(local_compiler_dir,plugin['name'],'plugin.py')
plugin_file = os.path.join(tp_compiler_dir,plugin['name'],plugin['version'],'plugin.py')
info("plugin=%s" % plugin_file)
if not os.path.exists(local_plugin_file) and not os.path.exists(plugin_file):
error("Build Failed (Missing plugin for %s)" % plugin['name'])
sys.exit(1)
info("Detected compiler plugin: %s/%s" % (plugin['name'],plugin['version']))
code_path = plugin_file
if os.path.exists(local_plugin_file):
code_path = local_plugin_file
compiler_config['plugin']=plugin
fin = open(code_path, 'rb')
m = hashlib.md5()
m.update(open(code_path,'rb').read())
code_hash = m.hexdigest()
p = imp.load_source(code_hash, code_path, fin)
module_functions = dict(inspect.getmembers(p, inspect.isfunction))
if module_functions.has_key('postbuild'):
debug("plugin contains a postbuild function. Will execute after project is built and packaged")
self.postbuild_modules.append((plugin['name'], p))
p.compile(compiler_config)
fin.close()
# in Windows, if the adb server isn't running, calling "adb devices"
# will fork off a new adb server, and cause a lock-up when we
# try to pipe the process' stdout/stderr. the workaround is
# to simply call adb start-server here, and not care about
# the return code / pipes. (this is harmless if adb is already running)
# -- thanks to Bill Dawson for the workaround
if platform.system() == "Windows" and not build_only:
run.run([self.sdk.get_adb(), "start-server"], True, ignore_output=True)
ti_version_file = os.path.join(self.support_dir, '..', 'version.txt')
if os.path.exists(ti_version_file):
ti_version_info = read_properties(open(ti_version_file, 'r'), '=')
if not ti_version_info is None and 'version' in ti_version_info:
ti_version_string = 'Titanium SDK version: %s' % ti_version_info['version']
if 'timestamp' in ti_version_info or 'githash' in ti_version_info:
ti_version_string += ' ('
if 'timestamp' in ti_version_info:
ti_version_string += '%s' % ti_version_info['timestamp']
if 'githash' in ti_version_info:
ti_version_string += ' %s' % ti_version_info['githash']
ti_version_string += ')'
info(ti_version_string)
if not build_only:
if deploy_type == 'development':
self.wait_for_device('e')
elif deploy_type == 'test':
self.wait_for_device('d')
self.install = install
self.dist_dir = dist_dir
self.aapt = self.sdk.get_aapt()
self.android_jar = self.sdk.get_android_jar()
self.titanium_jar = os.path.join(self.support_dir,'titanium.jar')
self.kroll_apt_jar = os.path.join(self.support_dir, 'kroll-apt.jar')
dx = self.sdk.get_dx()
self.apkbuilder = self.sdk.get_apkbuilder()
self.sdcard_resources = '/sdcard/Ti.debug/%s/Resources' % self.app_id
self.resources_installed = False
if deploy_type == "production":
self.app_installed = False
else:
self.app_installed = not build_only and self.is_app_installed()
debug("%s installed? %s" % (self.app_id, self.app_installed))
#self.resources_installed = not build_only and self.are_resources_installed()
#debug("%s resources installed? %s" % (self.app_id, self.resources_installed))
if keystore == None:
keystore = os.path.join(self.support_dir,'dev_keystore')
self.keystore = keystore
self.keystore_pass = keystore_pass
self.keystore_alias = keystore_alias
curdir = os.getcwd()
self.support_resources_dir = os.path.join(self.support_dir, 'resources')
try:
os.chdir(self.project_dir)
self.android = Android(self.name, self.app_id, self.sdk, deploy_type, self.java)
if not os.path.exists('bin'):
os.makedirs('bin')
resources_dir = os.path.join(self.top_dir,'Resources')
self.assets_dir = os.path.join(self.project_dir,'bin','assets')
self.assets_resources_dir = os.path.join(self.assets_dir,'Resources')
if not os.path.exists(self.assets_dir):
os.makedirs(self.assets_dir)
shutil.copy(self.project_tiappxml, self.assets_dir)
finalxml = os.path.join(self.assets_dir,'tiapp.xml')
self.tiapp = TiAppXML(finalxml)
self.tiapp.setDeployType(deploy_type)
self.sdcard_copy = False
sdcard_property = "ti.android.loadfromsdcard"
if self.tiapp.has_app_property(sdcard_property):
self.sdcard_copy = self.tiapp.to_bool(self.tiapp.get_app_property(sdcard_property))
fastdev_property = "ti.android.fastdev"
fastdev_enabled = (self.deploy_type == 'development' and not self.build_only)
if self.tiapp.has_app_property(fastdev_property):
fastdev_enabled = self.tiapp.to_bool(self.tiapp.get_app_property(fastdev_property))
if fastdev_enabled:
if self.verify_fastdev():
info("Fastdev server running, deploying in Fastdev mode")
self.fastdev = True
else:
warn("Fastdev enabled, but server isn't running, deploying normally")
self.classes_dir = os.path.join(self.project_dir, 'bin', 'classes')
if not os.path.exists(self.classes_dir):
os.makedirs(self.classes_dir)
if (not debugger_host is None) and len(debugger_host) > 0:
hostport = debugger_host.split(":")
self.debugger_host = hostport[0]
self.debugger_port = int(hostport[1])
debugger_enabled = self.debugger_host != None and len(self.debugger_host) > 0
self.copy_project_resources()
last_build_info = None
built_all_modules = False
build_info_path = os.path.join(self.project_dir, 'bin', 'build_info.json')
if os.path.exists(build_info_path):
last_build_info = simplejson.loads(open(build_info_path, 'r').read())
built_all_modules = last_build_info["include_all_modules"]
include_all_ti_modules = self.fastdev
if (self.tiapp.has_app_property('ti.android.include_all_modules')):
if self.tiapp.to_bool(self.tiapp.get_app_property('ti.android.include_all_modules')):
include_all_ti_modules = True
if self.tiapp_changed or (self.js_changed and not self.fastdev) or \
self.force_rebuild or self.deploy_type == "production" or \
(self.fastdev and (not self.app_installed or not built_all_modules)):
trace("Generating Java Classes")
self.android.create(os.path.abspath(os.path.join(self.top_dir,'..')),
True, project_dir = self.top_dir, include_all_ti_modules=include_all_ti_modules)
open(build_info_path, 'w').write(simplejson.dumps({
"include_all_modules": include_all_ti_modules
}))
else:
info("Tiapp.xml unchanged, skipping class generation")
# compile resources
full_resource_dir = os.path.join(self.project_dir, self.assets_resources_dir)
compiler = Compiler(self.tiapp, full_resource_dir, self.java, self.classes_dir, self.project_dir,
include_all_modules=include_all_ti_modules)
compiler.compile()
self.compiled_files = compiler.compiled_files
self.android_jars = compiler.jar_libraries
self.merge_internal_module_resources()
if not os.path.exists(self.assets_dir):
os.makedirs(self.assets_dir)
self.resource_drawables_changed = self.copy_resource_drawables()
self.warn_dupe_drawable_folders()
# Detect which modules are being used.
# We need to know this info in a few places, so the info is saved
# in self.missing_modules and self.modules
detector = ModuleDetector(self.top_dir)
self.missing_modules, self.modules = detector.find_app_modules(self.tiapp, 'android')
self.copy_module_platform_folders()
special_resources_dir = os.path.join(self.top_dir,'platform','android')
if os.path.exists(special_resources_dir):
debug("found special platform files dir = %s" % special_resources_dir)
				# copy the global ignore list so it isn't mutated for later callers
				ignore_files = list(ignoreFiles)
				ignore_files.extend(['AndroidManifest.xml']) # don't want to overwrite build/android/AndroidManifest.xml yet
self.copy_project_platform_folder(ignoreDirs, ignore_files)
self.generate_stylesheet()
self.generate_aidl()
self.manifest_changed = self.generate_android_manifest(compiler)
my_avd = None
self.google_apis_supported = False
# find the AVD we've selected and determine if we support Google APIs
for avd_props in avd.get_avds(self.sdk):
if avd_props['id'] == avd_id:
my_avd = avd_props
self.google_apis_supported = (my_avd['name'].find('Google')!=-1 or my_avd['name'].find('APIs')!=-1)
break
if build_only:
self.google_apis_supported = True
remove_orphaned_files(resources_dir, os.path.join(self.project_dir, 'bin', 'assets', 'Resources'))
generated_classes_built = self.build_generated_classes()
# TODO: enable for "test" / device mode for debugger / fastdev
if not self.build_only and self.deploy_type == "development":
self.push_deploy_json()
self.classes_dex = os.path.join(self.project_dir, 'bin', 'classes.dex')
def jar_includer(path, isfile):
if isfile and path.endswith(".jar"): return True
return False
support_deltafy = Deltafy(self.support_dir, jar_includer)
self.support_deltas = support_deltafy.scan()
dex_built = False
if len(self.support_deltas) > 0 or generated_classes_built or self.deploy_type == "production":
# the dx.bat that ships with android in windows doesn't allow command line
# overriding of the java heap space, so we call the jar directly
if platform.system() == 'Windows':
dex_args = [self.java, '-Xmx1024M', '-Djava.ext.dirs=%s' % self.sdk.get_platform_tools_dir(), '-jar', self.sdk.get_dx_jar()]
else:
dex_args = [dx, '-JXmx1536M', '-JXX:-UseGCOverheadLimit']
dex_args += ['--dex', '--output='+self.classes_dex, self.classes_dir]
dex_args += self.android_jars
dex_args += self.module_jars
if self.deploy_type != 'production':
dex_args.append(os.path.join(self.support_dir, 'lib', 'titanium-verify.jar'))
dex_args.append(os.path.join(self.support_dir, 'lib', 'titanium-debug.jar'))
# the verifier depends on Ti.Network classes, so we may need to inject it
has_network_jar = False
for jar in self.android_jars:
if jar.endswith('titanium-network.jar'):
has_network_jar = True
break
if not has_network_jar:
dex_args.append(os.path.join(self.support_dir, 'modules', 'titanium-network.jar'))
# substitute for the debugging js jar in non production mode
for jar in self.android_jars:
if jar.endswith('js.jar'):
dex_args.remove(jar)
dex_args.append(os.path.join(self.support_dir, 'js-debug.jar'))
info("Compiling Android Resources... This could take some time")
# TODO - Document Exit message
run_result = run.run(dex_args, warning_regex=r'warning: ')
if (run_result == None):
dex_built = False
error("System Error while compiling Android classes.dex")
sys.exit(1)
else:
dex_built = True
debug("Android classes.dex built")
if dex_built or generated_classes_built or self.tiapp_changed or self.manifest_changed or not self.app_installed or not self.fastdev:
# metadata has changed, we need to do a full re-deploy
launched, launch_failed = self.package_and_deploy()
if launched:
self.run_app()
info("Deployed %s ... Application should be running." % self.name)
elif launch_failed==False and not build_only:
info("Application installed. Launch from drawer on Home Screen")
elif not build_only:
# Relaunch app if nothing was built
info("Re-launching application ... %s" % self.name)
relaunched = False
killed = False
if self.fastdev:
killed = self.fastdev_kill_app()
if not killed:
processes = self.run_adb('shell', 'ps')
for line in processes.splitlines():
columns = line.split()
if len(columns) > 1:
pid = columns[1]
id = columns[len(columns)-1]
if id == self.app_id:
self.run_adb('shell', 'kill', pid)
relaunched = True
self.run_app()
if relaunched:
info("Relaunched %s ... Application should be running." % self.name)
self.post_build()
#intermediary code for on-device debugging (later)
#if debugger_host != None:
#import debugger
#debug("connecting to debugger: %s, debugger=%s" % (debugger_host, str(debugger)))
#debugger.run(debugger_host, '127.0.0.1:5999')
finally:
os.chdir(curdir)
sys.stdout.flush()
def post_build(self):
try:
if self.postbuild_modules:
for p in self.postbuild_modules:
info("Running postbuild function in %s plugin" % p[0])
p[1].postbuild()
except Exception,e:
error("Error performing post-build steps: %s" % e)
if __name__ == "__main__":
def usage():
print "%s <command> <project_name> <sdk_dir> <project_dir> <app_id> [key] [password] [alias] [dir] [avdid] [avdsdk]" % os.path.basename(sys.argv[0])
print
print "available commands: "
print
print " emulator build and run the emulator"
print " simulator build and run the app on the simulator"
print " install build and install the app on the device"
print " distribute build final distribution package for upload to marketplace"
print " run build and run the project using values from tiapp.xml"
print " run-emulator run the emulator with a default AVD ID and skin"
sys.exit(1)
argc = len(sys.argv)
if argc < 2:
usage()
command = sys.argv[1]
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
get_values_from_tiapp = False
if command == 'run':
if argc < 4:
print 'Usage: %s run <project_dir> <android_sdk>' % sys.argv[0]
sys.exit(1)
get_values_from_tiapp = True
project_dir = sys.argv[2]
sdk_dir = sys.argv[3]
avd_id = "7"
elif command == 'run-emulator':
if argc < 4:
print 'Usage: %s run-emulator <project_dir> <android_sdk>' % sys.argv[0]
sys.exit(1)
get_values_from_tiapp = True
project_dir = sys.argv[2]
sdk_dir = sys.argv[3]
# sensible defaults?
avd_id = "7"
avd_skin = "HVGA"
else:
if argc < 6 or command == '--help' or (command=='distribute' and argc < 10):
usage()
if get_values_from_tiapp:
tiappxml = TiAppXML(os.path.join(project_dir, 'tiapp.xml'))
app_id = tiappxml.properties['id']
project_name = tiappxml.properties['name']
else:
project_name = dequote(sys.argv[2])
sdk_dir = os.path.abspath(os.path.expanduser(dequote(sys.argv[3])))
project_dir = os.path.abspath(os.path.expanduser(dequote(sys.argv[4])))
app_id = dequote(sys.argv[5])
log = TiLogger(os.path.join(os.path.abspath(os.path.expanduser(dequote(project_dir))), 'build.log'))
log.debug(" ".join(sys.argv))
s = Builder(project_name,sdk_dir,project_dir,template_dir,app_id)
s.command = command
try:
if command == 'run-emulator':
s.run_emulator(avd_id, avd_skin)
elif command == 'run':
s.build_and_run(False, avd_id)
elif command == 'emulator':
avd_id = dequote(sys.argv[6])
avd_skin = dequote(sys.argv[7])
s.run_emulator(avd_id, avd_skin)
elif command == 'simulator':
info("Building %s for Android ... one moment" % project_name)
avd_id = dequote(sys.argv[6])
debugger_host = None
if len(sys.argv) > 8:
debugger_host = dequote(sys.argv[8])
s.build_and_run(False, avd_id, debugger_host=debugger_host)
elif command == 'install':
avd_id = dequote(sys.argv[6])
device_args = ['-d']
if len(sys.argv) >= 8:
device_args = ['-s', sys.argv[7]]
s.build_and_run(True, avd_id, device_args=device_args)
elif command == 'distribute':
key = os.path.abspath(os.path.expanduser(dequote(sys.argv[6])))
password = dequote(sys.argv[7])
alias = dequote(sys.argv[8])
output_dir = dequote(sys.argv[9])
avd_id = dequote(sys.argv[10])
s.build_and_run(True, avd_id, key, password, alias, output_dir)
elif command == 'build':
s.build_and_run(False, 1, build_only=True)
else:
error("Unknown command: %s" % command)
usage()
except SystemExit, n:
sys.exit(n)
except:
e = traceback.format_exc()
error("Exception occured while building Android project:")
for line in e.splitlines():
error(line)
sys.exit(1)
|
arnaudsj/titanium_mobile
|
support/android/builder.py
|
Python
|
apache-2.0
| 74,676
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os


def testFunction(request):
    return "PASS"


# os.environ["FOO"] is only available at runtime.
print(os.environ["FOO"])
|
GoogleCloudPlatform/buildpacks
|
builders/testdata/python/functions/with_env_var/main.py
|
Python
|
apache-2.0
| 705
|