gt
stringclasses
1 value
context
stringlengths
2.49k
119k
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.i18n import i18n_patterns
from django.views.generic import TemplateView, RedirectView
from django.utils.module_loading import import_string
import os.path
import zerver.forms
from zproject import dev_urls
from zproject.legacy_urls import legacy_urls
from zerver.views.integrations import IntegrationView, APIView, HelpView
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from zerver.webhooks import github_dispatcher
from django.contrib.auth.views import (login, password_reset,
                                       password_reset_done, password_reset_confirm,
                                       password_reset_complete)

import zerver.tornado.views
import zerver.views
import zerver.views.auth
import zerver.views.compatibility
import zerver.views.home
import zerver.views.registration
import zerver.views.zephyr
import zerver.views.users
import zerver.views.unsubscribe
import zerver.views.integrations
import zerver.views.user_settings
import confirmation.views

from zerver.lib.rest import rest_dispatch

# NB: There are several other pieces of code which route requests by URL:
#
# - legacy_urls.py contains API endpoints written before the redesign
#   and should not be added to.
#
# - runtornado.py has its own URL list for Tornado views.  See the
#   invocation of web.Application in that file.
#
# - The Nginx config knows which URLs to route to Django or Tornado.
#
# - Likewise for the local dev server in tools/run-dev.py.

# These views serve pages (HTML). As such, their internationalization
# must depend on the url.
#
# If you're adding a new page to the website (as opposed to a new
# endpoint for use by code), you should add it here.
i18n_urls = [
    url(r'^$', zerver.views.home.home, name='zerver.views.home.home'),
    # We have a desktop-specific landing page in case we change our /
    # to not log in in the future. We don't want to require a new
    # desktop app build for everyone in that case
    url(r'^desktop_home/$', zerver.views.home.desktop_home,
        name='zerver.views.home.desktop_home'),

    url(r'^accounts/login/sso/$', zerver.views.auth.remote_user_sso, name='login-sso'),
    url(r'^accounts/login/jwt/$', zerver.views.auth.remote_user_jwt, name='login-jwt'),
    url(r'^accounts/login/social/(\w+)$', zerver.views.auth.start_social_login,
        name='login-social'),
    url(r'^accounts/login/google/$', zerver.views.auth.start_google_oauth2,
        name='zerver.views.auth.start_google_oauth2'),
    url(r'^accounts/login/google/send/$',
        zerver.views.auth.send_oauth_request_to_google,
        name='zerver.views.auth.send_oauth_request_to_google'),
    url(r'^accounts/login/google/done/$', zerver.views.auth.finish_google_oauth2,
        name='zerver.views.auth.finish_google_oauth2'),
    url(r'^accounts/login/subdomain/$', zerver.views.auth.log_into_subdomain,
        name='zerver.views.auth.log_into_subdomain'),
    url(r'^accounts/login/local/$', zerver.views.auth.dev_direct_login,
        name='zerver.views.auth.dev_direct_login'),
    # We have two entries for accounts/login to allow reverses on the Django
    # view we're wrapping to continue to function.
    url(r'^accounts/login/', zerver.views.auth.login_page,
        {'template_name': 'zerver/login.html'}, name='zerver.views.auth.login_page'),
    url(r'^accounts/login/', login, {'template_name': 'zerver/login.html'},
        name='django.contrib.auth.views.login'),
    url(r'^accounts/logout/', zerver.views.auth.logout_then_login,
        name='zerver.views.auth.logout_then_login'),

    url(r'^accounts/webathena_kerberos_login/',
        zerver.views.zephyr.webathena_kerberos_login,
        name='zerver.views.zephyr.webathena_kerberos_login'),

    url(r'^accounts/password/reset/$', password_reset,
        {'post_reset_redirect': '/accounts/password/reset/done/',
         'template_name': 'zerver/reset.html',
         'email_template_name': 'registration/password_reset_email.txt',
         'password_reset_form': zerver.forms.ZulipPasswordResetForm,
         }, name='django.contrib.auth.views.password_reset'),
    url(r'^accounts/password/reset/done/$', password_reset_done,
        {'template_name': 'zerver/reset_emailed.html'}),
    url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)/$',
        password_reset_confirm,
        {'post_reset_redirect': '/accounts/password/done/',
         'template_name': 'zerver/reset_confirm.html',
         'set_password_form': zerver.forms.LoggingSetPasswordForm},
        name='django.contrib.auth.views.password_reset_confirm'),
    url(r'^accounts/password/done/$', password_reset_complete,
        {'template_name': 'zerver/reset_done.html'}),

    # Avatar
    url(r'^avatar/(?P<email_or_id>[\S]+)?/(?P<medium>[\S]+)?',
        zerver.views.users.avatar, name='zerver.views.users.avatar'),
    url(r'^avatar/(?P<email_or_id>[\S]+)?',
        zerver.views.users.avatar, name='zerver.views.users.avatar'),

    # Registration views, require a confirmation ID.
    url(r'^accounts/home/', zerver.views.registration.accounts_home,
        name='zerver.views.registration.accounts_home'),
    url(r'^accounts/send_confirm/(?P<email>[\S]+)?',
        TemplateView.as_view(template_name='zerver/accounts_send_confirm.html'),
        name='send_confirm'),
    url(r'^accounts/register/', zerver.views.registration.accounts_register,
        name='zerver.views.registration.accounts_register'),
    url(r'^accounts/do_confirm/(?P<confirmation_key>[\w]+)',
        confirmation.views.confirm, name='confirmation.views.confirm'),
    url(r'^accounts/confirm_new_email/(?P<confirmation_key>[\w]+)',
        zerver.views.user_settings.confirm_email_change,
        name='zerver.views.user_settings.confirm_email_change'),

    # Email unsubscription endpoint. Allows for unsubscribing from various types of emails,
    # including the welcome emails (day 1 & 2), missed PMs, etc.
    url(r'^accounts/unsubscribe/(?P<type>[\w]+)/(?P<token>[\w]+)',
        zerver.views.unsubscribe.email_unsubscribe,
        name='zerver.views.unsubscribe.email_unsubscribe'),

    # Portico-styled page used to provide email confirmation of terms acceptance.
    url(r'^accounts/accept_terms/$', zerver.views.home.accounts_accept_terms,
        name='zerver.views.home.accounts_accept_terms'),

    # Realm Creation
    url(r'^create_realm/$', zerver.views.registration.create_realm,
        name='zerver.views.create_realm'),
    url(r'^create_realm/(?P<creation_key>[\w]+)$',
        zerver.views.registration.create_realm,
        name='zerver.views.create_realm'),

    # Login/registration
    url(r'^register/$', zerver.views.registration.accounts_home, name='register'),
    url(r'^login/$', zerver.views.auth.login_page,
        {'template_name': 'zerver/login.html'}, name='zerver.views.auth.login_page'),

    # A registration page that passes through the domain, for totally open realms.
    url(r'^register/(?P<realm_str>\S+)/$',
        zerver.views.registration.accounts_home_with_realm_str,
        name='zerver.views.registration.accounts_home_with_realm_str'),

    # API and integrations documentation
    url(r'^api/$', APIView.as_view(template_name='zerver/api.html')),
    url(r'^api/endpoints/$', zerver.views.integrations.api_endpoint_docs,
        name='zerver.views.integrations.api_endpoint_docs'),
    url(r'^integrations/$', IntegrationView.as_view()),
    url(r'^about/$', TemplateView.as_view(template_name='zerver/about.html')),
    url(r'^apps/$', TemplateView.as_view(template_name='zerver/apps.html')),

    url(r'^robots\.txt$',
        RedirectView.as_view(url='/static/robots.txt', permanent=True)),

    # Landing page, features pages, signup form, etc.
    url(r'^hello/$', TemplateView.as_view(template_name='zerver/hello.html'),
        name='landing-page'),
    url(r'^new-user/$', RedirectView.as_view(url='/hello', permanent=True)),
    url(r'^features/$', TemplateView.as_view(template_name='zerver/features.html')),
    url(r'^find_my_team/$', zerver.views.registration.find_my_team,
        name='zerver.views.registration.find_my_team'),
    url(r'^authors/$', zerver.views.users.authors_view,
        name='zerver.views.users.authors_view')
]

# If a Terms of Service is supplied, add that route
if settings.TERMS_OF_SERVICE is not None:
    i18n_urls += [url(r'^terms/$',
                      TemplateView.as_view(template_name='zerver/terms.html'))]

# Make a copy of i18n_urls so that they appear without prefix for english
urls = list(i18n_urls)

# These endpoints constitute the redesigned API (V1), which uses:
# * REST verbs
# * Basic auth (username:password is email:apiKey)
# * Take and return json-formatted data
#
# If you're adding a new endpoint to the code that requires authentication,
# please add it here.
# See rest_dispatch in zerver.lib.rest for an explanation of auth methods used
#
# All of these paths are accessed by either a /json or /api/v1 prefix
v1_api_and_json_patterns = [
    # realm-level calls
    url(r'^realm$', rest_dispatch,
        {'PATCH': 'zerver.views.realm.update_realm'}),

    # Returns a 204, used by desktop app to verify connectivity status
    url(r'generate_204$', zerver.views.registration.generate_204,
        name='zerver.views.registration.generate_204'),

    # realm/domains -> zerver.views.realm_aliases
    url(r'^realm/domains$', rest_dispatch,
        {'GET': 'zerver.views.realm_aliases.list_aliases',
         'POST': 'zerver.views.realm_aliases.create_alias'}),
    url(r'^realm/domains/(?P<domain>\S+)$', rest_dispatch,
        {'PATCH': 'zerver.views.realm_aliases.patch_alias',
         'DELETE': 'zerver.views.realm_aliases.delete_alias'}),

    # realm/emoji -> zerver.views.realm_emoji
    url(r'^realm/emoji$', rest_dispatch,
        {'GET': 'zerver.views.realm_emoji.list_emoji'}),
    url(r'^realm/emoji/(?P<emoji_name>.*)$', rest_dispatch,
        {'PUT': 'zerver.views.realm_emoji.upload_emoji',
         'DELETE': 'zerver.views.realm_emoji.delete_emoji'}),

    # realm/icon -> zerver.views.realm_icon
    url(r'^realm/icon$', rest_dispatch,
        {'PUT': 'zerver.views.realm_icon.upload_icon',
         'DELETE': 'zerver.views.realm_icon.delete_icon_backend',
         'GET': 'zerver.views.realm_icon.get_icon_backend'}),

    # realm/filters -> zerver.views.realm_filters
    url(r'^realm/filters$', rest_dispatch,
        {'GET': 'zerver.views.realm_filters.list_filters',
         'POST': 'zerver.views.realm_filters.create_filter'}),
    url(r'^realm/filters/(?P<filter_id>\d+)$', rest_dispatch,
        {'DELETE': 'zerver.views.realm_filters.delete_filter'}),

    # users -> zerver.views.users
    #
    # Since some of these endpoints do something different if used on
    # yourself with `/me` as the email, we need to make sure that we
    # don't accidentally trigger these.  The cleanest way to do that
    # is to add a regular expression assertion that it isn't `/me/`
    # (or ends with `/me`, in the case of hitting the root URL).
    url(r'^users$', rest_dispatch,
        {'GET': 'zerver.views.users.get_members_backend',
         'POST': 'zerver.views.users.create_user_backend'}),
    url(r'^users/(?!me/)(?P<email>[^/]*)/reactivate$', rest_dispatch,
        {'POST': 'zerver.views.users.reactivate_user_backend'}),
    url(r'^users/(?!me/)(?P<email>[^/]*)/presence$', rest_dispatch,
        {'GET': 'zerver.views.presence.get_presence_backend'}),
    url(r'^users/(?!me$)(?P<email>[^/]*)$', rest_dispatch,
        {'PATCH': 'zerver.views.users.update_user_backend',
         'DELETE': 'zerver.views.users.deactivate_user_backend'}),
    url(r'^bots$', rest_dispatch,
        {'GET': 'zerver.views.users.get_bots_backend',
         'POST': 'zerver.views.users.add_bot_backend'}),
    url(r'^bots/(?!me/)(?P<email>[^/]*)/api_key/regenerate$', rest_dispatch,
        {'POST': 'zerver.views.users.regenerate_bot_api_key'}),
    url(r'^bots/(?!me/)(?P<email>[^/]*)$', rest_dispatch,
        {'PATCH': 'zerver.views.users.patch_bot_backend',
         'DELETE': 'zerver.views.users.deactivate_bot_backend'}),

    # messages -> zerver.views.messages
    # GET returns messages, possibly filtered, POST sends a message
    url(r'^messages$', rest_dispatch,
        {'GET': 'zerver.views.messages.get_old_messages_backend',
         'POST': 'zerver.views.messages.send_message_backend'}),
    url(r'^messages/(?P<message_id>[0-9]+)$', rest_dispatch,
        {'GET': 'zerver.views.messages.json_fetch_raw_message',
         'PATCH': 'zerver.views.messages.update_message_backend'}),
    url(r'^messages/render$', rest_dispatch,
        {'POST': 'zerver.views.messages.render_message_backend'}),
    url(r'^messages/flags$', rest_dispatch,
        {'POST': 'zerver.views.messages.update_message_flags'}),
    url(r'^messages/(?P<message_id>\d+)/history$', rest_dispatch,
        {'GET': 'zerver.views.messages.get_message_edit_history'}),

    # reactions -> zerver.view.reactions
    # PUT adds a reaction to a message
    # DELETE removes a reaction from a message
    url(r'^messages/(?P<message_id>[0-9]+)/emoji_reactions/(?P<emoji_name>.*)$',
        rest_dispatch,
        {'PUT': 'zerver.views.reactions.add_reaction_backend',
         'DELETE': 'zerver.views.reactions.remove_reaction_backend'}),

    # attachments -> zerver.views.attachments
    url(r'^attachments$', rest_dispatch,
        {'GET': 'zerver.views.attachments.list_by_user'}),
    url(r'^attachments/(?P<attachment_id>[0-9]+)$', rest_dispatch,
        {'DELETE': 'zerver.views.attachments.remove'}),

    # typing -> zerver.views.typing
    # POST sends a typing notification event to recipients
    url(r'^typing$', rest_dispatch,
        {'POST': 'zerver.views.typing.send_notification_backend'}),

    # user_uploads -> zerver.views.upload
    url(r'^user_uploads$', rest_dispatch,
        {'POST': 'zerver.views.upload.upload_file_backend'}),

    # invite -> zerver.views.invite
    url(r'^invite/bulk$', rest_dispatch,
        {'POST': 'zerver.views.invite.bulk_invite_users'}),

    # users/me -> zerver.views
    url(r'^users/me$', rest_dispatch,
        {'GET': 'zerver.views.users.get_profile_backend',
         'DELETE': 'zerver.views.users.deactivate_user_own_backend'}),
    # PUT is currently used by mobile apps, we intend to remove the PUT version
    # as soon as possible. POST exists to correct the erroneous use of PUT.
    url(r'^users/me/pointer$', rest_dispatch,
        {'GET': 'zerver.views.pointer.get_pointer_backend',
         'PUT': 'zerver.views.pointer.update_pointer_backend',
         'POST': 'zerver.views.pointer.update_pointer_backend'}),
    url(r'^users/me/presence$', rest_dispatch,
        {'POST': 'zerver.views.presence.update_active_status_backend'}),
    # Endpoint used by mobile devices to register their push
    # notification credentials
    url(r'^users/me/apns_device_token$', rest_dispatch,
        {'POST': 'zerver.views.push_notifications.add_apns_device_token',
         'DELETE': 'zerver.views.push_notifications.remove_apns_device_token'}),
    url(r'^users/me/android_gcm_reg_id$', rest_dispatch,
        {'POST': 'zerver.views.push_notifications.add_android_reg_id',
         'DELETE': 'zerver.views.push_notifications.remove_android_reg_id'}),

    # users/me -> zerver.views.user_settings
    url(r'^users/me/api_key/regenerate$', rest_dispatch,
        {'POST': 'zerver.views.user_settings.regenerate_api_key'}),
    url(r'^users/me/enter-sends$', rest_dispatch,
        {'POST': 'zerver.views.user_settings.change_enter_sends'}),
    url(r'^users/me/avatar$', rest_dispatch,
        {'PUT': 'zerver.views.user_settings.set_avatar_backend',
         'DELETE': 'zerver.views.user_settings.delete_avatar_backend'}),

    # settings -> zerver.views.user_settings
    url(r'^settings/display$', rest_dispatch,
        {'PATCH': 'zerver.views.user_settings.update_display_settings_backend'}),
    url(r'^settings/notifications$', rest_dispatch,
        {'PATCH': 'zerver.views.user_settings.json_change_notify_settings'}),
    url(r'^settings/ui$', rest_dispatch,
        {'PATCH': 'zerver.views.user_settings.json_change_ui_settings'}),

    # users/me/alert_words -> zerver.views.alert_words
    url(r'^users/me/alert_words$', rest_dispatch,
        {'GET': 'zerver.views.alert_words.list_alert_words',
         'POST': 'zerver.views.alert_words.set_alert_words',
         'PUT': 'zerver.views.alert_words.add_alert_words',
         'DELETE': 'zerver.views.alert_words.remove_alert_words'}),

    url(r'^users/me/(?P<stream_id>\d+)/topics$', rest_dispatch,
        {'GET': 'zerver.views.streams.get_topics_backend'}),

    # streams -> zerver.views.streams
    # (this API is only used externally)
    url(r'^streams$', rest_dispatch,
        {'GET': 'zerver.views.streams.get_streams_backend'}),

    # GET returns `stream_id`, stream name should be encoded in the url query (in `stream` param)
    url(r'^get_stream_id', rest_dispatch,
        {'GET': 'zerver.views.streams.json_get_stream_id'}),

    # GET returns "stream info" (undefined currently?), HEAD returns whether stream exists (200 or 404)
    url(r'^streams/(?P<stream_id>\d+)/members$', rest_dispatch,
        {'GET': 'zerver.views.streams.get_subscribers_backend'}),
    url(r'^streams/(?P<stream_id>\d+)$', rest_dispatch,
        {'PATCH': 'zerver.views.streams.update_stream_backend',
         'DELETE': 'zerver.views.streams.deactivate_stream_backend'}),
    url(r'^default_streams$', rest_dispatch,
        {'POST': 'zerver.views.streams.add_default_stream',
         'DELETE': 'zerver.views.streams.remove_default_stream'}),
    # GET lists your streams, POST bulk adds, PATCH bulk modifies/removes
    url(r'^users/me/subscriptions$', rest_dispatch,
        {'GET': 'zerver.views.streams.list_subscriptions_backend',
         'POST': 'zerver.views.streams.add_subscriptions_backend',
         'PATCH': 'zerver.views.streams.update_subscriptions_backend',
         'DELETE': 'zerver.views.streams.remove_subscriptions_backend'}),

    # muting -> zerver.views.muting
    url(r'^users/me/subscriptions/muted_topics$', rest_dispatch,
        {'POST': 'zerver.views.muting.set_muted_topics'}),

    # used to register for an event queue in tornado
    url(r'^register$', rest_dispatch,
        {'POST': 'zerver.views.events_register.events_register_backend'}),

    # events -> zerver.tornado.views
    url(r'^events$', rest_dispatch,
        {'GET': 'zerver.tornado.views.get_events_backend',
         'DELETE': 'zerver.tornado.views.cleanup_event_queue'}),
]

# Include the dual-use patterns twice
urls += [
    url(r'^api/v1/', include(v1_api_and_json_patterns)),
    url(r'^json/', include(v1_api_and_json_patterns)),
]

# user_uploads -> zerver.views.upload.serve_file_backend
#
# This url is an exception to the url naming schemes for endpoints.  It
# supports both API and session cookie authentication, using a single
# URL for both (not 'api/v1/' or 'json/' prefix). This is required to
# easily support the mobile apps fetching uploaded files without
# having to rewrite URLs, and is implemented using the
# 'override_api_url_scheme' flag passed to rest_dispatch
#
# NOTE: the original code appended a one-element *tuple* here
# (`urls += url(...),`); an explicit list is equivalent (list += iterable
# extends) and far less error-prone.
urls += [
    url(r'^user_uploads/(?P<realm_id_str>(\d*|unk))/(?P<filename>.*)',
        rest_dispatch,
        {'GET': ('zerver.views.upload.serve_file_backend',
                 {'override_api_url_scheme'})}),
]

# Incoming webhook URLs
# We don't create urls for particular git integrations here
# because of generic one below
for incoming_webhook in WEBHOOK_INTEGRATIONS:
    if incoming_webhook.url_object:
        urls.append(incoming_webhook.url_object)

urls.append(url(r'^api/v1/external/github',
                github_dispatcher.api_github_webhook_dispatch))

# Mobile-specific authentication URLs
urls += [
    # This json format view used by the mobile apps lists which authentication
    # backends the server allows, to display the proper UI and check for server existence
    url(r'^api/v1/get_auth_backends', zerver.views.auth.api_get_auth_backends,
        name='zerver.views.auth.api_get_auth_backends'),
    # used by mobile apps to check if they are compatible with the server
    url(r'^compatibility$', zerver.views.compatibility.check_compatibility),
    # This json format view used by the mobile apps accepts a username
    # password/pair and returns an API key.
    url(r'^api/v1/fetch_api_key$', zerver.views.auth.api_fetch_api_key,
        name='zerver.views.auth.api_fetch_api_key'),
    # This is for the signing in through the devAuthBackEnd on mobile apps.
    url(r'^api/v1/dev_fetch_api_key$', zerver.views.auth.api_dev_fetch_api_key,
        name='zerver.views.auth.api_dev_fetch_api_key'),
    # This is for fetching the emails of the admins and the users.
    url(r'^api/v1/dev_get_emails$', zerver.views.auth.api_dev_get_emails,
        name='zerver.views.auth.api_dev_get_emails'),
    # Used to present the GOOGLE_CLIENT_ID to mobile apps
    url(r'^api/v1/fetch_google_client_id$',
        zerver.views.auth.api_fetch_google_client_id,
        name='zerver.views.auth.api_fetch_google_client_id'),
]

# Include URL configuration files for site-specified extra installed
# Django apps
for app_name in settings.EXTRA_INSTALLED_APPS:
    app_dir = os.path.join(settings.DEPLOY_ROOT, app_name)
    if os.path.exists(os.path.join(app_dir, 'urls.py')):
        urls += [url(r'^', include('%s.urls' % (app_name,)))]
        i18n_urls += import_string("{}.urls.i18n_urlpatterns".format(app_name))

# Tornado views
urls += [
    # Used internally for communication between Django and Tornado processes
    url(r'^notify_tornado$', zerver.tornado.views.notify,
        name='zerver.tornado.views.notify'),
]

# Python Social Auth
urls += [url(r'^', include('social_django.urls', namespace='social'))]

# User documentation site
urls += [url(r'^help/(?P<article>.*)$',
             HelpView.as_view(template_name='zerver/help/main.html'))]

if settings.DEVELOPMENT:
    urls += dev_urls.urls
    i18n_urls += dev_urls.i18n_urls

# The sequence is important; if i18n urls don't come first then
# reverse url mapping points to i18n urls which causes the frontend
# tests to fail
urlpatterns = i18n_patterns(*i18n_urls) + urls + legacy_urls
import os.path
import threading
import tempfile
import sys

import mitmproxy.platform
from mitmproxy.proxy.config import ProxyConfig
from mitmproxy.proxy.server import ProxyServer
from mitmproxy import controller
from mitmproxy import options
from mitmproxy import exceptions
from mitmproxy import io
import pathod.test
import pathod.pathoc
from mitmproxy import eventsequence
from mitmproxy.test import tflow
from mitmproxy.test import tutils
from mitmproxy.test import taddons


class MasterTest:
    """Mixin with helpers to drive a master through synthetic flows."""

    def cycle(self, master, content):
        """Run one full client lifecycle (connect, flow events, disconnect)
        through the master's addons and return the flow."""
        f = tflow.tflow(req=tutils.treq(content=content))
        master.addons.handle_lifecycle("clientconnect", f.client_conn)
        for i in eventsequence.iterate(f):
            master.addons.handle_lifecycle(*i)
        master.addons.handle_lifecycle("clientdisconnect", f.client_conn)
        return f

    def dummy_cycle(self, master, n, content):
        """Run `cycle` n times, then shut the master down."""
        for i in range(n):
            self.cycle(master, content)
        master.shutdown()

    def flowfile(self, path):
        """Write a single recorded flow (with response) to `path`."""
        with open(path, "wb") as f:
            fw = io.FlowWriter(f)
            t = tflow.tflow(resp=True)
            fw.add(t)


class TestState:
    """Addon that records every flow it sees (request/response/websocket)."""

    def __init__(self):
        self.flows = []

    def request(self, f):
        if f not in self.flows:
            self.flows.append(f)

    def response(self, f):
        if f not in self.flows:
            self.flows.append(f)

    def websocket_start(self, f):
        if f not in self.flows:
            self.flows.append(f)

    # TODO: add TCP support?
    # def tcp_start(self, f):
    #     if f not in self.flows:
    #         self.flows.append(f)


class TestMaster(taddons.RecordingMaster):
    """Recording master wired to a real ProxyServer, with swappable addons."""

    def __init__(self, opts, config):
        s = ProxyServer(config)
        super().__init__(opts, s)

    def clear_addons(self, addons):
        """Replace all addons with a fresh TestState plus `addons`."""
        self.addons.clear()
        self.state = TestState()
        self.addons.add(self.state)
        self.addons.add(*addons)
        self.addons.configure_all(self.options, self.options.keys())
        self.addons.trigger("running")

    def reset(self, addons):
        self.clear_addons(addons)
        self.clear()


class ProxyThread(threading.Thread):
    """Runs a TestMaster in a background thread."""

    def __init__(self, tmaster):
        threading.Thread.__init__(self)
        self.tmaster = tmaster
        self.name = "ProxyThread (%s:%s)" % (
            tmaster.server.address[0],
            tmaster.server.address[1],
        )
        controller.should_exit = False

    @property
    def port(self):
        return self.tmaster.server.address[1]

    @property
    def tlog(self):
        return self.tmaster.logs

    def run(self):
        self.tmaster.run()

    def shutdown(self):
        self.tmaster.shutdown()


class ProxyTestBase:
    """Base class: one proxy thread plus two pathod backend daemons."""

    # Test Configuration
    ssl = None
    ssloptions = False
    masterclass = TestMaster
    add_upstream_certs_to_client_chain = False

    @classmethod
    def setup_class(cls):
        cls.server = pathod.test.Daemon(
            ssl=cls.ssl,
            ssloptions=cls.ssloptions)
        cls.server2 = pathod.test.Daemon(
            ssl=cls.ssl,
            ssloptions=cls.ssloptions)

        opts = cls.get_options()
        cls.config = ProxyConfig(opts)

        tmaster = cls.masterclass(opts, cls.config)
        cls.proxy = ProxyThread(tmaster)
        cls.proxy.start()

    @classmethod
    def teardown_class(cls):
        # perf: we want to run tests in parallel
        # should this ever cause an error, travis should catch it.
        # shutil.rmtree(cls.cadir)
        cls.proxy.shutdown()
        cls.server.shutdown()
        cls.server2.shutdown()

    def teardown(self):
        try:
            self.server.wait_for_silence()
        except exceptions.Timeout:
            # FIXME: Track down the Windows sync issues
            if sys.platform != "win32":
                raise

    def setup(self):
        self.master.reset(self.addons())
        self.server.clear_log()
        self.server2.clear_log()

    @property
    def master(self):
        return self.proxy.tmaster

    @classmethod
    def get_options(cls):
        cls.cadir = os.path.join(tempfile.gettempdir(), "mitmproxy")
        return options.Options(
            listen_port=0,
            cadir=cls.cadir,
            add_upstream_certs_to_client_chain=cls.add_upstream_certs_to_client_chain,
            ssl_insecure=True,
        )

    def addons(self):
        """
        Can be overridden to add a standard set of addons to tests.
        """
        return []


class LazyPathoc(pathod.pathoc.Pathoc):
    """Pathoc subclass that defers the connect target to connect() time."""

    def __init__(self, lazy_connect, *args, **kwargs):
        self.lazy_connect = lazy_connect
        pathod.pathoc.Pathoc.__init__(self, *args, **kwargs)

    def connect(self):
        return pathod.pathoc.Pathoc.connect(self, self.lazy_connect)


class HTTPProxyTest(ProxyTestBase):

    def pathoc_raw(self):
        return pathod.pathoc.Pathoc(("127.0.0.1", self.proxy.port), fp=None)

    def pathoc(self, sni=None):
        """
        Returns a connected Pathoc instance.
        """
        if self.ssl:
            conn = ("127.0.0.1", self.server.port)
        else:
            conn = None
        return LazyPathoc(
            conn,
            ("localhost", self.proxy.port), ssl=self.ssl, sni=sni, fp=None
        )

    def pathod(self, spec, sni=None):
        """
        Constructs a pathod GET request, with the appropriate base and proxy.
        """
        p = self.pathoc(sni=sni)
        if self.ssl:
            q = "get:'/p/%s'" % spec
        else:
            q = "get:'%s/p/%s'" % (self.server.urlbase, spec)
        with p.connect():
            return p.request(q)

    def app(self, page):
        if self.ssl:
            p = pathod.pathoc.Pathoc(
                ("127.0.0.1", self.proxy.port), True, fp=None
            )
            with p.connect((options.APP_HOST, options.APP_PORT)):
                return p.request("get:'%s'" % page)
        else:
            p = self.pathoc()
            with p.connect():
                return p.request("get:'http://%s%s'" % (options.APP_HOST, page))


class TransparentProxyTest(ProxyTestBase):
    ssl = None

    @classmethod
    def setup_class(cls):
        # Monkeypatch the platform layer so "transparent" mode resolves to
        # our local pathod server; restored in teardown_class.
        cls._init_transparent_mode = mitmproxy.platform.init_transparent_mode
        cls._original_addr = mitmproxy.platform.original_addr
        mitmproxy.platform.init_transparent_mode = lambda: True
        mitmproxy.platform.original_addr = lambda sock: ("127.0.0.1", cls.server.port)
        super().setup_class()

    @classmethod
    def teardown_class(cls):
        super().teardown_class()
        mitmproxy.platform.init_transparent_mode = cls._init_transparent_mode
        mitmproxy.platform.original_addr = cls._original_addr

    @classmethod
    def get_options(cls):
        opts = ProxyTestBase.get_options()
        opts.mode = "transparent"
        return opts

    def pathod(self, spec, sni=None):
        """
        Constructs a pathod GET request, with the appropriate base and proxy.
        """
        if self.ssl:
            p = self.pathoc(sni=sni)
            q = "get:'/p/%s'" % spec
        else:
            p = self.pathoc()
            q = "get:'/p/%s'" % spec
        with p.connect():
            return p.request(q)

    def pathoc(self, sni=None):
        """
        Returns a connected Pathoc instance.
        """
        p = pathod.pathoc.Pathoc(
            ("localhost", self.proxy.port), ssl=self.ssl, sni=sni, fp=None
        )
        return p


class ReverseProxyTest(ProxyTestBase):
    ssl = None

    @classmethod
    def get_options(cls):
        opts = ProxyTestBase.get_options()
        s = "".join(
            [
                "https" if cls.ssl else "http",
                "://",
                "127.0.0.1:",
                str(cls.server.port)
            ]
        )
        opts.mode = "reverse:" + s
        return opts

    def pathoc(self, sni=None):
        """
        Returns a connected Pathoc instance.
        """
        p = pathod.pathoc.Pathoc(
            ("localhost", self.proxy.port), ssl=self.ssl, sni=sni, fp=None
        )
        return p

    def pathod(self, spec, sni=None):
        """
        Constructs a pathod GET request, with the appropriate base and proxy.
        """
        if self.ssl:
            p = self.pathoc(sni=sni)
            q = "get:'/p/%s'" % spec
        else:
            p = self.pathoc()
            q = "get:'/p/%s'" % spec
        with p.connect():
            return p.request(q)


class SocksModeTest(HTTPProxyTest):

    @classmethod
    def get_options(cls):
        opts = ProxyTestBase.get_options()
        opts.mode = "socks5"
        return opts


class ChainProxyTest(ProxyTestBase):
    """
    Chain three instances of mitmproxy in a row to test upstream mode.
    Proxy order is cls.proxy -> cls.chain[0] -> cls.chain[1]
    cls.proxy and cls.chain[0] are in upstream mode,
    cls.chain[1] is in regular mode.
    """
    chain = None
    n = 2

    @classmethod
    def setup_class(cls):
        cls.chain = []
        super().setup_class()
        for _ in range(cls.n):
            opts = cls.get_options()
            config = ProxyConfig(opts)
            tmaster = cls.masterclass(opts, config)
            proxy = ProxyThread(tmaster)
            proxy.start()
            cls.chain.insert(0, proxy)

        # Patch the original proxy to upstream mode
        opts = cls.get_options()
        cls.config = cls.proxy.tmaster.config = cls.proxy.tmaster.server.config = ProxyConfig(opts)

    @classmethod
    def teardown_class(cls):
        super().teardown_class()
        for proxy in cls.chain:
            proxy.shutdown()

    def setup(self):
        super().setup()
        for proxy in self.chain:
            proxy.tmaster.reset(self.addons())

    @classmethod
    def get_options(cls):
        opts = super().get_options()
        if cls.chain:  # First proxy is in normal mode.
            s = "http://127.0.0.1:%s" % cls.chain[0].port
            opts.update(
                mode="upstream:" + s,
            )
        return opts


class HTTPUpstreamProxyTest(ChainProxyTest, HTTPProxyTest):
    pass
"""
The ``fluent_contents_tags`` module provides two template tags for rendering placeholders:
It can be loaded using:

.. code-block:: html+django

    {% load fluent_contents_tags %}

A placeholder which is stored in a :class:`~fluent_contents.models.PlaceholderField` can
be rendered with the following syntax:

.. code-block:: html+django

    {% render_placeholder someobject.placeholder %}

To support CMS interfaces, placeholder slots can be defined in the template.
This is done using the following syntax:

.. code-block:: html+django

    {% page_placeholder currentpage "slotname" %}
    {% page_placeholder currentpage "slotname" title="Admin title" role="main" %}

The CMS interface can scan for those tags using the :ref:`fluent_contents.analyzer` module.
"""
import six
from django.conf import settings
from django.db.models import Manager
from django.forms import Media
from django.template import Library, Variable, TemplateSyntaxError
from fluent_contents.models import Placeholder, ImmutableMedia
from fluent_contents import rendering
from tag_parser import parse_token_kwargs, parse_as_var
from tag_parser.basetags import BaseNode, BaseAssignmentOrOutputNode
from fluent_contents import appsettings
from fluent_contents.rendering import get_cached_placeholder_output
from fluent_contents.utils.templatetags import is_true, extract_literal, extract_literal_bool

register = Library()


@register.tag
def page_placeholder(parser, token):
    """
    Render a placeholder for a given object. Syntax:

    .. code-block:: html+django

        {% page_placeholder currentpage "slotname" %}

    Additionally, extra meta information can be provided for the admin interface.

    .. code-block:: html+django

        {% page_placeholder currentpage "slotname" title="Tab title" role="main" %}

    If the currentpage variable is named ``page``, it can be left out.

    The extra information can be extracted with the
    :func:`~PagePlaceholderNode.get_title` and :func:`~PagePlaceholderNode.get_role`
    functions of the :class:`~PagePlaceholderNode` class.

    Optionally, a template can be used to render the placeholder:

    .. code-block:: html+django

        {% page_placeholder currentpage "slotname" template="mysite/parts/slot_placeholder.html" %}

    That template should loop over the content items, for example:

    .. code-block:: html+django

        {% for contentitem, html in contentitems %}
          {% if not forloop.first %}<div class="splitter"></div>{% endif %}
          {{ html }}
        {% endfor %}

    .. note::
       When a template is used, the system assumes that the output can change per request.
       Hence, the output of individual items will be cached, but the final merged output
       is no longer cached. Add ``cachable=True`` to enable output caching for templates too.
    """
    return PagePlaceholderNode.parse(parser, token)


class PagePlaceholderNode(BaseAssignmentOrOutputNode):
    """
    The template node of the ``page_placeholder`` tag.
    It renders a placeholder of a provided parent object.
    The template tag can also contain additional metadata,
    which can be returned by scanning for this node using the
    :ref:`fluent_contents.analyzer` module.
    """
    allowed_kwargs = ('title', 'role', 'template', 'cachable', 'fallback')
    allowed_meta_kwargs = ('title', 'role')
    min_args = 1
    max_args = 2

    def __init__(self, tag_name, as_var, parent_expr, slot_expr, **kwargs):
        super(PagePlaceholderNode, self).__init__(tag_name, as_var, parent_expr, slot_expr, **kwargs)
        self.slot_expr = slot_expr

        # Move some arguments outside the regular "kwargs"
        # because they don't need to be parsed as variables.
        # Those are the remaining non-functional args for CMS admin page.
        self.meta_kwargs = {}
        for arg in self.allowed_meta_kwargs:
            try:
                self.meta_kwargs[arg] = kwargs.pop(arg)
            except KeyError:
                pass

    @classmethod
    def parse(cls, parser, token):
        """
        Parse the node syntax:

        .. code-block:: html+django

            {% page_placeholder parentobj slotname title="test" role="m" %}
        """
        bits, as_var = parse_as_var(parser, token)
        tag_name, args, kwargs = parse_token_kwargs(
            parser, bits,
            allowed_kwargs=cls.allowed_kwargs,
            compile_args=True,
            compile_kwargs=True)

        # Play with the arguments
        if len(args) == 2:
            parent_expr = args[0]
            slot_expr = args[1]
        elif len(args) == 1:
            # Allow 'page' by default. Works with most CMS'es, including django-fluent-pages.
            parent_expr = Variable('page')
            slot_expr = args[0]
        else:
            raise TemplateSyntaxError("""{0} tag allows two arguments: 'parent object' 'slot name' and optionally: title=".." role="..".""".format(tag_name))

        cls.validate_args(tag_name, *args, **kwargs)
        return cls(
            tag_name=tag_name,
            as_var=as_var,
            parent_expr=parent_expr,
            slot_expr=slot_expr,
            **kwargs
        )

    def get_slot(self):
        """
        Return the string literal that is used for the placeholder slot in the template.
        When the variable is not a string literal, ``None`` is returned.
        """
        return extract_literal(self.slot_expr)

    def get_title(self):
        """
        Return the string literal that is used in the template.
        The title is used in the admin screens.
        """
        try:
            return extract_literal(self.meta_kwargs['title'])
        except KeyError:
            slot = self.get_slot()
            if slot is not None:
                return slot.replace('_', ' ').title()
            return None

    def get_role(self):
        """
        Return the string literal that is used in the template.
        The role can be "main", "sidebar" or "related", or shorted to "m", "s", "r".
        """
        try:
            return extract_literal(self.meta_kwargs['role'])
        except KeyError:
            return None

    def get_fallback_language(self):
        """
        Return whether to use the fallback language.
        """
        try:
            # Note: currently not supporting strings yet.
            return extract_literal_bool(self.kwargs['fallback']) or None
        except KeyError:
            return False

    def get_value(self, context, *tag_args, **tag_kwargs):
        request = self.get_request(context)
        output = None

        # Process arguments
        parent, slot = tag_args
        template_name = tag_kwargs.get('template', None)
        # default: True unless there is a template.
        cachable = is_true(tag_kwargs.get('cachable', not bool(template_name)))
        fallback_language = is_true(tag_kwargs.get('fallback', False))

        if template_name and cachable and not extract_literal(self.kwargs['template']):
            # If the template name originates from a variable, it can change any time.
            # It's not possible to create a reliable output cache for for that,
            # as it would have to include any possible template name in the key.
            raise TemplateSyntaxError("{0} tag does not allow 'cachable' for variable template names!".format(self.tag_name))

        if appsettings.FLUENT_CONTENTS_CACHE_OUTPUT \
                and appsettings.FLUENT_CONTENTS_CACHE_PLACEHOLDER_OUTPUT \
                and cachable:
            # See if the entire placeholder output is cached,
            # if so, no database queries have to be performed.
            # This will be omitted when an template is used,
            # because there is no way to expire that or tell whether that template is cacheable.
            output = get_cached_placeholder_output(parent, slot)

        if output is None:
            # Get the placeholder
            try:
                placeholder = Placeholder.objects.get_by_slot(parent, slot)
            except Placeholder.DoesNotExist:
                return "<!-- placeholder '{0}' does not yet exist -->".format(slot)

            output = rendering.render_placeholder(
                request, placeholder, parent,
                template_name=template_name,
                cachable=cachable,
                limit_parent_language=True,
                fallback_language=fallback_language
            )

        # Assume it doesn't hurt. TODO: should this be optional?
        rendering.register_frontend_media(request, output.media)
        return output.html


@register.tag
def render_placeholder(parser, token):
    """
    Render a shared placeholder. Syntax:

    .. code-block:: html+django

        {% render_placeholder someobject.placeholder %}
    """
    return RenderPlaceholderNode.parse(parser, token)


class RenderPlaceholderNode(BaseAssignmentOrOutputNode):
    """
    The template node of the ``render_placeholder`` tag.
    It renders the provided placeholder object.
    """
    min_args = 1
    max_args = 1
    allowed_kwargs = ('template', 'cachable', 'fallback')

    @classmethod
    def validate_args(cls, tag_name, *args, **kwargs):
        if len(args) != 1:
            raise TemplateSyntaxError("""{0} tag allows only one parameter: a placeholder object.""".format(tag_name))

        super(RenderPlaceholderNode, cls).validate_args(tag_name, *args, **kwargs)

    def get_value(self, context, *tag_args, **tag_kwargs):
        request = self.get_request(context)

        # Parse arguments
        try:
            placeholder = _get_placeholder_arg(self.args[0], tag_args[0])
        except RuntimeWarning as e:
            return u"<!-- {0} -->".format(e)

        template_name = tag_kwargs.get('template', None)
        # default: True unless there is a template.
        cachable = is_true(tag_kwargs.get('cachable', not bool(template_name)))
        fallback_language = is_true(tag_kwargs.get('fallback', False))

        if template_name and cachable and not extract_literal(self.kwargs['template']):
            # If the template name originates from a variable, it can change any time.
            # See PagePlaceholderNode.render_tag() why this is not allowed.
            raise TemplateSyntaxError("{0} tag does not allow 'cachable' for variable template names!".format(self.tag_name))

        # Fetching placeholder.parent should not cause queries if fetched via PlaceholderFieldDescriptor.
        # See render_placeholder() for more details
        output = rendering.render_placeholder(
            request, placeholder, placeholder.parent,
            template_name=template_name,
            cachable=cachable,
            limit_parent_language=True,
            fallback_language=fallback_language
        )
        # Need to track frontend media here, as the template tag can't return it.
        rendering.register_frontend_media(request, output.media)
        # NOTE(review): SOURCE is truncated at this point; the method likely
        # ends with `return output.html` (mirroring PagePlaceholderNode.get_value),
        # but that tail is not visible here — confirm against the upstream file
        # before relying on this module's end.
return output.html def _get_placeholder_arg(arg_name, placeholder): """ Validate and return the Placeholder object that the template variable points to. """ if placeholder is None: raise RuntimeWarning(u"placeholder object is None") elif isinstance(placeholder, Placeholder): return placeholder elif isinstance(placeholder, Manager): manager = placeholder try: parent_object = manager.instance # read RelatedManager code except AttributeError: parent_object = None try: placeholder = manager.all()[0] if parent_object is not None: placeholder.parent = parent_object # Fill GFK cache return placeholder except IndexError: raise RuntimeWarning(u"No placeholders found for query '{0}.all.0'".format(arg_name)) else: raise ValueError(u"The field '{0}' does not refer to a placeholder object!".format(arg_name)) @register.tag def render_content_items_media(parser, token): """ Render the JS/CSS includes for the media which was collected during the handling of the request. This tag should be placed at the bottom of the page. .. code-block:: html+django {% render_content_items_media %} {% render_content_items_media css %} {% render_content_items_media js %} {% render_content_items_media js local %} {% render_content_items_media js external %} """ return RenderContentItemsMedia.parse(parser, token) class RenderContentItemsMedia(BaseNode): """ The template node of the ``render_plugin_media`` tag. It renders the media object object. 
""" compile_args = False compile_kwargs = False min_args = 0 max_args = 2 @classmethod def validate_args(cls, tag_name, *args, **kwargs): super(RenderContentItemsMedia, cls).validate_args(tag_name, *args, **kwargs) if args: if args[0] not in ('css', 'js'): raise TemplateSyntaxError("'{0}' tag only supports `css` or `js` as first argument".format(tag_name)) if len(args) > 1 and args[1] not in ('local', 'external'): raise TemplateSyntaxError("'{0}' tag only supports `local` or `external` as second argument".format(tag_name)) def render_tag(self, context, media_type=None, domain=None): request = self.get_request(context) media = rendering.get_frontend_media(request) if not media or not (media._js or media._css): return u'' if not media_type: return media.render() elif media_type == 'js': if domain: media = _split_js(media, domain) return u'\n'.join(media.render_js()) elif media_type == 'css': if domain: media = _split_css(media, domain) return u'\n'.join(media.render_css()) else: return '' if settings.STATIC_URL is None: _LOCAL_PREFIX = settings.MEDIA_URL # backwards compatibility else: _LOCAL_PREFIX = settings.STATIC_URL def _is_local(url): # URL can be http:// if that's what's also in STATIC_URL. # Otherwise, the domain is external. return not url.startswith(('//', 'http://', 'https://')) or url.startswith(_LOCAL_PREFIX) def _split_js(media, domain): """ Extract the local or external URLs from a Media object. """ # Read internal property without creating new Media instance. if not media._js: return ImmutableMedia.empty_instance needs_local = domain == 'local' new_js = [] for url in media._js: if needs_local == _is_local(url): new_js.append(url) if not new_js: return ImmutableMedia.empty_instance else: return Media(js=new_js) def _split_css(media, domain): """ Extract the local or external URLs from a Media object. """ # Read internal property without creating new Media instance. 
if not media._css: return ImmutableMedia.empty_instance needs_local = domain == 'local' new_css = {} for medium, url in six.iteritems(media._css): if needs_local == _is_local(url): new_css.setdefault(medium, []).append(url) if not new_css: return ImmutableMedia.empty_instance else: return Media(css=new_css)
from collections import OrderedDict

from .. import CreditCard
from .. import Provider as CreditCardProvider


class Provider(CreditCardProvider):
    """Implementation of ``pt_PT`` locale credit card

    For all methods that take ``card_type`` as an argument a random card type
    will be used if the supplied value is ``None``. The list of valid card types
    includes ``'visa'``, ``'mastercard'`` and ``'maestro'``.

    Source: https://bincheck.org/portugal
    """

    # Six-digit IIN/BIN prefixes for Portuguese-issued Visa cards.
    prefix_visa = [
        "400131", "400190", "400817", "402192", "402947", "402956",
        "403005", "403006", "403007", "403008", "403271", "404520",
        "404530", "405758", "406170", "406475", "407548", "407549",
        "407575", "408237", "408239", "409842", "409843", "410000",
        "410344", "410345", "410553", "410557", "411635", "411700",
        "411701", "411869", "412487", "412488", "412489", "412657",
        "412782", "412990", "413014", "413793", "413871", "415158",
        "415159", "415170", "415171", "415174", "415175", "415194",
        "415195", "415238", "415272", "415273", "415403", "415404",
        "415405", "415440", "415441", "415569", "415920", "415961",
        "416952", "416963", "416970", "417005", "417091", "417092",
        "417337", "418847", "419022", "419682", "419683", "419684",
        "421149", "421510", "422080", "422240", "422241", "422414",
        "422417", "422597", "422869", "423392", "423393", "424118",
        "424184", "424208", "424661", "425509", "425510", "425906",
        "426150", "426360", "426370", "427256", "427304", "427729",
        "427770", "427867", "428139", "428184", "428185", "428186",
        "428187", "429711", "430240", "430241", "431926", "433390",
        "433391", "433511", "433512", "433513", "433599", "433618",
        "433622", "433966", "437886", "438257", "439070", "440637",
        "440644", "440645", "442664", "443977", "443978", "444224",
        "444227", "445961", "445962", "446140", "446144", "449389",
        "450915", "451156", "451166", "454755", "455250", "455290",
        "455292", "455658", "456811", "456812", "457031", "458058",
        "458059", "459432", "459433", "459449", "460340", "460341",
        "460342", "461247", "461248", "461249", "462731", "462732",
        "464406", "465964", "476066", "476067", "476068", "476069",
        "476070", "476071", "476329", "477920", "477921", "477922",
        "477947", "477989", "478062", "478063", "479702", "479736",
        "483088", "485672", "486449", "486457", "489434", "489485",
        "490772", "490830", "490831", "490832", "490841", "490863",
        "491213", "491546", "491547", "491613", "492194", "493402",
        "493480", "493800", "493801", "493830", "498800", "499968",
        "499969", "499986", "422239", "422041", "464409", "464408",
    ]

    # Six-digit IIN/BIN prefixes for Portuguese-issued Mastercard cards.
    prefix_mastercard = [
        "510122", "510123", "512556", "518772", "519744", "519774",
        "520342", "524552", "524878", "525625", "525808", "526819",
        "527014", "528024", "529119", "530267", "530770", "532355",
        "536468", "541171", "541557", "542081", "542098", "542858",
        "543099", "543116", "543123", "544051", "544052", "544233",
        "547260", "547459", "548168", "548169", "552727", "552755",
        "553057", "554506", "554517", "554518", "556660", "557836",
        "557882", "557883", "557888",
    ]

    # Six-digit IIN/BIN prefixes for Portuguese-issued Maestro cards.
    # NOTE(review): "676938" appears twice, which doubles its selection
    # weight in random generation — presumably an accidental duplicate
    # in the upstream source data; confirm before deduplicating.
    prefix_maestro = [
        "501654", "501659", "670530", "670811", "670812", "676938",
        "676938", "677393", "677707", "670835", "670817",
    ]

    # Ordered mapping of card-type key -> CreditCard spec; the order is
    # significant because a seeded random choice of ``card_type`` iterates
    # over it deterministically.
    credit_card_types = OrderedDict(
        (
            (
                "maestro",
                CreditCard("Maestro", prefix_maestro, 16, security_code="CVV2"),
            ),
            (
                "mastercard",
                CreditCard("Mastercard", prefix_mastercard, 16, security_code="CVV2"),
            ),
            ("visa", CreditCard("Visa", prefix_visa, 16, security_code="CVV2")),
        )
    )
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from gpu_tests.webgl_conformance_expectations import WebGLConformanceExpectations

# See the GpuTestExpectations class for documentation.
class WebGL2ConformanceExpectations(WebGLConformanceExpectations):
  def __init__(self, conformance_path):
    super(WebGL2ConformanceExpectations, self).__init__(conformance_path)

  def SetExpectations(self):
    """Register the known WebGL 2 conformance failures/skips per platform.

    Each self.Skip/Fail/Flaky call maps a test path (globs allowed) and an
    optional platform/GPU condition list to a tracking bug. Ordering and
    exact paths are significant to the expectation matcher.
    """
    # All platforms.
    self.Skip('deqp/functional/gles3/attriblocation.html', bug=483282)
    self.Skip('deqp/functional/gles3/builtinprecision*.html', bug=483282)
    self.Skip('deqp/functional/gles3/draw.html', bug=483282)
    self.Skip('deqp/functional/gles3/fbocolorbuffer.html', bug=483282)
    self.Skip('deqp/functional/gles3/fbocompleteness.html', bug=483282)
    self.Skip('deqp/functional/gles3/fbodepthbuffer.html', bug=483282)
    self.Skip('deqp/functional/gles3/fboinvalidate.html', bug=483282)
    self.Skip('deqp/functional/gles3/fbomultisample.html', bug=483282)
    self.Skip('deqp/functional/gles3/fborender.html', bug=483282)
    self.Skip('deqp/functional/gles3/fragmentoutput.html', bug=483282)
    self.Skip('deqp/functional/gles3/framebufferblit.html', bug=483282)
    self.Skip('deqp/functional/gles3/instancedrendering.html', bug=483282)
    self.Skip('deqp/functional/gles3/integerstatequery.html', bug=483282)
    self.Skip('deqp/functional/gles3/internalformatquery.html', bug=483282)
    self.Skip('deqp/functional/gles3/lifetime.html', bug=483282)
    self.Skip('deqp/functional/gles3/multisample.html', bug=483282)
    self.Skip('deqp/functional/gles3/negativebufferapi.html', bug=483282)
    self.Skip('deqp/functional/gles3/negativefragmentapi.html', bug=483282)
    self.Skip('deqp/functional/gles3/negativetextureapi.html', bug=483282)
    self.Skip('deqp/functional/gles3/negativevertexarrayapi.html', bug=483282)
    self.Skip('deqp/functional/gles3/occlusionquery.html', bug=483282)
    self.Skip('deqp/functional/gles3/pixelbufferobject.html', bug=483282)
    self.Skip('deqp/functional/gles3/shaderbuiltinvar.html', bug=483282)
    self.Skip('deqp/functional/gles3/shadercommonfunction.html', bug=483282)
    self.Skip('deqp/functional/gles3/shaderderivate.html', bug=483282)
    self.Skip('deqp/functional/gles3/shaderloop.html', bug=483282)
    self.Skip('deqp/functional/gles3/shadermatrix.html', bug=483282)
    self.Skip('deqp/functional/gles3/shaderoperator.html', bug=483282)
    self.Skip('deqp/functional/gles3/shaderpackingfunction.html', bug=483282)
    self.Skip('deqp/functional/gles3/shaderstatequery.html', bug=483282)
    self.Skip('deqp/functional/gles3/shadertexturefunction*.html', bug=483282)
    self.Skip('deqp/functional/gles3/sync.html', bug=483282)
    self.Skip('deqp/functional/gles3/texturefiltering*.html', bug=483282)
    self.Skip('deqp/functional/gles3/textureformat.html', bug=483282)
    self.Skip('deqp/functional/gles3/textureshadow.html', bug=483282)
    self.Skip('deqp/functional/gles3/texturespecification*.html', bug=483282)
    self.Skip('deqp/functional/gles3/texturewrap.html', bug=483282)
    self.Skip('deqp/functional/gles3/transformfeedback.html', bug=483282)
    self.Skip('deqp/functional/gles3/uniformapi.html', bug=483282)
    self.Skip('deqp/functional/gles3/uniformbuffers.html', bug=483282)
    self.Skip('deqp/functional/gles3/vertexarrays.html', bug=483282)

    self.Fail('deqp/data/gles3/shaders/linkage.html', bug=483282)
    self.Fail('deqp/data/gles3/shaders/preprocessor.html', bug=483282)

    self.Fail('conformance2/glsl3/forbidden-operators.html', bug=483282)
    self.Fail('conformance2/misc/expando-loss-2.html', bug=483282)
    self.Fail('conformance2/vertex_arrays/vertex-array-object.html', bug=483282)

    # Windows only.
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-r8-red-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-rg8-rg-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-rgb8-rgb-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-rgb565-rgb-unsigned_short_5_6_5.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-rgb5_a1-rgba-unsigned_short_5_5_5_1.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-rgba4-rgba-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-2d-rgba4-rgba-unsigned_short_4_4_4_4.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/misc/tex-unpack-params.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-r8-red-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-rg8-rg-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-rgb8-rgb-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-rgb565-rgb-unsigned_short_5_6_5.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-rgb5_a1-rgba-unsigned_short_5_5_5_1.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-rgba4-rgba-unsigned_byte.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-2d-rgba4-rgba-unsigned_short_4_4_4_4.html',
        ['win'], bug=483282)
    self.Flaky('deqp/functional/gles3/buffercopy.html', ['win'], bug=587601)
    self.Skip('deqp/functional/gles3/readpixel.html', ['win'], bug=483282)
    self.Skip('deqp/functional/gles3/texturestatequery.html',
        ['win'], bug=483282)
    self.Fail('conformance2/glsl3/array-in-complex-expression.html',
        ['win'], bug=483282)
    self.Skip('conformance2/reading/read-pixels-pack-parameters.html',
        ['win'], bug=483282)
    self.Fail('conformance2/textures/misc/tex-input-validation.html',
        ['win'], bug=483282)
    self.Skip('conformance2/textures/misc/tex-mipmap-levels.html',
        ['win'], bug=483282)
    self.Skip('conformance2/transform_feedback/transform_feedback.html',
        ['win'], bug=483282)
    self.Fail('conformance2/glsl3/const-array-init.html',
        ['win'], bug=1198) # angle bug ID
    self.Skip('conformance2/reading/read-pixels-into-pixel-pack-buffer.html',
        ['win'], bug=1266) # angle bug ID
    self.Skip('conformance2/textures/misc/copy-texture-image.html',
        ['win'], bug=577144) # crash on debug
    self.Fail('conformance2/state/gl-object-get-calls.html',
        ['win'], bug=483282)

    # Windows 8 only.
    self.Fail('conformance2/reading/read-pixels-from-fbo-test.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_data/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_data/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/svg_image/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/svg_image/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/video/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/video/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image_data/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image_data/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_video/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_video/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_blob/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_blob/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image_bitmap/' +
        'tex-2d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image_bitmap/' +
        'tex-2d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/video/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/video/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/image_data/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/image_data/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/image/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/image/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/svg_image/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/svg_image/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/image_bitmap_from_image_data/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image_data/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_video/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_video/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/canvas/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/canvas/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/webgl_canvas/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=560555)
    self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_blob/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_blob/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image_bitmap/' +
        'tex-3d-rgb565-rgb-unsigned_byte.html',
        ['win8'], bug=483282)
    self.Fail('conformance2/textures/image_bitmap_from_image_bitmap/' +
        'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
        ['win8'], bug=483282)

    # Windows Debug. Causing assertions in the GPU process which raise
    # a dialog box, so have to skip them rather than mark them as
    # failing.
    self.Skip('conformance2/textures/canvas/' +
        'tex-2d-rgba8-rgba-unsigned_byte.html',
        ['win', 'debug'], bug=542901)

    # Win / AMD flakiness seen on the FYI waterfall.
    # It's unfortunate that this suppression needs to be so broad, but
    # basically any test that uses readPixels is potentially flaky, and
    # it's infeasible to suppress individual failures one by one.
    self.Flaky('conformance2/*', ['win', ('amd', 0x6779)], bug=491419)
    self.Flaky('deqp/*', ['win', ('amd', 0x6779)], bug=491419)

    # Win / Intel
    self.Fail('conformance2/buffers/uniform-buffers.html',
        ['win', 'intel'], bug=483282)
    self.Fail('deqp/functional/gles3/shaderstruct.html',
        ['win', 'intel'], bug=483282)

    # Mac only.
    self.Skip('deqp/data/gles3/shaders/qualification_order.html',
        ['mac'], bug=483282)
    self.Skip('deqp/data/gles3/shaders/scoping.html',
        ['mac'], bug=483282)
    self.Skip('deqp/functional/gles3/defaultvertexattribute.html',
        ['mac'], bug=483282)
    self.Skip('deqp/functional/gles3/floatstatequery.html',
        ['mac'], bug=483282)
    self.Skip('deqp/functional/gles3/texturestatequery.html',
        ['mac'], bug=483282)
    self.Skip('deqp/functional/gles3/vertexarrayobject.html',
        ['mac'], bug=483282)
    self.Skip('deqp/functional/gles3/shaderswitch.html',
        ['mavericks'], bug=483282)
    self.Fail('deqp/functional/gles3/rbostatequery.html',
        ['mac'], bug=569808)
    self.Fail('deqp/functional/gles3/fbostatequery.html',
        ['mac'], bug=483282)
    self.Fail('deqp/functional/gles3/negativeshaderapi.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/buffers/buffer-overflow-test.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/buffers/buffer-type-restrictions.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/misc/uninitialized-test-2.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/reading/read-pixels-from-fbo-test.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/renderbuffers/' +
        'multisampled-renderbuffer-initialization.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/textures/misc/compressed-tex-image.html',
        ['mac'], bug=565438)
    self.Fail('conformance2/textures/misc/gl-get-tex-parameter.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/textures/misc/tex-storage-compressed-formats.html',
        ['mac'], bug=295792)
    self.Fail('conformance2/renderbuffers/invalidate-framebuffer.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/renderbuffers/framebuffer-test.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/renderbuffers/readbuffer.html',
        ['mac'], bug=570453)
    self.Fail('conformance2/textures/misc/tex-storage-and-subimage-3d.html',
        ['mac'], bug=483282)
    self.Fail('conformance2/state/gl-object-get-calls.html',
        ['mac'], bug=483282)

    # Mac Retina NVIDIA
    self.Fail('conformance2/rendering/draw-buffers.html',
        ['mac', ('nvidia', 0xfe9)], bug=483282)
    self.Fail('conformance2/textures/misc/tex-input-validation.html',
        ['mac', ('nvidia', 0xfe9)], bug=483282)
    self.Fail('conformance2/textures/misc/tex-mipmap-levels.html',
        ['mac', ('nvidia', 0xfe9)], bug=483282)
    self.Fail('deqp/functional/gles3/shaderstruct.html',
        ['mac', ('nvidia', 0xfe9)], bug=483282)

    # Mac AMD
    self.Fail('deqp/functional/gles3/clipping.html',
        ['mac', 'amd'], bug=483282)
    self.Fail('deqp/functional/gles3/primitiverestart.html',
        ['mac', 'amd'], bug=598930)

    # Mac Intel
    self.Fail('conformance2/textures/misc/tex-unpack-params.html',
        ['mac', 'intel'], bug=483282)

    # Linux only.
    self.Fail('deqp/data/gles3/shaders/functions.html',
        ['linux'], bug=483282)
    self.Skip('deqp/functional/gles3/shaderswitch.html',
        ['linux'], bug=483282)
    self.Fail('conformance2/glsl3/vector-dynamic-indexing.html',
        ['linux'], bug=483282)
    self.Fail('conformance2/rendering/draw-buffers.html',
        ['linux'], bug=483282)
    self.Fail('conformance2/textures/misc/tex-unpack-params.html',
        ['linux'], bug=483282)
    # We want to mark this Flaky for all of Linux however we currently skip
    # all the tests on Intel. Tag this with AMD and Nvidia to avoid an
    # expectation conflict that would make this test run on Intel.
    self.Flaky('deqp/functional/gles3/negativeshaderapi.html',
        ['linux', 'amd', 'nvidia'], bug=483282)

    # Linux NVIDIA only.
    self.Fail('deqp/functional/gles3/fbostatequery.html',
        ['linux', 'nvidia'], bug=483282)
    self.Fail('conformance2/reading/read-pixels-from-fbo-test.html',
        ['linux', 'nvidia'], bug=483282)

    # Linux AMD only.
    # It looks like AMD shader compiler rejects many valid ES3 semantics.
    self.Skip('deqp/data/gles3/shaders/arrays.html',
        ['linux', 'amd'], bug=483282)
    self.Skip('deqp/data/gles3/shaders/qualification_order.html',
        ['linux', 'amd'], bug=483282)
    self.Skip('deqp/functional/gles3/texturestatequery.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('conformance2/renderbuffers/framebuffer-texture-layer.html',
        ['linux', 'amd'], bug=295792)
    self.Fail('deqp/data/gles3/shaders/conversions.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('deqp/functional/gles3/buffercopy.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('deqp/functional/gles3/clipping.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('deqp/functional/gles3/samplerobject.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('deqp/functional/gles3/shaderprecision.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('conformance2/misc/uninitialized-test-2.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('conformance2/reading/read-pixels-pack-parameters.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('conformance2/reading/read-pixels-into-pixel-pack-buffer.html',
        ['linux', 'amd'], bug=483282)
    self.Fail('conformance2/textures/misc/tex-mipmap-levels.html',
        ['linux', 'amd'], bug=483282)

    # Linux Intel: driver is GL 3.0 and doesn't support features needed for ES3.
    self.Skip('*', ['linux', 'intel'], bug=540543)

    # Conflicting expectations to test that the
    # "Expectations Have No collisions" unittest works.
    # page_name = 'conformance/glsl/constructors/glsl-construct-ivec4.html'

    # Conflict when all conditions match
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug', 'opengl'])
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug', 'opengl'])

    # Conflict when all conditions match (and different sets)
    # self.Fail(page_name,
    #     ['linux', 'win', ('nvidia', 0x1), 'debug', 'opengl'])
    # self.Fail(page_name,
    #     ['linux', 'mac', ('nvidia', 0x1), 'amd', 'debug', 'opengl'])

    # Conflict with one aspect not specified
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug'])
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug', 'opengl'])

    # Conflict with one aspect not specified (in both conditions)
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug'])
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug'])

    # Conflict even if the GPU is specified in a device ID
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug'])
    # self.Fail(page_name,
    #     ['linux', 'nvidia', 'debug'])

    # Test there are no conflicts between two different devices
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug'])
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x2), 'debug'])

    # Test there are no conflicts between two devices with different vendors
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug'])
    # self.Fail(page_name,
    #     ['linux', ('amd', 0x1), 'debug'])

    # Conflicts if there is a device and nothing specified for the other's
    # GPU vendors
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug'])
    # self.Fail(page_name,
    #     ['linux', 'debug'])

    # Test no conflicts happen when only one aspect differs
    # self.Fail(page_name,
    #     ['linux', ('nvidia', 0x1), 'debug', 'opengl'])
    # self.Fail(page_name,
    #     ['win', ('nvidia', 0x1), 'debug', 'opengl'])

    # Conflicts if between a generic os condition and a specific version
    # self.Fail(page_name,
    #     ['xp', ('nvidia', 0x1), 'debug', 'opengl'])
    # self.Fail(page_name,
    #     ['win', ('nvidia', 0x1), 'debug', 'opengl'])
import datetime import decimal import itertools import re import time import urllib2 import urlparse import uuid import warnings from operator import itemgetter import gridfs from bson import Binary, DBRef, SON, ObjectId from mongoengine.python_support import (PY3, bin_type, txt_type, str_types, StringIO) from base import (BaseField, ComplexBaseField, ObjectIdField, ValidationError, get_document, BaseDocument) from queryset import DO_NOTHING, QuerySet from document import Document, EmbeddedDocument from connection import get_db, DEFAULT_CONNECTION_NAME try: from PIL import Image, ImageOps except ImportError: Image = None ImageOps = None __all__ = ['StringField', 'IntField', 'FloatField', 'BooleanField', 'DateTimeField', 'EmbeddedDocumentField', 'ListField', 'DictField', 'ObjectIdField', 'ReferenceField', 'ValidationError', 'MapField', 'DecimalField', 'ComplexDateTimeField', 'URLField', 'DynamicField', 'GenericReferenceField', 'FileField', 'BinaryField', 'SortedListField', 'EmailField', 'GeoPointField', 'ImageField', 'SequenceField', 'UUIDField', 'GenericEmbeddedDocumentField'] RECURSIVE_REFERENCE_CONSTANT = 'self' class StringField(BaseField): """A unicode string field. 
""" def __init__(self, regex=None, max_length=None, min_length=None, **kwargs): self.regex = re.compile(regex) if regex else None self.max_length = max_length self.min_length = min_length super(StringField, self).__init__(**kwargs) def to_python(self, value): if isinstance(value, unicode): return value try: value = value.decode('utf-8') except: pass return value def validate(self, value): if not isinstance(value, basestring): self.error('StringField only accepts string values') if self.max_length is not None and len(value) > self.max_length: self.error('String value is too long') if self.min_length is not None and len(value) < self.min_length: self.error('String value is too short') if self.regex is not None and self.regex.match(value) is None: self.error('String value did not match validation regex') def lookup_member(self, member_name): return None def prepare_query_value(self, op, value): if not isinstance(op, basestring): return value if op.lstrip('i') in ('startswith', 'endswith', 'contains', 'exact'): flags = 0 if op.startswith('i'): flags = re.IGNORECASE op = op.lstrip('i') regex = r'%s' if op == 'startswith': regex = r'^%s' elif op == 'endswith': regex = r'%s$' elif op == 'exact': regex = r'^%s$' # escape unsafe characters which could lead to a re.error value = re.escape(value) value = re.compile(regex % value, flags) return value class URLField(StringField): """A field that validates input as an URL. .. versionadded:: 0.3 """ _URL_REGEX = re.compile( r'^(?:http|ftp)s?://' # http:// or https:// r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain... r'localhost|' #localhost... r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip r'(?::\d+)?' 
# optional port r'(?:/?|[/?]\S+)$', re.IGNORECASE) def __init__(self, verify_exists=False, url_regex=None, **kwargs): self.verify_exists = verify_exists self.url_regex = url_regex or self._URL_REGEX super(URLField, self).__init__(**kwargs) def validate(self, value): if not self.url_regex.match(value): self.error('Invalid URL: %s' % value) return if self.verify_exists: warnings.warn( "The URLField verify_exists argument has intractable security " "and performance issues. Accordingly, it has been deprecated.", DeprecationWarning ) try: request = urllib2.Request(value) urllib2.urlopen(request) except Exception, e: self.error('This URL appears to be a broken link: %s' % e) class EmailField(StringField): """A field that validates input as an E-Mail-Address. .. versionadded:: 0.4 """ EMAIL_REGEX = re.compile( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE # domain ) def validate(self, value): if not EmailField.EMAIL_REGEX.match(value): self.error('Invalid Mail-address: %s' % value) class IntField(BaseField): """An integer field. """ def __init__(self, min_value=None, max_value=None, **kwargs): self.min_value, self.max_value = min_value, max_value super(IntField, self).__init__(**kwargs) def to_python(self, value): try: value = int(value) except ValueError: pass return value def validate(self, value): try: value = int(value) except: self.error('%s could not be converted to int' % value) if self.min_value is not None and value < self.min_value: self.error('Integer value is too small') if self.max_value is not None and value > self.max_value: self.error('Integer value is too large') def prepare_query_value(self, op, value): if value is None: return value return int(value) class FloatField(BaseField): """An floating point number field. 
""" def __init__(self, min_value=None, max_value=None, **kwargs): self.min_value, self.max_value = min_value, max_value super(FloatField, self).__init__(**kwargs) def to_python(self, value): try: value = float(value) except ValueError: pass return value def validate(self, value): if isinstance(value, int): value = float(value) if not isinstance(value, float): self.error('FloatField only accepts float values') if self.min_value is not None and value < self.min_value: self.error('Float value is too small') if self.max_value is not None and value > self.max_value: self.error('Float value is too large') def prepare_query_value(self, op, value): if value is None: return value return float(value) class DecimalField(BaseField): """A fixed-point decimal number field. .. versionadded:: 0.3 """ def __init__(self, min_value=None, max_value=None, **kwargs): self.min_value, self.max_value = min_value, max_value super(DecimalField, self).__init__(**kwargs) def to_python(self, value): original_value = value if not isinstance(value, basestring): value = unicode(value) try: value = decimal.Decimal(value) except ValueError: return original_value return value def to_mongo(self, value): return unicode(value) def validate(self, value): if not isinstance(value, decimal.Decimal): if not isinstance(value, basestring): value = str(value) try: value = decimal.Decimal(value) except Exception, exc: self.error('Could not convert value to decimal: %s' % exc) if self.min_value is not None and value < self.min_value: self.error('Decimal value is too small') if self.max_value is not None and value > self.max_value: self.error('Decimal value is too large') class BooleanField(BaseField): """A boolean field type. .. versionadded:: 0.1.2 """ def to_python(self, value): try: value = bool(value) except ValueError: pass return value def validate(self, value): if not isinstance(value, bool): self.error('BooleanField only accepts boolean values') class DateTimeField(BaseField): """A datetime field. 
Note: Microseconds are rounded to the nearest millisecond.
        Pre UTC microsecond support is effectively broken.
        Use :class:`~mongoengine.fields.ComplexDateTimeField` if you
        need accurate microsecond support.
    """

    def validate(self, value):
        # Only real datetime/date objects are accepted; string parsing
        # happens in prepare_query_value, not here.
        if not isinstance(value, (datetime.datetime, datetime.date)):
            self.error(u'cannot parse date "%s"' % value)

    def to_mongo(self, value):
        # Storage uses the same normalisation as query preparation.
        return self.prepare_query_value(None, value)

    def prepare_query_value(self, op, value):
        # Normalise a value to a datetime, or None when unparseable.
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            # Promote a bare date to midnight of that day.
            return datetime.datetime(value.year, value.month, value.day)

        # Attempt to parse a datetime:
        # value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                return None
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        try:  # Seconds are optional, so try converting seconds first.
            return datetime.datetime(*time.strptime(value,
                                     '%Y-%m-%d %H:%M:%S')[:6], **kwargs)
        except ValueError:
            try:  # Try without seconds.
                return datetime.datetime(*time.strptime(value,
                                         '%Y-%m-%d %H:%M')[:5], **kwargs)
            except ValueError:  # Try without hour/minutes/seconds.
                try:
                    return datetime.datetime(*time.strptime(value,
                                             '%Y-%m-%d')[:3], **kwargs)
                except ValueError:
                    # Unparseable strings become None rather than raising.
                    return None


class ComplexDateTimeField(StringField):
    """
    ComplexDateTimeField handles microseconds exactly instead of rounding
    like DateTimeField does.

    Derives from a StringField so you can do `gte` and `lte` filtering by
    using lexicographical comparison when filtering / sorting strings.

    The stored string has the following format:

        YYYY,MM,DD,HH,MM,SS,NNNNNN

    Where NNNNNN is the number of microseconds of the represented `datetime`.
    The `,` as the separator can be easily modified by passing the `separator`
    keyword when initializing the field.

    ..
versionadded:: 0.5 """ def __init__(self, separator=',', **kwargs): self.names = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond'] self.separtor = separator super(ComplexDateTimeField, self).__init__(**kwargs) def _leading_zero(self, number): """ Converts the given number to a string. If it has only one digit, a leading zero so as it has always at least two digits. """ if int(number) < 10: return "0%s" % number else: return str(number) def _convert_from_datetime(self, val): """ Convert a `datetime` object to a string representation (which will be stored in MongoDB). This is the reverse function of `_convert_from_string`. >>> a = datetime(2011, 6, 8, 20, 26, 24, 192284) >>> RealDateTimeField()._convert_from_datetime(a) '2011,06,08,20,26,24,192284' """ data = [] for name in self.names: data.append(self._leading_zero(getattr(val, name))) return ','.join(data) def _convert_from_string(self, data): """ Convert a string representation to a `datetime` object (the object you will manipulate). This is the reverse function of `_convert_from_datetime`. 
>>> a = '2011,06,08,20,26,24,192284' >>> ComplexDateTimeField()._convert_from_string(a) datetime.datetime(2011, 6, 8, 20, 26, 24, 192284) """ data = data.split(',') data = map(int, data) values = {} for i in range(7): values[self.names[i]] = data[i] return datetime.datetime(**values) def __get__(self, instance, owner): data = super(ComplexDateTimeField, self).__get__(instance, owner) if data == None: return datetime.datetime.now() if isinstance(data, datetime.datetime): return data return self._convert_from_string(data) def __set__(self, instance, value): value = self._convert_from_datetime(value) if value else value return super(ComplexDateTimeField, self).__set__(instance, value) def validate(self, value): if not isinstance(value, datetime.datetime): self.error('Only datetime objects may used in a ' 'ComplexDateTimeField') def to_python(self, value): original_value = value try: return self._convert_from_string(value) except: return original_value def to_mongo(self, value): return self._convert_from_datetime(value) def prepare_query_value(self, op, value): return self._convert_from_datetime(value) class EmbeddedDocumentField(BaseField): """An embedded document field - with a declared document_type. Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`. 
""" def __init__(self, document_type, **kwargs): if not isinstance(document_type, basestring): if not issubclass(document_type, EmbeddedDocument): self.error('Invalid embedded document class provided to an ' 'EmbeddedDocumentField') self.document_type_obj = document_type super(EmbeddedDocumentField, self).__init__(**kwargs) @property def document_type(self): if isinstance(self.document_type_obj, basestring): if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT: self.document_type_obj = self.owner_document else: self.document_type_obj = get_document(self.document_type_obj) return self.document_type_obj def to_python(self, value): if not isinstance(value, self.document_type): return self.document_type._from_son(value) return value def to_mongo(self, value): if not isinstance(value, self.document_type): return value return self.document_type.to_mongo(value) def validate(self, value): """Make sure that the document instance is an instance of the EmbeddedDocument subclass provided when the document was defined. """ # Using isinstance also works for subclasses of self.document if not isinstance(value, self.document_type): self.error('Invalid embedded document instance provided to an ' 'EmbeddedDocumentField') self.document_type.validate(value) def lookup_member(self, member_name): return self.document_type._fields.get(member_name) def prepare_query_value(self, op, value): return self.to_mongo(value) class GenericEmbeddedDocumentField(BaseField): """A generic embedded document field - allows any :class:`~mongoengine.EmbeddedDocument` to be stored. Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`. .. 
note :: You can use the choices param to limit the acceptable EmbeddedDocument types """ def prepare_query_value(self, op, value): return self.to_mongo(value) def to_python(self, value): if isinstance(value, dict): doc_cls = get_document(value['_cls']) value = doc_cls._from_son(value) return value def validate(self, value): if not isinstance(value, EmbeddedDocument): self.error('Invalid embedded document instance provided to an ' 'GenericEmbeddedDocumentField') value.validate() def to_mongo(self, document): if document is None: return None data = document.to_mongo() if not '_cls' in data: data['_cls'] = document._class_name return data class DynamicField(BaseField): """A truly dynamic field type capable of handling different and varying types of data. Used by :class:`~mongoengine.DynamicDocument` to handle dynamic data""" def to_mongo(self, value): """Convert a Python type to a MongoDBcompatible type. """ if isinstance(value, basestring): return value if hasattr(value, 'to_mongo'): return value.to_mongo() if not isinstance(value, (dict, list, tuple)): return value is_list = False if not hasattr(value, 'items'): is_list = True value = dict([(k, v) for k, v in enumerate(value)]) data = {} for k, v in value.items(): data[k] = self.to_mongo(v) if is_list: # Convert back to a list value = [v for k, v in sorted(data.items(), key=itemgetter(0))] else: value = data return value def lookup_member(self, member_name): return member_name def prepare_query_value(self, op, value): if isinstance(value, basestring): from mongoengine.fields import StringField return StringField().prepare_query_value(op, value) return self.to_mongo(value) class ListField(ComplexBaseField): """A list field that wraps a standard field, allowing multiple instances of the field to be used as a list in the database. If using with ReferenceFields see: :ref:`one-to-many-with-listfields` .. 
note:: Required means it cannot be empty - as the default for ListFields is [] """ # ListFields cannot be indexed with _types - MongoDB doesn't support this _index_with_types = False def __init__(self, field=None, **kwargs): self.field = field kwargs.setdefault('default', lambda: []) super(ListField, self).__init__(**kwargs) def validate(self, value): """Make sure that a list of valid fields is being used. """ if (not isinstance(value, (list, tuple, QuerySet)) or isinstance(value, basestring)): self.error('Only lists and tuples may be used in a list field') super(ListField, self).validate(value) def prepare_query_value(self, op, value): if self.field: if op in ('set', 'unset') and (not isinstance(value, basestring) and not isinstance(value, BaseDocument) and hasattr(value, '__iter__')): return [self.field.prepare_query_value(op, v) for v in value] return self.field.prepare_query_value(op, value) return super(ListField, self).prepare_query_value(op, value) class SortedListField(ListField): """A ListField that sorts the contents of its list before writing to the database in order to ensure that a sorted list is always retrieved. .. warning:: There is a potential race condition when handling lists. If you set / save the whole list then other processes trying to save the whole list as well could overwrite changes. The safest way to append to a list is to perform a push operation. .. versionadded:: 0.4 .. 
versionchanged:: 0.6 - added reverse keyword """ _ordering = None _order_reverse = False def __init__(self, field, **kwargs): if 'ordering' in kwargs.keys(): self._ordering = kwargs.pop('ordering') if 'reverse' in kwargs.keys(): self._order_reverse = kwargs.pop('reverse') super(SortedListField, self).__init__(field, **kwargs) def to_mongo(self, value): value = super(SortedListField, self).to_mongo(value) if self._ordering is not None: return sorted(value, key=itemgetter(self._ordering), reverse=self._order_reverse) return sorted(value, reverse=self._order_reverse) class DictField(ComplexBaseField): """A dictionary field that wraps a standard Python dictionary. This is similar to an embedded document, but the structure is not defined. .. note:: Required means it cannot be empty - as the default for ListFields is [] .. versionadded:: 0.3 .. versionchanged:: 0.5 - Can now handle complex / varying types of data """ def __init__(self, basecls=None, field=None, *args, **kwargs): self.field = field self.basecls = basecls or BaseField if not issubclass(self.basecls, BaseField): self.error('DictField only accepts dict values') kwargs.setdefault('default', lambda: {}) super(DictField, self).__init__(*args, **kwargs) def validate(self, value): """Make sure that a list of valid fields is being used. """ if not isinstance(value, dict): self.error('Only dictionaries may be used in a DictField') if any(k for k in value.keys() if not isinstance(k, basestring)): self.error('Invalid dictionary key - documents must have only string keys') if any(('.' 
in k or '$' in k) for k in value.keys()): self.error('Invalid dictionary key name - keys may not contain "."' ' or "$" characters') super(DictField, self).validate(value) def lookup_member(self, member_name): return DictField(basecls=self.basecls, db_field=member_name) def prepare_query_value(self, op, value): match_operators = ['contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'exact', 'iexact'] if op in match_operators and isinstance(value, basestring): return StringField().prepare_query_value(op, value) return super(DictField, self).prepare_query_value(op, value) class MapField(DictField): """A field that maps a name to a specified field type. Similar to a DictField, except the 'value' of each item must match the specified field type. .. versionadded:: 0.5 """ def __init__(self, field=None, *args, **kwargs): if not isinstance(field, BaseField): self.error('Argument to MapField constructor must be a valid ' 'field') super(MapField, self).__init__(field=field, *args, **kwargs) class ReferenceField(BaseField): """A reference to a document that will be automatically dereferenced on access (lazily). Use the `reverse_delete_rule` to handle what should happen if the document the field is referencing is deleted. EmbeddedDocuments, DictFields and MapFields do not support reverse_delete_rules and an `InvalidDocumentError` will be raised if trying to set on one of these Document / Field types. The options are: * DO_NOTHING - don't do anything (default). * NULLIFY - Updates the reference to null. * CASCADE - Deletes the documents associated with the reference. * DENY - Prevent the deletion of the reference object. * PULL - Pull the reference from a :class:`~mongoengine.ListField` of references Alternative syntax for registering delete rules (useful when implementing bi-directional delete rules) .. code-block:: python class Bar(Document): content = StringField() foo = ReferenceField('Foo') Bar.register_delete_rule(Foo, 'bar', NULLIFY) .. 
note :: `reverse_delete_rules` do not trigger pre / post delete signals to be triggered. .. versionchanged:: 0.5 added `reverse_delete_rule` """ def __init__(self, document_type, dbref=False, reverse_delete_rule=DO_NOTHING, **kwargs): """Initialises the Reference Field. :param dbref: Store the reference as :class:`~pymongo.dbref.DBRef` or as the :class:`~pymongo.objectid.ObjectId`.id . :param reverse_delete_rule: Determines what to do when the referring object is deleted """ if not isinstance(document_type, basestring): if not issubclass(document_type, (Document, basestring)): self.error('Argument to ReferenceField constructor must be a ' 'document class or a string') if dbref is None: msg = ("ReferenceFields will default to using ObjectId " " strings in 0.8, set DBRef=True if this isn't desired") warnings.warn(msg, FutureWarning) self.dbref = dbref if dbref is not None else True # To change in 0.8 self.document_type_obj = document_type self.reverse_delete_rule = reverse_delete_rule super(ReferenceField, self).__init__(**kwargs) @property def document_type(self): if isinstance(self.document_type_obj, basestring): if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT: self.document_type_obj = self.owner_document else: self.document_type_obj = get_document(self.document_type_obj) return self.document_type_obj def __get__(self, instance, owner): """Descriptor to allow lazy dereferencing. 
""" if instance is None: # Document class being used rather than a document object return self # Get value from document instance if available value = instance._data.get(self.name) # Dereference DBRefs if isinstance(value, DBRef): value = self.document_type._get_db().dereference(value) if value is not None: instance._data[self.name] = self.document_type._from_son(value) return super(ReferenceField, self).__get__(instance, owner) def to_mongo(self, document): if isinstance(document, DBRef): if not self.dbref: return DBRef.id return document elif not self.dbref and isinstance(document, basestring): return document id_field_name = self.document_type._meta['id_field'] id_field = self.document_type._fields[id_field_name] if isinstance(document, Document): # We need the id from the saved object to create the DBRef id_ = document.pk if id_ is None: self.error('You can only reference documents once they have' ' been saved to the database') else: id_ = document id_ = id_field.to_mongo(id_) if self.dbref: collection = self.document_type._get_collection_name() return DBRef(collection, id_) return id_ def to_python(self, value): """Convert a MongoDB-compatible type to a Python type. 
""" if (not self.dbref and not isinstance(value, (DBRef, Document, EmbeddedDocument))): collection = self.document_type._get_collection_name() value = DBRef(collection, self.document_type.id.to_python(value)) return value def prepare_query_value(self, op, value): if value is None: return None return self.to_mongo(value) def validate(self, value): if not isinstance(value, (self.document_type, DBRef)): self.error("A ReferenceField only accepts DBRef or documents") if isinstance(value, Document) and value.id is None: self.error('You can only reference documents once they have been ' 'saved to the database') def lookup_member(self, member_name): return self.document_type._fields.get(member_name) class GenericReferenceField(BaseField): """A reference to *any* :class:`~mongoengine.document.Document` subclass that will be automatically dereferenced on access (lazily). .. note :: * Any documents used as a generic reference must be registered in the document registry. Importing the model will automatically register it. * You can use the choices param to limit the acceptable Document types .. 
versionadded:: 0.3 """ def __get__(self, instance, owner): if instance is None: return self value = instance._data.get(self.name) if isinstance(value, (dict, SON)): instance._data[self.name] = self.dereference(value) return super(GenericReferenceField, self).__get__(instance, owner) def validate(self, value): if not isinstance(value, (Document, DBRef)): self.error('GenericReferences can only contain documents') # We need the id from the saved object to create the DBRef if isinstance(value, Document) and value.id is None: self.error('You can only reference documents once they have been' ' saved to the database') def dereference(self, value): doc_cls = get_document(value['_cls']) reference = value['_ref'] doc = doc_cls._get_db().dereference(reference) if doc is not None: doc = doc_cls._from_son(doc) return doc def to_mongo(self, document): if document is None: return None if isinstance(document, (dict, SON)): return document id_field_name = document.__class__._meta['id_field'] id_field = document.__class__._fields[id_field_name] if isinstance(document, Document): # We need the id from the saved object to create the DBRef id_ = document.id if id_ is None: self.error('You can only reference documents once they have' ' been saved to the database') else: id_ = document id_ = id_field.to_mongo(id_) collection = document._get_collection_name() ref = DBRef(collection, id_) return {'_cls': document._class_name, '_ref': ref} def prepare_query_value(self, op, value): if value is None: return None return self.to_mongo(value) class BinaryField(BaseField): """A binary data field. 
""" def __init__(self, max_bytes=None, **kwargs): self.max_bytes = max_bytes super(BinaryField, self).__init__(**kwargs) def __set__(self, instance, value): """Handle bytearrays in python 3.1""" if PY3 and isinstance(value, bytearray): value = bin_type(value) return super(BinaryField, self).__set__(instance, value) def to_mongo(self, value): return Binary(value) def validate(self, value): if not isinstance(value, (bin_type, txt_type, Binary)): self.error("BinaryField only accepts instances of " "(%s, %s, Binary)" % ( bin_type.__name__, txt_type.__name__)) if self.max_bytes is not None and len(value) > self.max_bytes: self.error('Binary value is too long') class GridFSError(Exception): pass class GridFSProxy(object): """Proxy object to handle writing and reading of files to and from GridFS .. versionadded:: 0.4 .. versionchanged:: 0.5 - added optional size param to read .. versionchanged:: 0.6 - added collection name param """ _fs = None def __init__(self, grid_id=None, key=None, instance=None, db_alias=DEFAULT_CONNECTION_NAME, collection_name='fs'): self.grid_id = grid_id # Store GridFS id for file self.key = key self.instance = instance self.db_alias = db_alias self.collection_name = collection_name self.newfile = None # Used for partial writes self.gridout = None def __getattr__(self, name): attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias', 'collection_name', 'newfile', 'gridout') if name in attrs: return self.__getattribute__(name) obj = self.get() if name in dir(obj): return getattr(obj, name) raise AttributeError def __get__(self, instance, value): return self def __nonzero__(self): return bool(self.grid_id) def __getstate__(self): self_dict = self.__dict__ self_dict['_fs'] = None return self_dict def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.grid_id) def __eq__(self, other): if isinstance(other, GridFSProxy): return ((self.grid_id == other.grid_id) and (self.collection_name == other.collection_name) and (self.db_alias == 
other.db_alias)) else: return False @property def fs(self): if not self._fs: self._fs = gridfs.GridFS(get_db(self.db_alias), self.collection_name) return self._fs def get(self, id=None): if id: self.grid_id = id if self.grid_id is None: return None try: if self.gridout is None: self.gridout = self.fs.get(self.grid_id) return self.gridout except: # File has been deleted return None def new_file(self, **kwargs): self.newfile = self.fs.new_file(**kwargs) self.grid_id = self.newfile._id def put(self, file_obj, **kwargs): if self.grid_id: raise GridFSError('This document already has a file. Either delete ' 'it or call replace to overwrite it') self.grid_id = self.fs.put(file_obj, **kwargs) self._mark_as_changed() def write(self, string): if self.grid_id: if not self.newfile: raise GridFSError('This document already has a file. Either ' 'delete it or call replace to overwrite it') else: self.new_file() self.newfile.write(string) def writelines(self, lines): if not self.newfile: self.new_file() self.grid_id = self.newfile._id self.newfile.writelines(lines) def read(self, size=-1): gridout = self.get() if gridout is None: return None else: try: return gridout.read(size) except: return "" def delete(self): # Delete file from GridFS, FileField still remains self.fs.delete(self.grid_id) self.grid_id = None self.gridout = None self._mark_as_changed() def replace(self, file_obj, **kwargs): self.delete() self.put(file_obj, **kwargs) def close(self): if self.newfile: self.newfile.close() def _mark_as_changed(self): """Inform the instance that `self.key` has been changed""" if self.instance: self.instance._mark_as_changed(self.key) class FileField(BaseField): """A GridFS storage field. .. versionadded:: 0.4 .. versionchanged:: 0.5 added optional size param for read .. 
versionchanged:: 0.6 added db_alias for multidb support """ proxy_class = GridFSProxy def __init__(self, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs): super(FileField, self).__init__(**kwargs) self.collection_name = collection_name self.db_alias = db_alias def __get__(self, instance, owner): if instance is None: return self # Check if a file already exists for this model grid_file = instance._data.get(self.name) if not isinstance(grid_file, self.proxy_class): grid_file = self.proxy_class(key=self.name, instance=instance, db_alias=self.db_alias, collection_name=self.collection_name) instance._data[self.name] = grid_file if not grid_file.key: grid_file.key = self.name grid_file.instance = instance return grid_file def __set__(self, instance, value): key = self.name if ((hasattr(value, 'read') and not isinstance(value, GridFSProxy)) or isinstance(value, str_types)): # using "FileField() = file/string" notation grid_file = instance._data.get(self.name) # If a file already exists, delete it if grid_file: try: grid_file.delete() except: pass # Create a new file with the new data grid_file.put(value) else: # Create a new proxy object as we don't already have one instance._data[key] = self.proxy_class(key=key, instance=instance, collection_name=self.collection_name) instance._data[key].put(value) else: instance._data[key] = value instance._mark_as_changed(key) def to_mongo(self, value): # Store the GridFS file id in MongoDB if isinstance(value, self.proxy_class) and value.grid_id is not None: return value.grid_id return None def to_python(self, value): if value is not None: return self.proxy_class(value, collection_name=self.collection_name, db_alias=self.db_alias) def validate(self, value): if value.grid_id is not None: if not isinstance(value, self.proxy_class): self.error('FileField only accepts GridFSProxy values') if not isinstance(value.grid_id, ObjectId): self.error('Invalid GridFSProxy value') class ImageGridFsProxy(GridFSProxy): """ Proxy for 
ImageField versionadded: 0.6 """ def put(self, file_obj, **kwargs): """ Insert a image in database applying field properties (size, thumbnail_size) """ field = self.instance._fields[self.key] try: img = Image.open(file_obj) img_format = img.format except: raise ValidationError('Invalid image') if (field.size and (img.size[0] > field.size['width'] or img.size[1] > field.size['height'])): size = field.size if size['force']: img = ImageOps.fit(img, (size['width'], size['height']), Image.ANTIALIAS) else: img.thumbnail((size['width'], size['height']), Image.ANTIALIAS) thumbnail = None if field.thumbnail_size: size = field.thumbnail_size if size['force']: thumbnail = ImageOps.fit(img, (size['width'], size['height']), Image.ANTIALIAS) else: thumbnail = img.copy() thumbnail.thumbnail((size['width'], size['height']), Image.ANTIALIAS) if thumbnail: thumb_id = self._put_thumbnail(thumbnail, img_format) else: thumb_id = None w, h = img.size io = StringIO() img.save(io, img_format) io.seek(0) return super(ImageGridFsProxy, self).put(io, width=w, height=h, format=img_format, thumbnail_id=thumb_id, **kwargs) def delete(self, *args, **kwargs): #deletes thumbnail out = self.get() if out and out.thumbnail_id: self.fs.delete(out.thumbnail_id) return super(ImageGridFsProxy, self).delete(*args, **kwargs) def _put_thumbnail(self, thumbnail, format, **kwargs): w, h = thumbnail.size io = StringIO() thumbnail.save(io, format) io.seek(0) return self.fs.put(io, width=w, height=h, format=format, **kwargs) @property def size(self): """ return a width, height of image """ out = self.get() if out: return out.width, out.height @property def format(self): """ return format of image ex: PNG, JPEG, GIF, etc """ out = self.get() if out: return out.format @property def thumbnail(self): """ return a gridfs.grid_file.GridOut representing a thumbnail of Image """ out = self.get() if out and out.thumbnail_id: return self.fs.get(out.thumbnail_id) def write(self, *args, **kwargs): raise RuntimeError("Please 
use \"put\" method instead") def writelines(self, *args, **kwargs): raise RuntimeError("Please use \"put\" method instead") class ImproperlyConfigured(Exception): pass class ImageField(FileField): """ A Image File storage field. @size (width, height, force): max size to store images, if larger will be automatically resized ex: size=(800, 600, True) @thumbnail (width, height, force): size to generate a thumbnail .. versionadded:: 0.6 """ proxy_class = ImageGridFsProxy def __init__(self, size=None, thumbnail_size=None, collection_name='images', **kwargs): if not Image: raise ImproperlyConfigured("PIL library was not found") params_size = ('width', 'height', 'force') extra_args = dict(size=size, thumbnail_size=thumbnail_size) for att_name, att in extra_args.items(): value = None if isinstance(att, (tuple, list)): if PY3: value = dict(itertools.zip_longest(params_size, att, fillvalue=None)) else: value = dict(map(None, params_size, att)) setattr(self, att_name, value) super(ImageField, self).__init__( collection_name=collection_name, **kwargs) class GeoPointField(BaseField): """A list storing a latitude and longitude. .. versionadded:: 0.4 """ _geo_index = True def validate(self, value): """Make sure that a geo-value is of type (x, y) """ if not isinstance(value, (list, tuple)): self.error('GeoPointField can only accept tuples or lists ' 'of (x, y)') if not len(value) == 2: self.error('Value must be a two-dimensional point') if (not isinstance(value[0], (float, int)) and not isinstance(value[1], (float, int))): self.error('Both values in point must be float or int') class SequenceField(IntField): """Provides a sequental counter (see http://www.mongodb.org/display/DOCS/Object+IDs#ObjectIDs-SequenceNumbers) .. note:: Although traditional databases often use increasing sequence numbers for primary keys. In MongoDB, the preferred approach is to use Object IDs instead. 
The concept is that in a very large cluster of machines, it is easier to create an object ID than have global, uniformly increasing sequence numbers. .. versionadded:: 0.5 """ def __init__(self, collection_name=None, db_alias=None, sequence_name=None, *args, **kwargs): self.collection_name = collection_name or 'mongoengine.counters' self.db_alias = db_alias or DEFAULT_CONNECTION_NAME self.sequence_name = sequence_name return super(SequenceField, self).__init__(*args, **kwargs) def generate_new_value(self): """ Generate and Increment the counter """ sequence_name = self.sequence_name or self.owner_document._get_collection_name() sequence_id = "%s.%s" % (sequence_name, self.name) collection = get_db(alias=self.db_alias)[self.collection_name] counter = collection.find_and_modify(query={"_id": sequence_id}, update={"$inc": {"next": 1}}, new=True, upsert=True) return counter['next'] def __get__(self, instance, owner): if instance is None: return self if not instance._data: return value = instance._data.get(self.name) if not value and instance._initialised: value = self.generate_new_value() instance._data[self.name] = value instance._mark_as_changed(self.name) return int(value) if value else None def __set__(self, instance, value): if value is None and instance._initialised: value = self.generate_new_value() return super(SequenceField, self).__set__(instance, value) def to_python(self, value): if value is None: value = self.generate_new_value() return value class UUIDField(BaseField): """A UUID field. .. versionadded:: 0.6 """ _binary = None def __init__(self, binary=None, **kwargs): """ Store UUID data in the database :param binary: (optional) boolean store as binary. .. 
versionchanged:: 0.6.19 """ if binary is None: binary = False msg = ("UUIDFields will soon default to store as binary, please " "configure binary=False if you wish to store as a string") warnings.warn(msg, FutureWarning) self._binary = binary super(UUIDField, self).__init__(**kwargs) def to_python(self, value): if not self._binary: original_value = value try: if not isinstance(value, basestring): value = unicode(value) return uuid.UUID(value) except: return original_value return value def to_mongo(self, value): if not self._binary: return unicode(value) return value def prepare_query_value(self, op, value): if value is None: return None return self.to_mongo(value) def validate(self, value): if not isinstance(value, uuid.UUID): if not isinstance(value, basestring): value = str(value) try: value = uuid.UUID(value) except Exception, exc: self.error('Could not convert to UUID: %s' % exc)
import asyncio
import logging

import ray.dashboard.consts as dashboard_consts
import ray.dashboard.memory_utils as memory_utils

# TODO(fyrestone): Not import from dashboard module.
from ray.dashboard.modules.actor.actor_utils import actor_classname_from_task_spec
from ray.dashboard.utils import Dict, Signal, async_loop_forever

logger = logging.getLogger(__name__)


class GlobalSignals:
    """Async signals fired whenever a piece of dashboard info is assembled."""

    node_info_fetched = Signal(dashboard_consts.SIGNAL_NODE_INFO_FETCHED)
    node_summary_fetched = Signal(dashboard_consts.SIGNAL_NODE_SUMMARY_FETCHED)
    job_info_fetched = Signal(dashboard_consts.SIGNAL_JOB_INFO_FETCHED)
    worker_info_fetched = Signal(dashboard_consts.SIGNAL_WORKER_INFO_FETCHED)


class DataSource:
    """Class-level tables holding the dashboard's shared in-memory state."""

    # {node id hex(str): node stats(dict of GetNodeStatsReply
    # in node_manager.proto)}
    node_stats = Dict()
    # {node id hex(str): node physical stats(dict from reporter_agent.py)}
    node_physical_stats = Dict()
    # {actor id hex(str): actor table data(dict of ActorTableData
    # in gcs.proto)}
    actors = Dict()
    # {job id hex(str): job table data(dict of JobTableData in gcs.proto)}
    jobs = Dict()
    # {node id hex(str): dashboard agent [http port(int), grpc port(int)]}
    agents = Dict()
    # {node id hex(str): gcs node info(dict of GcsNodeInfo in gcs.proto)}
    nodes = Dict()
    # {node id hex(str): ip address(str)}
    node_id_to_ip = Dict()
    # {node id hex(str): hostname(str)}
    node_id_to_hostname = Dict()
    # {node id hex(str): worker list}
    node_workers = Dict()
    # {node id hex(str): {actor id hex(str): actor table data}}
    node_actors = Dict()
    # {job id hex(str): worker list}
    job_workers = Dict()
    # {job id hex(str): {actor id hex(str): actor table data}}
    job_actors = Dict()
    # {worker id(str): core worker stats}
    core_worker_stats = Dict()
    # {job id hex(str): {event id(str): event dict}}
    events = Dict()
    # {node ip (str): log entries by pid
    # (dict from pid to list of latest log entries)}
    ip_and_pid_to_logs = Dict()
    # {node ip (str): error entries by pid
    # (dict from pid to list of latest err entries)}
    ip_and_pid_to_errors = Dict()


class DataOrganizer:
    """Periodic tasks and queries that reshape DataSource tables for the UI."""

    @staticmethod
    @async_loop_forever(dashboard_consts.PURGE_DATA_INTERVAL_SECONDS)
    async def purge():
        # Drop stats for nodes that are no longer alive.
        # These data sources are maintained by DashboardHead and do not
        # need purging here:
        #   * agents
        #   * nodes
        #   * node_id_to_ip
        #   * node_id_to_hostname
        logger.info("Purge data.")
        alive_nodes = {
            node_id
            for node_id, node_info in DataSource.nodes.items()
            if node_info["state"] == "ALIVE"
        }
        for stale_id in DataSource.node_stats.keys() - alive_nodes:
            DataSource.node_stats.pop(stale_id)

        for stale_id in DataSource.node_physical_stats.keys() - alive_nodes:
            DataSource.node_physical_stats.pop(stale_id)

    @classmethod
    @async_loop_forever(dashboard_consts.ORGANIZE_DATA_INTERVAL_SECONDS)
    async def organize(cls):
        """Rebuild the per-job / per-node worker indexes from scratch."""
        workers_by_job = {}
        workers_by_node = {}
        stats_by_worker_id = {}
        # We await inside the loop, so iterate over a snapshot of the keys.
        for node_id in list(DataSource.nodes.keys()):
            workers = await cls.get_node_workers(node_id)
            for worker in workers:
                workers_by_job.setdefault(worker["jobId"], []).append(worker)
                for stats in worker.get("coreWorkerStats", []):
                    stats_by_worker_id[stats["workerId"]] = stats
            workers_by_node[node_id] = workers

        DataSource.job_workers.reset(workers_by_job)
        DataSource.node_workers.reset(workers_by_node)
        DataSource.core_worker_stats.reset(stats_by_worker_id)

    @classmethod
    async def get_node_workers(cls, node_id):
        """Return the worker list for *node_id*, merging core worker stats
        (from node stats) into the physical per-process stats."""
        workers = []
        node_ip = DataSource.node_id_to_ip[node_id]
        node_logs = DataSource.ip_and_pid_to_logs.get(node_ip, {})
        node_errs = DataSource.ip_and_pid_to_errors.get(node_ip, {})
        node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
        node_stats = DataSource.node_stats.get(node_id, {})

        # Index the core worker stats by pid.
        pid_to_worker_stats = {}
        pid_to_language = {}
        pid_to_job_id = {}
        pids_on_node = set()
        for core_worker_stats in node_stats.get("coreWorkersStats", []):
            pid = core_worker_stats["pid"]
            pids_on_node.add(pid)
            pid_to_worker_stats.setdefault(pid, []).append(core_worker_stats)
            pid_to_language[pid] = core_worker_stats["language"]
            pid_to_job_id[pid] = core_worker_stats["jobId"]

        # Clean up log entries whose pid is no longer on the node.
        for dead_pid in set(node_logs.keys()) - pids_on_node:
            if dead_pid in node_logs:
                node_logs.mutable().pop(dead_pid)

        for worker in node_physical_stats.get("workers", []):
            worker = dict(worker)
            pid = worker["pid"]
            worker["logCount"] = len(node_logs.get(str(pid), []))
            worker["errorCount"] = len(node_errs.get(str(pid), []))
            worker["coreWorkerStats"] = pid_to_worker_stats.get(pid, [])
            worker["language"] = pid_to_language.get(
                pid, dashboard_consts.DEFAULT_LANGUAGE
            )
            worker["jobId"] = pid_to_job_id.get(pid, dashboard_consts.DEFAULT_JOB_ID)

            await GlobalSignals.worker_info_fetched.send(node_id, worker)

            workers.append(worker)
        return workers

    @classmethod
    async def get_node_info(cls, node_id):
        """Assemble the full detail payload for one node."""
        node_physical_stats = dict(DataSource.node_physical_stats.get(node_id, {}))
        node_stats = dict(DataSource.node_stats.get(node_id, {}))
        node = DataSource.nodes.get(node_id, {})
        node_ip = DataSource.node_id_to_ip.get(node_id)

        # Fold per-pid log/error entry counts into node-level totals.
        log_info = DataSource.ip_and_pid_to_logs.get(node_ip, {})
        node_log_count = sum(len(entries) for entries in log_info.values())
        error_info = DataSource.ip_and_pid_to_errors.get(node_ip, {})
        node_err_count = sum(len(entries) for entries in error_info.values())

        node_stats.pop("coreWorkersStats", None)

        view_data = node_stats.get("viewData", [])
        ray_stats = cls._extract_view_data(
            view_data, {"object_store_used_memory", "object_store_available_memory"}
        )

        node_info = node_physical_stats
        # Merge node stats to node physical stats under "raylet".
        node_info["raylet"] = node_stats
        node_info["raylet"].update(ray_stats)

        # Merge GcsNodeInfo to node physical stats.
        node_info["raylet"].update(node)
        # Merge actors to node physical stats.
        node_info["actors"] = DataSource.node_actors.get(node_id, {})
        # Update workers to node physical stats.
        node_info["workers"] = DataSource.node_workers.get(node_id, [])
        node_info["logCount"] = node_log_count
        node_info["errorCount"] = node_err_count
        await GlobalSignals.node_info_fetched.send(node_info)

        return node_info

    @classmethod
    async def get_node_summary(cls, node_id):
        """Assemble the lighter summary payload for one node (no workers)."""
        node_physical_stats = dict(DataSource.node_physical_stats.get(node_id, {}))
        node_stats = dict(DataSource.node_stats.get(node_id, {}))
        node = DataSource.nodes.get(node_id, {})

        node_physical_stats.pop("workers", None)
        # NOTE(review): this pops "workersStats" while get_node_info pops
        # "coreWorkersStats" -- intentional difference preserved as-is.
        node_stats.pop("workersStats", None)
        view_data = node_stats.get("viewData", [])
        ray_stats = cls._extract_view_data(
            view_data, {"object_store_used_memory", "object_store_available_memory"}
        )
        node_stats.pop("viewData", None)

        node_summary = node_physical_stats
        # Merge node stats to node physical stats.
        node_summary["raylet"] = node_stats
        node_summary["raylet"].update(ray_stats)
        # Merge GcsNodeInfo to node physical stats.
        node_summary["raylet"].update(node)

        await GlobalSignals.node_summary_fetched.send(node_summary)

        return node_summary

    @classmethod
    async def get_all_node_summary(cls):
        return [
            await DataOrganizer.get_node_summary(node_id)
            for node_id in DataSource.nodes.keys()
        ]

    @classmethod
    async def get_all_node_details(cls):
        return [
            await DataOrganizer.get_node_info(node_id)
            for node_id in DataSource.nodes.keys()
        ]

    @classmethod
    async def get_all_actors(cls):
        result = {}
        for index, (actor_id, actor) in enumerate(DataSource.actors.items()):
            result[actor_id] = await cls._get_actor(actor)
            # There can be thousands of actors including dead ones. Processing
            # them all can take many seconds, which blocks all other requests
            # to the dashboard. The ideal solution might be to implement
            # pagination. For now, use a workaround to yield to the event loop
            # periodically, so other request handlers have a chance to run and
            # avoid long latencies.
            if index % 1000 == 0 and index > 0:
                # Canonical way to yield to the event loop:
                # https://github.com/python/asyncio/issues/284
                await asyncio.sleep(0)
        return result

    @staticmethod
    async def _get_actor(actor):
        """Merge core worker stats and process stats into one actor dict."""
        actor = dict(actor)
        worker_id = actor["address"]["workerId"]
        core_worker_stats = DataSource.core_worker_stats.get(worker_id, {})
        actor_constructor = core_worker_stats.get(
            "actorTitle", "Unknown actor constructor"
        )
        actor["actorConstructor"] = actor_constructor
        actor.update(core_worker_stats)

        # TODO(fyrestone): remove this, give a link from actor
        # info to worker info in front-end.
        node_id = actor["address"]["rayletId"]
        pid = core_worker_stats.get("pid")
        node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
        actor_process_stats = None
        actor_process_gpu_stats = []
        if pid:
            for process_stats in node_physical_stats.get("workers", []):
                if process_stats["pid"] == pid:
                    actor_process_stats = process_stats
                    break

            for gpu_stats in node_physical_stats.get("gpus", []):
                for process in gpu_stats.get("processes", []):
                    if process["pid"] == pid:
                        actor_process_gpu_stats.append(gpu_stats)
                        break

        actor["gpus"] = actor_process_gpu_stats
        actor["processStats"] = actor_process_stats
        return actor

    @classmethod
    async def get_actor_creation_tasks(cls):
        """Collect pending actor-creation tasks across all nodes, tagged
        with their blocking state (INFEASIBLE / PENDING_RESOURCES)."""
        infeasible_tasks = sum(
            (
                list(node_stats.get("infeasibleTasks", []))
                for node_stats in DataSource.node_stats.values()
            ),
            [],
        )
        new_infeasible_tasks = []
        for task in infeasible_tasks:
            task = dict(task)
            task["actorClass"] = actor_classname_from_task_spec(task)
            task["state"] = "INFEASIBLE"
            new_infeasible_tasks.append(task)

        resource_pending_tasks = sum(
            (
                list(data.get("readyTasks", []))
                for data in DataSource.node_stats.values()
            ),
            [],
        )
        new_resource_pending_tasks = []
        for task in resource_pending_tasks:
            task = dict(task)
            task["actorClass"] = actor_classname_from_task_spec(task)
            task["state"] = "PENDING_RESOURCES"
            new_resource_pending_tasks.append(task)

        results = {
            task["actorCreationTaskSpec"]["actorId"]: task
            for task in new_resource_pending_tasks + new_infeasible_tasks
        }
        return results

    @classmethod
    async def get_memory_table(
        cls,
        sort_by=memory_utils.SortingType.OBJECT_SIZE,
        group_by=memory_utils.GroupByType.STACK_TRACE,
    ):
        all_worker_stats = []
        for node_stats in DataSource.node_stats.values():
            all_worker_stats.extend(node_stats.get("coreWorkersStats", []))
        memory_information = memory_utils.construct_memory_table(
            all_worker_stats, group_by=group_by, sort_by=sort_by
        )
        return memory_information

    @staticmethod
    def _extract_view_data(views, data_keys):
        """Pull the first measure value for each view named in *data_keys*."""
        view_data = {}
        for view in views:
            view_name = view["viewName"]
            if view_name not in data_keys:
                continue
            if not view.get("measures"):
                view_data[view_name] = 0
                continue
            measure = view["measures"][0]
            if "doubleValue" in measure:
                measure_value = measure["doubleValue"]
            elif "intValue" in measure:
                measure_value = measure["intValue"]
            else:
                measure_value = 0
            view_data[view_name] = measure_value

        return view_data
#!/usr/bin/env python
# pylint: disable=missing-docstring
#   ___ ___ _  _ ___ ___    _ _____ ___ ___
#  / __| __| \| | __| _ \  /_\_   _| __|   \
# | (_ | _|| .` | _||   / / _ \| | | _|| |) |
#  \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _|
# | |) | (_) | | .` | (_) || |   | _|| |) | |  | |
# |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

DOCUMENTATION = '''
---
module: yedit
short_description: Create, modify, and idempotently manage yaml files.
description:
  - Modify yaml files programmatically.
options:
  state:
    description:
    - State represents whether to create, modify, delete, or list yaml
    required: true
    default: present
    choices: ["present", "absent", "list"]
    aliases: []
  debug:
    description:
    - Turn on debug information.
    required: false
    default: false
    aliases: []
  src:
    description:
    - The file that is the target of the modifications.
    required: false
    default: None
    aliases: []
  content:
    description:
    - Content represents the yaml content you desire to work with.  This
      could be the file contents to write or the inmemory data to modify.
    required: false
    default: None
    aliases: []
  content_type:
    description:
    - The python type of the content parameter.
    required: false
    default: 'dict'
    aliases: []
  key:
    description:
    - The path to the value you wish to modify. Empty string means the top of
      the document.
    required: false
    default: ''
    aliases: []
  value:
    description:
    - The incoming value of parameter 'key'.
    required: false
    default:
    aliases: []
  value_type:
    description:
    - The python type of the incoming value.
    required: false
    default: ''
    aliases: []
  update:
    description:
    - Whether the update should be performed on a dict/hash or list/array
      object.
    required: false
    default: false
    aliases: []
  append:
    description:
    - Whether to append to an array/list. When the key does not exist or is
      null, a new array is created. When the key is of a non-list type,
      nothing is done.
    required: false
    default: false
    aliases: []
  index:
    description:
    - Used in conjunction with the update parameter.  This will update a
      specific index in an array/list.
    required: false
    default: false
    aliases: []
  curr_value:
    description:
    - Used in conjunction with the update parameter.  This is the current
      value of 'key' in the yaml file.
    required: false
    default: false
    aliases: []
  curr_value_format:
    description:
    - Format of the incoming current value.
    choices: ["yaml", "json", "str"]
    required: false
    default: false
    aliases: []
  backup:
    description:
    - Whether to make a backup copy of the current file when performing an
      edit.
    required: false
    default: true
    aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''

EXAMPLES = '''
# Simple insert of key, value
- name: insert simple key, value
  yedit:
    src: somefile.yml
    key: test
    value: somevalue
    state: present
# Results:
# test: somevalue

# Multilevel insert of key, value
- name: insert simple key, value
  yedit:
    src: somefile.yml
    key: a#b#c
    value: d
    state: present
# Results:
# a:
#   b:
#     c: d
'''

''' module for managing yaml files '''

import json
import os
import re
# BUGFIX: shutil is used by Yedit.write() for the backup copy but was
# never imported, so any edit with backup=true raised NameError.
import shutil

import ruamel.yaml as yaml

# This is here because of a bug that causes yaml
# to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
#    ''' return timestamps as strings'''
#    return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)


class YeditException(Exception):
    ''' Exception class for Yedit '''
    pass


class Yedit(object):
    ''' Class to modify yaml files '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self, filename=None, content=None, content_type='yaml',
                 separator='.', backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, value):
        ''' setter method for separator

            BUGFIX: the original setter took no value argument and returned
            self._separator instead of assigning, so ``yedit.separator = x``
            raised TypeError.
        '''
        self._separator = value

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key % ''.join(common_separators), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False

        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk to the parent of the entry to remove.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a#b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                # BUGFIX: was ``data.has_key(dict_key)``; has_key was
                # removed in Python 3, ``in`` works on both.
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    return None

                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif (key_indexes[-1][0] and isinstance(data, list) and
              int(key_indexes[-1][0]) <= len(data) - 1):
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        return data

    def write(self):
        ''' write to file '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        # Write to a temp file then rename so a failed dump cannot
        # truncate the target file.
        tmp_filename = self.filename + '.yedit'
        try:
            with open(tmp_filename, 'w') as yfd:
                # pylint: disable=no-member,maybe-no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()

                yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except Exception as err:
            # BUGFIX: was ``err.message``, which does not exist on Python 3
            # exceptions; str(err) is portable.
            raise YeditException(str(err))

        os.rename(tmp_filename, self.filename)

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' return yaml file '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                # pylint: disable=no-member,maybe-no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # BUGFIX: was ``entry.has_key(...)``; removed in Python 3.
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove path from a dict'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # value must be a sub-mapping of entry.
                # NOTE(review): entry[key] raises KeyError when a key from
                # value is absent from entry -- preserved as-is.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # pylint: disable=no-member,maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in ' +
                                     'dict with non-dict type.' +
                                     ' value=[%s]  [%s]' % (value, type(value)))  # noqa: E501

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)

        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work; round-trip dump/load preserves ruamel
        # formatting metadata instead.
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                  default_flow_style=False),
                             yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work; see put() for the rationale.
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),  # noqa: E501
                                 yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)


def get_curr_value(invalue, val_type):
    '''return the current value'''
    if invalue is None:
        return None

    curr_value = invalue
    if val_type == 'yaml':
        curr_value = yaml.load(invalue)
    elif val_type == 'json':
        curr_value = json.loads(invalue)

    return curr_value


def parse_value(inc_value, vtype=''):
    '''determine value type passed'''
    true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                  'on', 'On', 'ON', ]
    false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                   'off', 'Off', 'OFF']

    # It came in as a string but you didn't specify value_type as string
    # we will convert to bool if it matches any of the above cases
    if isinstance(inc_value, str) and 'bool' in vtype:
        if inc_value not in true_bools and inc_value not in false_bools:
            raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' %
                                 (inc_value, vtype))
    elif isinstance(inc_value, bool) and 'str' in vtype:
        inc_value = str(inc_value)

    # If vtype is not str then go ahead and attempt to yaml load it.
    if isinstance(inc_value, str) and 'str' not in vtype:
        try:
            inc_value = yaml.load(inc_value)
        except Exception:
            raise YeditException('Could not determine type of incoming ' +
                                 'value. value=[%s] vtype=[%s]' %
                                 (type(inc_value), vtype))

    return inc_value


# pylint: disable=too-many-branches
def main():
    ''' ansible oc module for secrets '''

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            src=dict(default=None, type='str'),
            content=dict(default=None),
            content_type=dict(default='dict', choices=['dict']),
            key=dict(default='', type='str'),
            value=dict(),
            value_type=dict(default='', type='str'),
            update=dict(default=False, type='bool'),
            append=dict(default=False, type='bool'),
            index=dict(default=None, type='int'),
            curr_value=dict(default=None, type='str'),
            curr_value_format=dict(default='yaml',
                                   choices=['yaml', 'json', 'str'],
                                   type='str'),
            backup=dict(default=True, type='bool'),
            separator=dict(default='.', type='str'),
        ),
        mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
        required_one_of=[["content", "src"]],
    )

    yamlfile = Yedit(filename=module.params['src'],
                     backup=module.params['backup'],
                     separator=module.params['separator'])

    if module.params['src']:
        rval = yamlfile.load()

        if yamlfile.yaml_dict is None and \
           module.params['state'] != 'present':
            # BUGFIX: the [%s] placeholder was never filled in; format in
            # the source filename so the error is actionable.
            module.fail_json(msg='Error opening file [%s].  Verify that the '
                                 'file exists, that it has correct '
                                 'permissions, and is valid yaml.'
                                 % module.params['src'])

    if module.params['state'] == 'list':
        if module.params['content']:
            content = parse_value(module.params['content'],
                                  module.params['content_type'])
            yamlfile.yaml_dict = content

        if module.params['key']:
            rval = yamlfile.get(module.params['key']) or {}

        module.exit_json(changed=False, result=rval, state="list")

    elif module.params['state'] == 'absent':
        if module.params['content']:
            content = parse_value(module.params['content'],
                                  module.params['content_type'])
            yamlfile.yaml_dict = content

        if module.params['update']:
            rval = yamlfile.pop(module.params['key'],
                                module.params['value'])
        else:
            rval = yamlfile.delete(module.params['key'])

        if rval[0] and module.params['src']:
            yamlfile.write()

        module.exit_json(changed=rval[0], result=rval[1], state="absent")

    elif module.params['state'] == 'present':
        # check if content is different than what is in the file
        if module.params['content']:
            content = parse_value(module.params['content'],
                                  module.params['content_type'])

            # We had no edits to make and the contents are the same
            if yamlfile.yaml_dict == content and \
               module.params['value'] is None:
                module.exit_json(changed=False,
                                 result=yamlfile.yaml_dict,
                                 state="present")

            yamlfile.yaml_dict = content

        # we were passed a value; parse it
        if module.params['value']:
            value = parse_value(module.params['value'],
                                module.params['value_type'])
            key = module.params['key']
            if module.params['update']:
                curr_value = get_curr_value(
                    parse_value(module.params['curr_value']),
                    module.params['curr_value_format'])
                rval = yamlfile.update(key, value,
                                       module.params['index'],
                                       curr_value)
            elif module.params['append']:
                rval = yamlfile.append(key, value)
            else:
                rval = yamlfile.put(key, value)

            if rval[0] and module.params['src']:
                yamlfile.write()

            module.exit_json(changed=rval[0],
                             result=rval[1], state="present")

        # no edits to make
        if module.params['src']:
            rval = yamlfile.write()
            module.exit_json(changed=rval[0],
                             result=rval[1],
                             state="present")

        module.exit_json(changed=False,
                         result=yamlfile.yaml_dict,
                         state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % module.params['state'],  # noqa: E501
                     state="unknown")


# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets.  This are required
if __name__ == '__main__':
    from ansible.module_utils.basic import *

    main()
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal from pytest import raises as assert_raises from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson, bellman_ford, construct_dist_matrix, NegativeCycleError) import scipy.sparse import pytest directed_G = np.array([[0, 3, 3, 0, 0], [0, 0, 0, 2, 4], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [2, 0, 0, 2, 0]], dtype=float) undirected_G = np.array([[0, 3, 3, 1, 2], [3, 0, 0, 2, 4], [3, 0, 0, 0, 0], [1, 2, 0, 0, 2], [2, 4, 0, 2, 0]], dtype=float) unweighted_G = (directed_G > 0).astype(float) directed_SP = [[0, 3, 3, 5, 7], [3, 0, 6, 2, 4], [np.inf, np.inf, 0, np.inf, np.inf], [1, 4, 4, 0, 8], [2, 5, 5, 2, 0]] directed_pred = np.array([[-9999, 0, 0, 1, 1], [3, -9999, 0, 1, 1], [-9999, -9999, -9999, -9999, -9999], [3, 0, 0, -9999, 1], [4, 0, 0, 4, -9999]], dtype=float) undirected_SP = np.array([[0, 3, 3, 1, 2], [3, 0, 6, 2, 4], [3, 6, 0, 4, 5], [1, 2, 4, 0, 2], [2, 4, 5, 2, 0]], dtype=float) undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2], [np.inf, 0, np.inf, 2, np.inf], [np.inf, np.inf, 0, np.inf, np.inf], [1, 2, np.inf, 0, 2], [2, np.inf, np.inf, 2, 0]], dtype=float) undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5) undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf undirected_pred = np.array([[-9999, 0, 0, 0, 0], [1, -9999, 0, 1, 1], [2, 0, -9999, 0, 0], [3, 3, 0, -9999, 3], [4, 4, 0, 4, -9999]], dtype=float) methods = ['auto', 'FW', 'D', 'BF', 'J'] def test_dijkstra_limit(): limits = [0, 2, np.inf] results = [undirected_SP_limit_0, undirected_SP_limit_2, undirected_SP] def check(limit, result): SP = dijkstra(undirected_G, directed=False, limit=limit) assert_array_almost_equal(SP, result) for limit, result in zip(limits, results): check(limit, result) def test_directed(): def check(method): SP = shortest_path(directed_G, method=method, directed=True, overwrite=False) 
assert_array_almost_equal(SP, directed_SP) for method in methods: check(method) def test_undirected(): def check(method, directed_in): if directed_in: SP1 = shortest_path(directed_G, method=method, directed=False, overwrite=False) assert_array_almost_equal(SP1, undirected_SP) else: SP2 = shortest_path(undirected_G, method=method, directed=True, overwrite=False) assert_array_almost_equal(SP2, undirected_SP) for method in methods: for directed_in in (True, False): check(method, directed_in) @pytest.mark.parametrize('directed, SP_ans', ((True, directed_SP), (False, undirected_SP))) @pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4])) def test_dijkstra_indices_min_only(directed, SP_ans, indices): SP_ans = np.array(SP_ans) indices = np.array(indices, dtype=np.int64) min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)] min_d_ans = np.zeros(SP_ans.shape[0], SP_ans.dtype) for k in range(SP_ans.shape[0]): min_d_ans[k] = SP_ans[min_ind_ans[k], k] min_ind_ans[np.isinf(min_d_ans)] = -9999 SP, pred, sources = dijkstra(directed_G, directed=directed, indices=indices, min_only=True, return_predecessors=True) assert_array_almost_equal(SP, min_d_ans) assert_array_equal(min_ind_ans, sources) SP = dijkstra(directed_G, directed=directed, indices=indices, min_only=True, return_predecessors=False) assert_array_almost_equal(SP, min_d_ans) @pytest.mark.parametrize('n', (10, 100, 1000)) def test_shortest_path_min_only_random(n): np.random.seed(1234) data = scipy.sparse.rand(n, n, density=0.5, format='lil', random_state=42, dtype=np.float) data.setdiag(np.zeros(n, dtype=np.bool)) # choose some random vertices v = np.arange(n) np.random.shuffle(v) indices = v[:int(n*.1)] ds, pred, sources = dijkstra(data, directed=False, indices=indices, min_only=True, return_predecessors=True) for k in range(n): p = pred[k] s = sources[k] while(p != -9999): assert(sources[p] == s) p = pred[p] def test_shortest_path_indices(): indices = np.arange(4) def check(func, indshape): outshape = 
indshape + (5,) SP = func(directed_G, directed=False, indices=indices.reshape(indshape)) assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape)) for indshape in [(4,), (4, 1), (2, 2)]: for func in (dijkstra, bellman_ford, johnson, shortest_path): check(func, indshape) assert_raises(ValueError, shortest_path, directed_G, method='FW', indices=indices) def test_predecessors(): SP_res = {True: directed_SP, False: undirected_SP} pred_res = {True: directed_pred, False: undirected_pred} def check(method, directed): SP, pred = shortest_path(directed_G, method, directed=directed, overwrite=False, return_predecessors=True) assert_array_almost_equal(SP, SP_res[directed]) assert_array_almost_equal(pred, pred_res[directed]) for method in methods: for directed in (True, False): check(method, directed) def test_construct_shortest_path(): def check(method, directed): SP1, pred = shortest_path(directed_G, directed=directed, overwrite=False, return_predecessors=True) SP2 = construct_dist_matrix(directed_G, pred, directed=directed) assert_array_almost_equal(SP1, SP2) for method in methods: for directed in (True, False): check(method, directed) def test_unweighted_path(): def check(method, directed): SP1 = shortest_path(directed_G, directed=directed, overwrite=False, unweighted=True) SP2 = shortest_path(unweighted_G, directed=directed, overwrite=False, unweighted=False) assert_array_almost_equal(SP1, SP2) for method in methods: for directed in (True, False): check(method, directed) def test_negative_cycles(): # create a small graph with a negative cycle graph = np.ones([5, 5]) graph.flat[::6] = 0 graph[1, 2] = -2 def check(method, directed): assert_raises(NegativeCycleError, shortest_path, graph, method, directed) for method in ['FW', 'J', 'BF']: for directed in (True, False): check(method, directed) def test_masked_input(): G = np.ma.masked_equal(directed_G, 0) def check(method): SP = shortest_path(directed_G, method=method, directed=True, overwrite=False) 
assert_array_almost_equal(SP, directed_SP) for method in methods: check(method) def test_overwrite(): G = np.array([[0, 3, 3, 1, 2], [3, 0, 0, 2, 4], [3, 0, 0, 0, 0], [1, 2, 0, 0, 2], [2, 4, 0, 2, 0]], dtype=float) foo = G.copy() shortest_path(foo, overwrite=False) assert_array_equal(foo, G) @pytest.mark.parametrize('method', methods) def test_buffer(method): # Smoke test that sparse matrices with read-only buffers (e.g., those from # joblib workers) do not cause:: # # ValueError: buffer source array is read-only # G = scipy.sparse.csr_matrix([[1.]]) G.data.flags['WRITEABLE'] = False shortest_path(G, method=method)
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A driver for XenServer or Xen Cloud Platform.

**Related Flags**

:xenapi_connection_url:  URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username:  Username for connection to XenServer/Xen Cloud
                              Platform (default: root).
:xenapi_connection_password:  Password for connection to XenServer/Xen Cloud
                              Platform.
:target_host:             the iSCSI Target Host IP address, i.e. the IP
                          address for the nova-volume host
:target_port:             iSCSI Target Port, 3260 Default
:iqn_prefix:              IQN Prefix, e.g. 'iqn.2010-10.org.openstack'

**Variable Naming Scheme**

- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""

import contextlib
import cPickle as pickle
import urlparse
import xmlrpclib

from eventlet import queue
from eventlet import timeout

from nova import context
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops

LOG = logging.getLogger(__name__)

# Configuration options controlling the XenAPI connection and driver
# behaviour; registered on the global CONF object below.
xenapi_opts = [
    cfg.StrOpt('xenapi_connection_url',
               default=None,
               help='URL for connection to XenServer/Xen Cloud Platform. '
                    'Required if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_username',
               default='root',
               help='Username for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_password',
               default=None,
               help='Password for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.IntOpt('xenapi_connection_concurrent',
               default=5,
               help='Maximum number of concurrent XenAPI connections. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval',
                 default=5.0,
                 help='The interval used for polling of coalescing vhds. '
                      'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.BoolOpt('xenapi_check_host',
                default=True,
                help='Ensure compute service is running on host XenAPI '
                     'connects to.'),
    cfg.IntOpt('xenapi_vhd_coalesce_max_attempts',
               default=5,
               help='Max number of times to poll for VHD to coalesce. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_sr_base_path',
               default='/var/run/sr-mount',
               help='Base path to the storage repository'),
    cfg.StrOpt('target_host',
               default=None,
               help='iSCSI Target Host'),
    cfg.StrOpt('target_port',
               default='3260',
               help='iSCSI Target Port, 3260 Default'),
    cfg.StrOpt('iqn_prefix',
               default='iqn.2010-10.org.openstack',
               help='IQN Prefix'),
    # NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
    # when we pull support for it, we should remove this
    cfg.BoolOpt('xenapi_remap_vbd_dev',
                default=False,
                help='Used to enable the remapping of VBD dev '
                     '(Works around an issue in Ubuntu Maverick)'),
    cfg.StrOpt('xenapi_remap_vbd_dev_prefix',
               default='sd',
               help='Specify prefix to remap VBD dev to '
                    '(ex. /dev/xvdb -> /dev/sdb)'),
    cfg.IntOpt('xenapi_login_timeout',
               default=10,
               help='Timeout in seconds for XenAPI login.'),
]

CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
CONF.import_opt('host', 'nova.config')


class XenAPIDriver(driver.ComputeDriver):
    """A connection to XenServer or Xen Cloud Platform.

    Thin driver facade: almost every operation delegates to the
    specialised helpers created in __init__ (vmops, volumeops, host,
    pool) through a shared XenAPISession.
    """

    def __init__(self, virtapi, read_only=False):
        super(XenAPIDriver, self).__init__(virtapi)

        url = CONF.xenapi_connection_url
        username = CONF.xenapi_connection_username
        password = CONF.xenapi_connection_password
        # URL and password are mandatory; the username defaults to 'root'
        # via the option definition above, hence "optionally" in the
        # message.
        if not url or password is None:
            raise Exception(_('Must specify xenapi_connection_url, '
                              'xenapi_connection_username (optionally), and '
                              'xenapi_connection_password to use '
                              'compute_driver=xenapi.XenAPIDriver'))

        self._session = XenAPISession(url, username, password, self.virtapi)
        self._volumeops = volumeops.VolumeOps(self._session)
        self._host_state = None
        self._host = host.Host(self._session, self.virtapi)
        self._vmops = vmops.VMOps(self._session, self.virtapi)
        # Lazily discovered in get_volume_connector().
        self._initiator = None
        self._hypervisor_hostname = None
        self._pool = pool.ResourcePool(self._session, self.virtapi)

    @property
    def host_state(self):
        # Lazily created so the session is only queried when stats are
        # first requested.
        if not self._host_state:
            self._host_state = host.HostState(self._session)
        return self._host_state

    def init_host(self, host):
        # Optionally verify we are connected to the host we run on, then
        # clean up VDIs left attached by a previous crash (best effort).
        if CONF.xenapi_check_host:
            vm_utils.ensure_correct_host(self._session)

        try:
            vm_utils.cleanup_attached_vdis(self._session)
        except Exception:
            LOG.exception(_('Failure while cleaning up attached VDIs'))

    def list_instances(self):
        """List VM instances"""
        return self._vmops.list_instances()

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create VM instance"""
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)

    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM"""
        # TODO(Vek): Need to pass context in for access to auth_token
        self._vmops.confirm_migration(migration, instance, network_info)

    def finish_revert_migration(self, instance, network_info,
                                block_device_info=None):
        """Finish reverting a resize, powering back on the instance"""
        # NOTE(vish): Xen currently does not use network info.
        self._vmops.finish_revert_migration(instance, block_device_info)

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance=False,
                         block_device_info=None):
        """Completes a resize, turning on the migrated instance"""
        self._vmops.finish_migration(context, migration, instance, disk_info,
                                     network_info, image_meta, resize_instance,
                                     block_device_info)

    def snapshot(self, context, instance, image_id):
        """Create snapshot from a running VM instance"""
        self._vmops.snapshot(context, instance, image_id)

    def reboot(self, instance, network_info, reboot_type,
               block_device_info=None):
        """Reboot VM instance"""
        self._vmops.reboot(instance, reboot_type)

    def set_admin_password(self, instance, new_pass):
        """Set the root/admin password on the VM instance"""
        self._vmops.set_admin_password(instance, new_pass)

    def inject_file(self, instance, b64_path, b64_contents):
        """Create a file on the VM instance. The file path and contents
        should be base64-encoded.
        """
        self._vmops.inject_file(instance, b64_path, b64_contents)

    def change_instance_metadata(self, context, instance, diff):
        """Apply a diff to the instance metadata."""
        self._vmops.change_instance_metadata(instance, diff)

    def destroy(self, instance, network_info, block_device_info=None):
        """Destroy VM instance"""
        self._vmops.destroy(instance, network_info, block_device_info)

    def pause(self, instance):
        """Pause VM instance"""
        self._vmops.pause(instance)

    def unpause(self, instance):
        """Unpause paused VM instance"""
        self._vmops.unpause(instance)

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type, network_info,
                                   block_device_info=None):
        """Transfers the VHD of a running instance to another host, then shuts
        off the instance copies over the COW disk"""
        # NOTE(vish): Xen currently does not use network info.
        rv = self._vmops.migrate_disk_and_power_off(context, instance,
                                                    dest, instance_type)
        # Detach any attached volumes from the powered-off source VM so
        # they can be re-attached on the destination.
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        name_label = self._vmops._get_orig_vm_name_label(instance)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            mount_device = vol['mount_device'].rpartition("/")[2]
            self._volumeops.detach_volume(connection_info,
                                          name_label, mount_device)
        return rv

    def suspend(self, instance):
        """suspend the specified instance"""
        self._vmops.suspend(instance)

    def resume(self, instance, network_info, block_device_info=None):
        """resume the specified instance"""
        self._vmops.resume(instance)

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance"""
        self._vmops.rescue(context, instance, network_info, image_meta,
                           rescue_password)

    def unrescue(self, instance, network_info):
        """Unrescue the specified instance"""
        self._vmops.unrescue(instance)

    def power_off(self, instance):
        """Power off the specified instance"""
        self._vmops.power_off(instance)

    def power_on(self, instance):
        """Power on the specified instance"""
        self._vmops.power_on(instance)

    def soft_delete(self, instance):
        """Soft delete the specified instance"""
        self._vmops.soft_delete(instance)

    def restore(self, instance):
        """Restore the specified instance"""
        self._vmops.restore(instance)

    def poll_rebooting_instances(self, timeout, instances):
        """Poll for rebooting instances"""
        self._vmops.poll_rebooting_instances(timeout, instances)

    def reset_network(self, instance):
        """reset networking for specified instance"""
        self._vmops.reset_network(instance)

    def inject_network_info(self, instance, network_info):
        """inject network info for specified instance"""
        self._vmops.inject_network_info(instance, network_info)

    def plug_vifs(self, instance_ref, network_info):
        """Plug VIFs into networks."""
        self._vmops.plug_vifs(instance_ref, network_info)

    def unplug_vifs(self, instance_ref, network_info):
        """Unplug VIFs from networks."""
        self._vmops.unplug_vifs(instance_ref, network_info)

    def get_info(self, instance):
        """Return data about VM instance"""
        return self._vmops.get_info(instance)

    def get_diagnostics(self, instance):
        """Return data about VM diagnostics"""
        return self._vmops.get_diagnostics(instance)

    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
           running VM"""

        # we only care about VMs that correspond to a nova-managed
        # instance:
        imap = dict([(inst.name, inst.uuid) for inst in instances])

        bwcounters = []

        # get a dictionary of instance names.  values are dictionaries
        # of mac addresses with values that are the bw counters:
        # e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
        all_counters = self._vmops.get_all_bw_counters()
        for instance_name, counters in all_counters.iteritems():
            if instance_name in imap:
                # yes these are stats for a nova-managed vm
                # correlate the stats with the nova instance uuid:
                for vif_counter in counters.values():
                    vif_counter['uuid'] = imap[instance_name]
                    bwcounters.append(vif_counter)
        return bwcounters

    def get_console_output(self, instance):
        """Return snapshot of console"""
        return self._vmops.get_console_output(instance)

    def get_vnc_console(self, instance):
        """Return link to instance's VNC console"""
        return self._vmops.get_vnc_console(instance)

    def get_volume_connector(self, instance):
        """Return volume connector information"""
        # The iSCSI IQN and hostname are cached after the first lookup;
        # on a stats/key failure only a warning is logged and a
        # connector with initiator=None is returned.
        if not self._initiator or not self._hypervisor_hostname:
            stats = self.get_host_stats(refresh=True)
            try:
                self._initiator = stats['host_other-config']['iscsi_iqn']
                self._hypervisor_hostname = stats['host_hostname']
            except (TypeError, KeyError) as err:
                LOG.warn(_('Could not determine key: %s') % err,
                         instance=instance)
                self._initiator = None
        return {
            'ip': self.get_host_ip_addr(),
            'initiator': self._initiator,
            'host': self._hypervisor_hostname
        }

    @staticmethod
    def get_host_ip_addr():
        # Derive the compute host address from the XenAPI connection URL.
        xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
        return xs_url.netloc

    def attach_volume(self, connection_info, instance_name, mountpoint):
        """Attach volume storage to VM instance"""
        return self._volumeops.attach_volume(connection_info,
                                             instance_name,
                                             mountpoint)

    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach volume storage to VM instance"""
        return self._volumeops.detach_volume(connection_info,
                                             instance_name,
                                             mountpoint)

    def get_console_pool_info(self, console_type):
        xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
        return {'address': xs_url.netloc,
                'username': CONF.xenapi_connection_username,
                'password': CONF.xenapi_connection_password}

    def get_available_resource(self, nodename):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :param nodename: ignored in this driver
        :returns: dictionary describing resources

        """
        host_stats = self.get_host_stats(refresh=True)

        # Updating host information
        total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
        # NOTE(belliott) memory-free-computed is a value provided by XenServer
        # for gauging free memory more conservatively than memory-free.
        free_ram_mb = host_stats['host_memory_free_computed'] / (1024 * 1024)
        total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
        used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)

        dic = {'vcpus': 0,
               'memory_mb': total_ram_mb,
               'local_gb': total_disk_gb,
               'vcpus_used': 0,
               'memory_mb_used': total_ram_mb - free_ram_mb,
               'local_gb_used': used_disk_gb,
               'hypervisor_type': 'xen',
               'hypervisor_version': 0,
               'hypervisor_hostname': host_stats['host_hostname'],
               'cpu_info': host_stats['host_cpu_info']['cpu_count']}

        return dic

    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        # NOTE(salvatore-orlando): it enforces security groups on
        # host initialization and live migration.
        # In XenAPI we do not assume instances running upon host initialization
        return

    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Check if it is possible to execute live migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit

        """
        # NOTE(review): src/dst_compute_info are accepted for interface
        # compatibility but not forwarded to vmops.
        return self._vmops.check_can_live_migrate_destination(ctxt,
                                                              instance_ref,
                                                              block_migration,
                                                              disk_over_commit)

    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls

        :param ctxt: security context
        :param disk_over_commit: if true, allow disk over commit

        """
        pass

    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
                                includes the block_migration flag

        """
        self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
                                                  dest_check_data)

    def get_instance_disk_info(self, instance_name):
        """Used by libvirt for live migration. We rely on xenapi
        checks to do this for us."""
        pass

    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
        """Used by libvirt for live migration. We rely on xenapi
        checks to do this for us. May be used in the future to
        populate the vdi/vif maps"""
        pass

    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Performs the live migration of the specified instance.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, migrate VM disk.
        :params migrate_data: implementation specific params

        """
        self._vmops.live_migrate(ctxt, instance_ref, dest, post_method,
                                 recover_method, block_migration, migrate_data)

    def pre_live_migration(self, context, instance_ref, block_device_info,
                           network_info, migrate_data=None):
        """Preparation live migration.

        :params block_device_info:
            It must be the result of _get_instance_volume_bdms()
            at compute manager.
        """
        # TODO(JohnGarbutt) look again when boot-from-volume hits trunk
        pass

    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info, block_migration):
        """Post operation of live migration at destination host.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params network_info: instance network infomation
        :params : block_migration: if true, post operation of block_migraiton.

        """
        # TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel
        pass

    def unfilter_instance(self, instance_ref, network_info):
        """Removes security groups configured for an instance."""
        return self._vmops.unfilter_instance(instance_ref, network_info)

    def refresh_security_group_rules(self, security_group_id):
        """Updates security group rules for all instances
           associated with a given security group
           Invoked when security group rules are updated
        """
        return self._vmops.refresh_security_group_rules(security_group_id)

    def refresh_security_group_members(self, security_group_id):
        """Updates security group rules for all instances
           associated with a given security group
           Invoked when instances are added/removed to a security group
        """
        return self._vmops.refresh_security_group_members(security_group_id)

    def refresh_instance_security_rules(self, instance):
        """Updates security group rules for specified instance
           Invoked when instances are added/removed to a security group
           or when a rule is added/removed to a security group
        """
        return self._vmops.refresh_instance_security_rules(instance)

    def refresh_provider_fw_rules(self):
        return self._vmops.refresh_provider_fw_rules()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host. If 'refresh' is
           True, run the update first."""
        return self.host_state.get_host_stats(refresh=refresh)

    def host_power_action(self, host, action):
        """The only valid values for 'action' on XenServer are 'reboot' or
        'shutdown', even though the API also accepts 'startup'. As this is
        not technically possible on XenServer, since the host is the same
        physical machine as the hypervisor, if this is requested, we need to
        raise an exception.
        """
        if action in ("reboot", "shutdown"):
            return self._host.host_power_action(host, action)
        else:
            msg = _("Host startup on XenServer is not supported.")
            raise NotImplementedError(msg)

    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        return self._host.set_host_enabled(host, enabled)

    def get_host_uptime(self, host):
        """Returns the result of calling "uptime" on the target host."""
        return self._host.get_host_uptime(host)

    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation."""
        return self._host.host_maintenance_mode(host, mode)

    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate."""
        return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)

    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate."""
        return self._pool.remove_from_aggregate(context, aggregate,
                                                host, **kwargs)

    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error=True):
        """Undo aggregate operation when pool error raised"""
        return self._pool.undo_aggregate_operation(context, op,
                                                   aggregate, host, set_error)

    def legacy_nwinfo(self):
        """Indicate if the driver requires the legacy network_info format.
        """
        # TODO(tr3buchet): remove this function once all virts return false
        return False

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """resume guest state when a host is booted"""
        self._vmops.power_on(instance)

    def get_per_instance_usage(self):
        """Get information about instance resource usage.

        :returns: dict of nova uuid => dict of usage info
        """
        return self._vmops.get_per_instance_usage()


class XenAPISession(object):
    """The session to invoke XenAPI SDK calls.

    Maintains a pool of logged-in sessions (a queue of size
    xenapi_connection_concurrent) and hands one out per call via the
    _get_session() context manager.
    """

    def __init__(self, url, user, pw, virtapi):
        # Imported lazily so the module is only required when the driver
        # is actually instantiated.
        import XenAPI
        self.XenAPI = XenAPI
        self._sessions = queue.Queue()
        self.is_slave = False
        exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                          "(is the Dom0 disk full?)"))
        url = self._create_first_session(url, user, pw, exception)
        self._populate_session_pool(url, user, pw, exception)
        self.host_uuid = self._get_host_uuid()
        self.product_version, self.product_brand = \
            self._get_product_version_and_brand()
        self._virtapi = virtapi

    def _create_first_session(self, url, user, pw, exception):
        # Log in once; if the target turns out to be a pool slave,
        # redirect the URL to the pool master and retry.
        try:
            session = self._create_session(url)
            with timeout.Timeout(CONF.xenapi_login_timeout, exception):
                session.login_with_password(user, pw)
        except self.XenAPI.Failure, e:
            # if user and pw of the master are different, we're doomed!
            if e.details[0] == 'HOST_IS_SLAVE':
                master = e.details[1]
                url = pool.swap_xapi_host(url, master)
                session = self.XenAPI.Session(url)
                session.login_with_password(user, pw)
                self.is_slave = True
            else:
                raise
        self._sessions.put(session)
        return url

    def _populate_session_pool(self, url, user, pw, exception):
        # One session was already created by _create_first_session,
        # hence "concurrent - 1" more here.
        for i in xrange(CONF.xenapi_connection_concurrent - 1):
            session = self._create_session(url)
            with timeout.Timeout(CONF.xenapi_login_timeout, exception):
                session.login_with_password(user, pw)
            self._sessions.put(session)

    def _get_host_uuid(self):
        # On a pool slave the local host UUID is recorded in the
        # aggregate metadata keyed by this host's name; otherwise ask
        # XenAPI directly.
        if self.is_slave:
            aggr = self._virtapi.aggregate_get_by_host(
                context.get_admin_context(),
                CONF.host, key=pool_states.POOL_FLAG)[0]
            if not aggr:
                LOG.error(_('Host is member of a pool, but DB '
                            'says otherwise'))
                raise exception.AggregateHostNotFound()
            return aggr.metadetails[CONF.host]
        else:
            with self._get_session() as session:
                host_ref = session.xenapi.session.get_this_host(session.handle)
                return session.xenapi.host.get_uuid(host_ref)

    def _get_product_version_and_brand(self):
        """Return a tuple of (major, minor, rev) for the host version and
        a string of the product brand"""
        # NOTE(review): returns (None, None) when either field is missing
        # from the software_version record.
        software_version = self._get_software_version()

        product_version_str = software_version.get('product_version')
        product_brand = software_version.get('product_brand')
        if None in (product_version_str, product_brand):
            return (None, None)
        product_version = tuple(int(part) for part in
                                product_version_str.split('.'))
        return product_version, product_brand

    def _get_software_version(self):
        # Full software_version record of the local host.
        host = self.get_xenapi_host()
        return self.call_xenapi('host.get_software_version', host)

    def get_session_id(self):
        """Return a string session_id.  Used for vnc consoles."""
        with self._get_session() as session:
            return str(session._session)

    @contextlib.contextmanager
    def _get_session(self):
        """Return exclusive session for scope of with statement"""
        session = self._sessions.get()
        try:
            yield session
        finally:
            # Always return the session to the pool, even on error.
            self._sessions.put(session)

    def get_xenapi_host(self):
        """Return the xenapi host on which nova-compute runs on."""
        with self._get_session() as session:
            return session.xenapi.host.get_by_uuid(self.host_uuid)

    def call_xenapi(self, method, *args):
        """Call the specified XenAPI method on a background thread."""
        with self._get_session() as session:
            return session.xenapi_request(method, args)

    def call_plugin(self, plugin, fn, args):
        """Call host.call_plugin on a background thread."""
        # NOTE(johannes): Fetch host before we acquire a session. Since
        # get_xenapi_host() acquires a session too, it can result in a
        # deadlock if multiple greenthreads race with each other. See
        # bug 924918
        host = self.get_xenapi_host()

        # NOTE(armando): pass the host uuid along with the args so that
        # the plugin gets executed on the right host when using XS pools
        args['host_uuid'] = self.host_uuid

        with self._get_session() as session:
            return self._unwrap_plugin_exceptions(
                session.xenapi.host.call_plugin,
                host, plugin, fn, args)

    def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
        """Call a plugin, pickling args/kwargs in and the result out."""
        params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
        rv = self.call_plugin(plugin, fn, params)
        return pickle.loads(rv)

    def _create_session(self, url):
        """Stubout point. This can be replaced with a mock session."""
        return self.XenAPI.Session(url)

    def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
        """Parse exception details"""
        try:
            return func(*args, **kwargs)
        except self.XenAPI.Failure, exc:
            LOG.debug(_("Got exception: %s"), exc)
            if (len(exc.details) == 4 and
                exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
                exc.details[2] == 'Failure'):
                params = None
                try:
                    # FIXME(comstud): eval is evil.
                    # NOTE(review): security risk — exc.details[3] comes
                    # from the Dom0 plugin; eval'ing it executes arbitrary
                    # code if that channel is ever compromised.
                    params = eval(exc.details[3])
                except Exception:
                    raise exc
                raise self.XenAPI.Failure(params)
            else:
                raise
        except xmlrpclib.ProtocolError, exc:
            LOG.debug(_("Got exception: %s"), exc)
            raise

    def get_rec(self, record_type, ref):
        """Fetch the record for *ref*; return None if the handle is stale."""
        try:
            return self.call_xenapi('%s.get_record' % record_type, ref)
        except self.XenAPI.Failure, e:
            if e.details[0] != 'HANDLE_INVALID':
                raise

        return None

    def get_all_refs_and_recs(self, record_type):
        """Retrieve all refs and recs for a Xen record type.

        Handles race-conditions where the record may be deleted
        between the `get_all` call and the `get_record` call.
        """

        for ref in self.call_xenapi('%s.get_all' % record_type):
            rec = self.get_rec(record_type, ref)
            # Check to make sure the record still exists. It may have
            # been deleted between the get_all call and get_record call
            if rec:
                yield ref, rec
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit tests for gclient.py.

See gclient_smoketest.py for integration tests.
"""

import Queue
import copy
import logging
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import gclient
import gclient_utils
from testing_support import trial_dir


def write(filename, content):
  """Writes the content of a file and create the directories as needed."""
  filename = os.path.abspath(filename)
  dirname = os.path.dirname(filename)
  if not os.path.isdir(dirname):
    os.makedirs(dirname)
  with open(filename, 'w') as f:
    f.write(content)


class SCMMock(object):
  """Stand-in for gclient_scm.CreateSCM: records each processed URL on the
  test's queue instead of touching a real checkout."""

  def __init__(self, unit_test, url):
    self.unit_test = unit_test
    self.url = url

  def RunCommand(self, command, options, args, file_list):
    self.unit_test.assertEquals('None', command)
    self.unit_test.processed.put(self.url)

  def FullUrlForRelativeUrl(self, url):
    return self.url + url

  # pylint: disable=R0201
  def DoesRemoteURLMatch(self, _):
    return True

  def GetActualRemoteURL(self, _):
    return self.url


class GclientTest(trial_dir.TestCase):
  def setUp(self):
    super(GclientTest, self).setUp()
    self.processed = Queue.Queue()
    self.previous_dir = os.getcwd()
    os.chdir(self.root_dir)
    # Manual mocks.
    self._old_createscm = gclient.gclient_scm.CreateSCM
    gclient.gclient_scm.CreateSCM = self._createscm
    self._old_sys_stdout = sys.stdout
    # Wrap stdout the same way gclient's entry point does, so output from
    # parallel jobs stays flushed and annotated during the tests.
    sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
    sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)

  def tearDown(self):
    # Every enqueued URL must have been consumed by the test body.
    self.assertEquals([], self._get_processed())
    gclient.gclient_scm.CreateSCM = self._old_createscm
    sys.stdout = self._old_sys_stdout
    os.chdir(self.previous_dir)
    super(GclientTest, self).tearDown()

  def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
    """Factory injected in place of gclient_scm.CreateSCM; returns SCMMock."""
    self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
    self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
    return SCMMock(self, parsed_url)

  def testDependencies(self):
    self._dependencies('1')

  def testDependenciesJobs(self):
    self._dependencies('1000')

  def _dependencies(self, jobs):
    """Verifies that dependencies are processed in the right order.

    e.g. if there is a dependency 'src' and another 'src/third_party/bar', that
    bar isn't fetched until 'src' is done.
    Also test that a From() dependency should not be processed when it is listed
    as a requirement.

    Args:
      |jobs| is the number of parallel jobs simulated.
    """
    parser = gclient.OptionParser()
    options, args = parser.parse_args(['--jobs', jobs])
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        '  { "name": "bar", "url": "svn://example.com/bar" },\n'
        '  { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",\n'
        # This one will depend on dir1/dir2 in bar.
        '  "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
        '  "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
        '  "foo/dir1/dir2/dir5/dir6":\n'
        '    From("foo/dir1/dir2/dir3/dir4", "foo/dir1/dir2"),\n'
        '}')
    write(
        os.path.join('bar', 'DEPS'),
        'deps = {\n'
        # There are two foo/dir1/dir2. This one is fetched as bar/dir1/dir2.
        '  "foo/dir1/dir2": "/dir1/dir2",\n'
        '}')
    write(
        os.path.join('bar/empty', 'DEPS'),
        'deps = {\n'
        '}')
    # Test From()
    write(
        os.path.join('foo/dir1/dir2/dir3/dir4', 'DEPS'),
        'deps = {\n'
        # This one should not be fetched or set as a requirement.
        '  "foo/dir1/dir2/dir5": "svn://example.com/x",\n'
        # This foo/dir1/dir2 points to a different url than the one in bar.
        '  "foo/dir1/dir2": "/dir1/another",\n'
        '}')

    obj = gclient.GClient.LoadCurrentConfig(options)
    self._check_requirements(obj.dependencies[0], {})
    self._check_requirements(obj.dependencies[1], {})
    obj.RunOnDeps('None', args)
    actual = self._get_processed()
    first_3 = [
        'svn://example.com/bar',
        'svn://example.com/bar_empty',
        'svn://example.com/foo',
    ]
    if jobs != 1:
      # We don't care of the ordering of these items except that bar must be
      # before bar/empty.
      self.assertTrue(
          actual.index('svn://example.com/bar') <
          actual.index('svn://example.com/bar_empty'))
      self.assertEquals(first_3, sorted(actual[0:3]))
    else:
      self.assertEquals(first_3, actual[0:3])
    self.assertEquals(
        [
          'svn://example.com/foo/dir1',
          'svn://example.com/bar/dir1/dir2',
          'svn://example.com/foo/dir1/dir2/dir3',
          'svn://example.com/foo/dir1/dir2/dir3/dir4',
          'svn://example.com/foo/dir1/dir2/dir3/dir4/dir1/another',
        ],
        actual[3:])
    self.assertEquals(3, len(obj.dependencies))
    self.assertEquals('foo', obj.dependencies[0].name)
    self.assertEquals('bar', obj.dependencies[1].name)
    self.assertEquals('bar/empty', obj.dependencies[2].name)
    self._check_requirements(
        obj.dependencies[0],
        {
          'foo/dir1': ['bar', 'bar/empty', 'foo'],
          'foo/dir1/dir2/dir3':
              ['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
          'foo/dir1/dir2/dir3/dir4':
              ['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
               'foo/dir1/dir2/dir3'],
          'foo/dir1/dir2/dir5/dir6':
              ['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
               'foo/dir1/dir2/dir3/dir4'],
        })
    self._check_requirements(
        obj.dependencies[1],
        {
          'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
        })
    self._check_requirements(
        obj.dependencies[2],
        {})
    self._check_requirements(
        obj,
        {
          'foo': [],
          'bar': [],
          'bar/empty': ['bar'],
        })

  def _check_requirements(self, solution, expected):
    """Asserts each direct dependency's sorted requirements match |expected|,
    and that |expected| names no dependency the solution lacks."""
    for dependency in solution.dependencies:
      e = expected.pop(dependency.name)
      a = sorted(dependency.requirements)
      self.assertEquals(e, a, (dependency.name, e, a))
    self.assertEquals({}, expected)

  def _get_processed(self):
    """Retrieves the item in the order they were processed."""
    items = []
    try:
      while True:
        items.append(self.processed.get_nowait())
    except Queue.Empty:
      pass
    return items

  def testAutofix(self):
    # Invalid urls cause pain when specifying requirements. Make sure it's
    # auto-fixed.
    d = gclient.Dependency(
        None, 'name', 'proto://host/path/@revision', None, None, None, None,
        None, '', True)
    self.assertEquals('proto://host/path@revision', d.url)

  def testStr(self):
    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    obj = gclient.GClient('foo', options)
    obj.add_dependencies_and_close(
      [
        gclient.Dependency(
          obj, 'foo', 'url', None, None, None, None, None, 'DEPS', True),
        gclient.Dependency(
          obj, 'bar', 'url', None, None, None, None, None, 'DEPS', True),
      ],
      [])
    obj.dependencies[0].add_dependencies_and_close(
      [
        gclient.Dependency(
          obj.dependencies[0], 'foo/dir1', 'url', None, None, None, None,
          None, 'DEPS', True),
        gclient.Dependency(
          obj.dependencies[0], 'foo/dir2',
          gclient.GClientKeywords.FromImpl('bar'), None, None, None, None,
          None, 'DEPS', True),
        gclient.Dependency(
          obj.dependencies[0], 'foo/dir3',
          gclient.GClientKeywords.FileImpl('url'), None, None, None, None,
          None, 'DEPS', True),
      ],
      [])
    # Make sure __str__() works fine.
    # pylint: disable=W0212
    obj.dependencies[0]._file_list.append('foo')
    str_obj = str(obj)
    # NOTE(review): pins the exact rendered length; brittle by design so any
    # change to Dependency.__str__ is noticed.
    self.assertEquals(471, len(str_obj), '%d\n%s' % (len(str_obj), str_obj))

  def testHooks(self):
    topdir = self.root_dir
    gclient_fn = os.path.join(topdir, '.gclient')
    fh = open(gclient_fn, 'w')
    print >> fh, 'solutions = [{"name":"top","url":"svn://example.com/top"}]'
    fh.close()
    subdir_fn = os.path.join(topdir, 'top')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
    print >> fh, 'hooks = %s' % repr(hooks)
    fh.close()

    fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
    print >> fh, 'bogus content'
    fh.close()

    os.chdir(topdir)

    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    options.force = True
    client = gclient.GClient.LoadCurrentConfig(options)
    work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
    for s in client.dependencies:
      work_queue.enqueue(s)
    work_queue.flush({}, None, [], options=options)
    self.assertEqual(client.GetHooks(options), [x['action'] for x in hooks])

  def testCustomHooks(self):
    topdir = self.root_dir
    gclient_fn = os.path.join(topdir, '.gclient')
    fh = open(gclient_fn, 'w')
    extra_hooks = [{'name': 'append', 'pattern':'.', 'action':['supercmd']}]
    print >> fh, ('solutions = [{"name":"top","url":"svn://example.com/top",'
        '"custom_hooks": %s},' ) % repr(extra_hooks + [{'name': 'skip'}])
    print >> fh, '{"name":"bottom","url":"svn://example.com/bottom"}]'
    fh.close()
    subdir_fn = os.path.join(topdir, 'top')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
    hooks.append({'pattern':'.', 'action':['cmd2', 'arg1', 'arg2']})
    skip_hooks = [
        {'name': 'skip', 'pattern':'.', 'action':['cmd3', 'arg1', 'arg2']}]
    skip_hooks.append(
        {'name': 'skip', 'pattern':'.', 'action':['cmd4', 'arg1', 'arg2']})
    print >> fh, 'hooks = %s' % repr(hooks + skip_hooks)
    fh.close()

    # Make sure the custom hooks for that project don't affect the next one.
    subdir_fn = os.path.join(topdir, 'bottom')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    sub_hooks = [{'pattern':'.', 'action':['response1', 'yes1', 'yes2']}]
    sub_hooks.append(
        {'name': 'skip', 'pattern':'.', 'action':['response2', 'yes', 'sir']})
    print >> fh, 'hooks = %s' % repr(sub_hooks)
    fh.close()

    fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
    print >> fh, 'bogus content'
    fh.close()

    os.chdir(topdir)

    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    options.force = True
    client = gclient.GClient.LoadCurrentConfig(options)
    work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
    for s in client.dependencies:
      work_queue.enqueue(s)
    work_queue.flush({}, None, [], options=options)
    self.assertEqual(client.GetHooks(options),
                     [x['action'] for x in hooks + extra_hooks + sub_hooks])

  def testTargetOS(self):
    """Verifies that specifying a target_os pulls in all relevant dependencies.

    The target_os variable allows specifying the name of an additional OS which
    should be considered when selecting dependencies from a DEPS' deps_os. The
    value will be appended to the _enforced_os tuple.
    """

    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  }]\n'
        'target_os = ["baz"]')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",'
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/dir2": "/dir2", },\n'
        '  "baz": { "foo/dir3": "/dir3", },\n'
        '}')

    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = "unix"

    obj = gclient.GClient.LoadCurrentConfig(options)
    self.assertEqual(['baz', 'unix'], sorted(obj.enforced_os))

  def testTargetOsWithTargetOsOnly(self):
    """Verifies that specifying a target_os and target_os_only pulls in only
    the relevant dependencies.

    The target_os variable allows specifying the name of an additional OS which
    should be considered when selecting dependencies from a DEPS' deps_os. With
    target_os_only also set, the _enforced_os tuple will be set to only the
    target_os value.
    """

    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  }]\n'
        'target_os = ["baz"]\n'
        'target_os_only = True')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",'
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/dir2": "/dir2", },\n'
        '  "baz": { "foo/dir3": "/dir3", },\n'
        '}')

    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = "unix"

    obj = gclient.GClient.LoadCurrentConfig(options)
    self.assertEqual(['baz'], sorted(obj.enforced_os))

  def testTargetOsOnlyWithoutTargetOs(self):
    """Verifies that specifying a target_os_only without target_os raises
    an exception.
    """

    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  }]\n'
        'target_os_only = True')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",'
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/dir2": "/dir2", },\n'
        '}')

    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = "unix"

    exception_raised = False
    try:
      gclient.GClient.LoadCurrentConfig(options)
    except gclient_utils.Error:
      exception_raised = True
    self.assertTrue(exception_raised)

  def testTargetOsInDepsFile(self):
    """Verifies that specifying a target_os value in a DEPS file pulls in all
    relevant dependencies.

    The target_os variable in a DEPS file allows specifying the name of an
    additional OS which should be considered when selecting dependencies from a
    DEPS' deps_os. The value will be appended to the _enforced_os tuple.
    """

    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  },\n'
        '  { "name": "bar",\n'
        '    "url": "svn://example.com/bar",\n'
        '  }]\n')
    write(
        os.path.join('foo', 'DEPS'),
        'target_os = ["baz"]\n'
        'deps_os = {\n'
        '  "unix": { "foo/unix": "/unix", },\n'
        '  "baz": { "foo/baz": "/baz", },\n'
        '  "jaz": { "foo/jaz": "/jaz", },\n'
        '}')
    write(
        os.path.join('bar', 'DEPS'),
        'deps_os = {\n'
        '  "unix": { "bar/unix": "/unix", },\n'
        '  "baz": { "bar/baz": "/baz", },\n'
        '  "jaz": { "bar/jaz": "/jaz", },\n'
        '}')

    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = 'unix'

    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    # Only foo's DEPS declares target_os, so only foo pulls in "baz" deps.
    self.assertEqual(['unix'], sorted(obj.enforced_os))
    self.assertEquals(
        [
          'svn://example.com/bar',
          'svn://example.com/bar/unix',
          'svn://example.com/foo',
          'svn://example.com/foo/baz',
          'svn://example.com/foo/unix',
        ],
        sorted(self._get_processed()))

  def testUpdateWithOsDeps(self):
    """Verifies that complicated deps_os constructs result in the
    correct data also with multiple operating systems. Also see
    testDepsOsOverrideDepsInDepsFile."""

    test_data = [
      # Tuples of deps, deps_os, os_list and expected_deps.
      (
        # OS doesn't need module.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None } },
        ['os1'],
        {'foo': None}
        ),
      (
        # OS wants a different version of module.
        {'foo': 'default_foo'},
        {'os1': { 'foo': 'os1_foo'} },
        ['os1'],
        {'foo': 'os1_foo'}
        ),
      (
        # OS with no overrides at all.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None } },
        ['os2'],
        {'foo': 'default_foo'}
        ),
      (
        # One OS doesn't need module, one OS wants the default.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None },
         'os2': {}},
        ['os1', 'os2'],
        {'foo': 'default_foo'}
        ),
      (
        # One OS doesn't need module, another OS wants a special version.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None },
         'os2': { 'foo': 'os2_foo'}},
        ['os1', 'os2'],
        {'foo': 'os2_foo'}
        ),
      (
        # One OS wants to add a module.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os1_bar' }},
        ['os1'],
        {'foo': 'default_foo',
         'bar': 'os1_bar'}
        ),
      (
        # One OS wants to add a module. One doesn't care.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os1_bar' }},
        ['os1', 'os2'],
        {'foo': 'default_foo',
         'bar': 'os1_bar'}
        ),
      (
        # Two OSes want to add a module with the same definition.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os12_bar' },
         'os2': { 'bar': 'os12_bar' }},
        ['os1', 'os2'],
        {'foo': 'default_foo',
         'bar': 'os12_bar'}
        ),
      ]
    for deps, deps_os, target_os_list, expected_deps in test_data:
      orig_deps = copy.deepcopy(deps)
      result = gclient.Dependency.MergeWithOsDeps(deps, deps_os, target_os_list)
      self.assertEqual(result, expected_deps)
      # MergeWithOsDeps must not mutate its input.
      self.assertEqual(deps, orig_deps)

  def testLateOverride(self):
    """Verifies expected behavior of LateOverride."""
    url = "git@github.com:dart-lang/spark.git"
    d = gclient.Dependency(None, 'name', 'url',
                           None, None, None, None, None, '', True)
    late_url = d.LateOverride(url)
    self.assertEquals(url, late_url)

  def testDepsOsOverrideDepsInDepsFile(self):
    """Verifies that a 'deps_os' path can override a 'deps' path. Also
    see testUpdateWithOsDeps above.
    """

    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  },]\n')
    write(
        os.path.join('foo', 'DEPS'),
        'target_os = ["baz"]\n'
        'deps = {\n'
        '  "foo/src": "/src",\n' # This path is to be overridden by similar path
                                 # in deps_os['unix'].
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/unix": "/unix",'
        '            "foo/src": "/src_unix"},\n'
        '  "baz": { "foo/baz": "/baz",\n'
        '           "foo/src": None},\n'
        '  "jaz": { "foo/jaz": "/jaz", },\n'
        '}')

    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = 'unix'

    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEqual(['unix'], sorted(obj.enforced_os))
    self.assertEquals(
        [
          'svn://example.com/foo',
          'svn://example.com/foo/baz',
          'svn://example.com/foo/src_unix',
          'svn://example.com/foo/unix',
        ],
        sorted(self._get_processed()))

  def testRecursionOverride(self):
    """Verifies gclient respects the recursion var syntax.

    We check several things here:
    - recursion = 3 sets recursion on the foo dep to exactly 3
      (we pull /fizz, but not /fuzz)
    - pulling foo/bar at recursion level 1 (in .gclient) is overriden by
      a later pull of foo/bar at recursion level 2 (in the dep tree)
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        '  { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}\n'
        'recursion = 3')
    write(
        os.path.join('bar', 'DEPS'),
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}')
    write(
        os.path.join('baz', 'DEPS'),
        'deps = {\n'
        '  "fizz": "/fizz",\n'
        '}')
    write(
        os.path.join('fizz', 'DEPS'),
        'deps = {\n'
        '  "fuzz": "/fuzz",\n'
        '}')

    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEquals(
        [
          'svn://example.com/foo',
          'svn://example.com/bar',
          'svn://example.com/foo/bar',
          'svn://example.com/foo/bar/baz',
          'svn://example.com/foo/bar/baz/fizz',
        ],
        self._get_processed())


if __name__ == '__main__':
  sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
  sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout, include_zero=True)
  sys.stderr = gclient_utils.MakeFileAutoFlush(sys.stderr)
  sys.stderr = gclient_utils.MakeFileAnnotated(sys.stderr, include_zero=True)
  logging.basicConfig(
      level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][
        min(sys.argv.count('-v'), 3)],
      format='%(relativeCreated)4d %(levelname)5s %(module)13s('
             '%(lineno)d) %(message)s')
  unittest.main()
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Tests for the Data Store (data.py/data_smart.py)
#
# Copyright (C) 2010 Chris Larson
# Copyright (C) 2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

import unittest
import bb
import bb.data
import bb.parse

class DataExpansions(unittest.TestCase):
    """Basic ${...} and ${@...} expansion behaviour on a fresh datastore."""

    def setUp(self):
        self.d = bb.data.init()
        self.d["foo"] = "value_of_foo"
        self.d["bar"] = "value_of_bar"
        self.d["value_of_foo"] = "value_of_'value_of_foo'"

    def test_one_var(self):
        val = self.d.expand("${foo}")
        self.assertEqual(str(val), "value_of_foo")

    def test_indirect_one_var(self):
        # ${${foo}} expands the inner reference first, then looks up the result.
        val = self.d.expand("${${foo}}")
        self.assertEqual(str(val), "value_of_'value_of_foo'")

    def test_indirect_and_another(self):
        val = self.d.expand("${${foo}} ${bar}")
        self.assertEqual(str(val), "value_of_'value_of_foo' value_of_bar")

    def test_python_snippet(self):
        val = self.d.expand("${@5*12}")
        self.assertEqual(str(val), "60")

    def test_expand_in_python_snippet(self):
        val = self.d.expand("${@'boo ' + '${foo}'}")
        self.assertEqual(str(val), "boo value_of_foo")

    def test_python_snippet_getvar(self):
        val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
        self.assertEqual(str(val), "value_of_foo value_of_bar")

    def test_python_snippet_syntax_error(self):
        self.d.setVar("FOO", "${@foo = 5}")
        self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)

    def test_python_snippet_runtime_error(self):
        self.d.setVar("FOO", "${@int('test')}")
        self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)

    def test_python_snippet_error_path(self):
        # The error must propagate through a variable that merely references
        # the failing one.
        self.d.setVar("FOO", "foo value ${BAR}")
        self.d.setVar("BAR", "bar value ${@int('test')}")
        self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)

    def test_value_containing_value(self):
        val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
        self.assertEqual(str(val), "value_of_foo value_of_bar")

    def test_reference_undefined_var(self):
        # Undefined references are left verbatim, not errored.
        val = self.d.expand("${undefinedvar} meh")
        self.assertEqual(str(val), "${undefinedvar} meh")

    def test_double_reference(self):
        self.d.setVar("BAR", "bar value")
        self.d.setVar("FOO", "${BAR} foo ${BAR}")
        val = self.d.getVar("FOO", True)
        self.assertEqual(str(val), "bar value foo bar value")

    def test_direct_recursion(self):
        self.d.setVar("FOO", "${FOO}")
        self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)

    def test_indirect_recursion(self):
        self.d.setVar("FOO", "${BAR}")
        self.d.setVar("BAR", "${BAZ}")
        self.d.setVar("BAZ", "${FOO}")
        self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)

    def test_recursion_exception(self):
        self.d.setVar("FOO", "${BAR}")
        self.d.setVar("BAR", "${${@'FOO'}}")
        self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)

    def test_incomplete_varexp_single_quotes(self):
        # An unterminated ${ inside shell text must pass through untouched.
        self.d.setVar("FOO", "sed -i -e 's:IP{:I${:g' $pc")
        val = self.d.getVar("FOO", True)
        self.assertEqual(str(val), "sed -i -e 's:IP{:I${:g' $pc")

    def test_nonstring(self):
        self.d.setVar("TEST", 5)
        val = self.d.getVar("TEST", True)
        self.assertEqual(str(val), "5")

    def test_rename(self):
        self.d.renameVar("foo", "newfoo")
        self.assertEqual(self.d.getVar("newfoo"), "value_of_foo")
        self.assertEqual(self.d.getVar("foo"), None)

    def test_deletion(self):
        self.d.delVar("foo")
        self.assertEqual(self.d.getVar("foo"), None)

    def test_keys(self):
        keys = self.d.keys()
        self.assertEqual(keys, ['value_of_foo', 'foo', 'bar'])

class TestNestedExpansions(unittest.TestCase):
    """Expansion where variable references are themselves built by expansion."""

    def setUp(self):
        self.d = bb.data.init()
        self.d["foo"] = "foo"
        self.d["bar"] = "bar"
        self.d["value_of_foobar"] = "187"

    def test_refs(self):
        val = self.d.expand("${value_of_${foo}${bar}}")
        self.assertEqual(str(val), "187")

    #def test_python_refs(self):
    #    val = self.d.expand("${@${@3}**2 + ${@4}**2}")
    #    self.assertEqual(str(val), "25")

    def test_ref_in_python_ref(self):
        val = self.d.expand("${@'${foo}' + 'bar'}")
        self.assertEqual(str(val), "foobar")

    def test_python_ref_in_ref(self):
        val = self.d.expand("${${@'f'+'o'+'o'}}")
        self.assertEqual(str(val), "foo")

    def test_deep_nesting(self):
        depth = 100
        val = self.d.expand("${" * depth + "foo" + "}" * depth)
        self.assertEqual(str(val), "foo")

    #def test_deep_python_nesting(self):
    #    depth = 50
    #    val = self.d.expand("${@" * depth + "1" + "+1}" * depth)
    #    self.assertEqual(str(val), str(depth + 1))

    def test_mixed(self):
        val = self.d.expand("${value_of_${@('${foo}'+'bar')[0:3]}${${@'BAR'.lower()}}}")
        self.assertEqual(str(val), "187")

    def test_runtime(self):
        val = self.d.expand("${${@'value_of' + '_f'+'o'+'o'+'b'+'a'+'r'}}")
        self.assertEqual(str(val), "187")

class TestMemoize(unittest.TestCase):
    """Checks the datastore's value-object caching (identity) semantics."""

    def test_memoized(self):
        d = bb.data.init()
        d.setVar("FOO", "bar")
        self.assertTrue(d.getVar("FOO") is d.getVar("FOO"))

    def test_not_memoized(self):
        d1 = bb.data.init()
        d2 = bb.data.init()
        d1.setVar("FOO", "bar")
        d2.setVar("FOO", "bar2")
        self.assertTrue(d1.getVar("FOO") is not d2.getVar("FOO"))

    def test_changed_after_memoized(self):
        d = bb.data.init()
        d.setVar("foo", "value of foo")
        self.assertEqual(str(d.getVar("foo")), "value of foo")
        d.setVar("foo", "second value of foo")
        self.assertEqual(str(d.getVar("foo")), "second value of foo")

    def test_same_value(self):
        d = bb.data.init()
        d.setVar("foo", "value of")
        d.setVar("bar", "value of")
        self.assertEqual(d.getVar("foo"), d.getVar("bar"))

class TestConcat(unittest.TestCase):
    """Immediate prependVar/appendVar concatenation."""

    def setUp(self):
        self.d = bb.data.init()
        self.d.setVar("FOO", "foo")
        self.d.setVar("VAL", "val")
        self.d.setVar("BAR", "bar")

    def test_prepend(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.prependVar("TEST", "${FOO}:")
        self.assertEqual(self.d.getVar("TEST", True), "foo:val")

    def test_append(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.appendVar("TEST", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST", True), "val:bar")

    def test_multiple_append(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.prependVar("TEST", "${FOO}:")
        self.d.appendVar("TEST", ":val2")
        self.d.appendVar("TEST", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")

class TestConcatOverride(unittest.TestCase):
    """_append/_prepend/_remove override suffixes applied via update_data."""

    def setUp(self):
        self.d = bb.data.init()
        self.d.setVar("FOO", "foo")
        self.d.setVar("VAL", "val")
        self.d.setVar("BAR", "bar")

    def test_prepend(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.setVar("TEST_prepend", "${FOO}:")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "foo:val")

    def test_append(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.setVar("TEST_append", ":${BAR}")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "val:bar")

    def test_multiple_append(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.setVar("TEST_prepend", "${FOO}:")
        self.d.setVar("TEST_append", ":val2")
        self.d.setVar("TEST_append", ":${BAR}")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")

    def test_remove(self):
        self.d.setVar("TEST", "${VAL} ${BAR}")
        self.d.setVar("TEST_remove", "val")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "bar")

    def test_doubleref_remove(self):
        self.d.setVar("TEST", "${VAL} ${BAR}")
        self.d.setVar("TEST_remove", "val")
        self.d.setVar("TEST_TEST", "${TEST} ${TEST}")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST_TEST", True), "bar bar")

    def test_empty_remove(self):
        self.d.setVar("TEST", "")
        self.d.setVar("TEST_remove", "val")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "")

    def test_remove_expansion(self):
        self.d.setVar("BAR", "Z")
        self.d.setVar("TEST", "${BAR}/X Y")
        self.d.setVar("TEST_remove", "${BAR}/X")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "Y")

    def test_remove_expansion_items(self):
        self.d.setVar("TEST", "A B C D")
        self.d.setVar("BAR", "B D")
        self.d.setVar("TEST_remove", "${BAR}")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "A C")

class TestOverrides(unittest.TestCase):
    """OVERRIDES-conditional values (VAR_<override>)."""

    def setUp(self):
        self.d = bb.data.init()
        self.d.setVar("OVERRIDES", "foo:bar:local")
        self.d.setVar("TEST", "testvalue")

    def test_no_override(self):
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "testvalue")

    def test_one_override(self):
        self.d.setVar("TEST_bar", "testvalue2")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "testvalue2")

    def test_multiple_override(self):
        # Later entries in OVERRIDES take priority: "local" wins over
        # "bar" and "foo".
        self.d.setVar("TEST_bar", "testvalue2")
        self.d.setVar("TEST_local", "testvalue3")
        self.d.setVar("TEST_foo", "testvalue4")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")

class TestFlags(unittest.TestCase):
    """Variable flag (varflag) get/set/delete."""

    def setUp(self):
        self.d = bb.data.init()
        self.d.setVar("foo", "value of foo")
        self.d.setVarFlag("foo", "flag1", "value of flag1")
        self.d.setVarFlag("foo", "flag2", "value of flag2")

    def test_setflag(self):
        self.assertEqual(self.d.getVarFlag("foo", "flag1"), "value of flag1")
        self.assertEqual(self.d.getVarFlag("foo", "flag2"), "value of flag2")

    def test_delflag(self):
        self.d.delVarFlag("foo", "flag2")
        self.assertEqual(self.d.getVarFlag("foo", "flag1"), "value of flag1")
        self.assertEqual(self.d.getVarFlag("foo", "flag2"), None)

class Contains(unittest.TestCase):
    """bb.utils.contains / contains_any against a space-separated flag list."""

    def setUp(self):
        self.d = bb.data.init()
        self.d.setVar("SOMEFLAG", "a b c")

    def test_contains(self):
        # contains() requires ALL listed items to be present.
        self.assertTrue(bb.utils.contains("SOMEFLAG", "a", True, False, self.d))
        self.assertTrue(bb.utils.contains("SOMEFLAG", "b", True, False, self.d))
        self.assertTrue(bb.utils.contains("SOMEFLAG", "c", True, False, self.d))

        self.assertTrue(bb.utils.contains("SOMEFLAG", "a b", True, False, self.d))
        self.assertTrue(bb.utils.contains("SOMEFLAG", "b c", True, False, self.d))
        self.assertTrue(bb.utils.contains("SOMEFLAG", "c a", True, False, self.d))

        self.assertTrue(bb.utils.contains("SOMEFLAG", "a b c", True, False, self.d))
        self.assertTrue(bb.utils.contains("SOMEFLAG", "c b a", True, False, self.d))

        self.assertFalse(bb.utils.contains("SOMEFLAG", "x", True, False, self.d))
        self.assertFalse(bb.utils.contains("SOMEFLAG", "a x", True, False, self.d))
        self.assertFalse(bb.utils.contains("SOMEFLAG", "x c b", True, False, self.d))
        self.assertFalse(bb.utils.contains("SOMEFLAG", "x c b a", True, False, self.d))

    def test_contains_any(self):
        # contains_any() requires AT LEAST ONE listed item to be present.
        self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a", True, False, self.d))
        self.assertTrue(bb.utils.contains_any("SOMEFLAG", "b", True, False, self.d))
        self.assertTrue(bb.utils.contains_any("SOMEFLAG", "c", True, False, self.d))

        self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a b", True, False, self.d))
        self.assertTrue(bb.utils.contains_any("SOMEFLAG", "b c", True, False, self.d))
        self.assertTrue(bb.utils.contains_any("SOMEFLAG", "c a", True, False, self.d))

        self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a x", True, False, self.d))
        self.assertTrue(bb.utils.contains_any("SOMEFLAG", "x c", True, False, self.d))

        self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x", True, False, self.d))
        self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x y z", True, False, self.d))
#!/usr/bin/env python3
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''Unit tests for grit.format.rc'''

from __future__ import print_function

import os
import re
import sys
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

import tempfile
import unittest

from six import StringIO

from grit import grd_reader
from grit import util
from grit.node import structure
from grit.tool import build

# Boilerplate emitted at the top of every generated .rc file; expected
# outputs below are compared against _PREAMBLE + body.
_PREAMBLE = '''\
#include "resource.h"
#include <winresrc.h>
#ifdef IDC_STATIC
#undef IDC_STATIC
#endif
#define IDC_STATIC (-1)
'''


class DummyOutput(object):
  """Minimal stand-in for an <output> node: carries type, language and
  output filename for RcBuilder.ProcessNode."""

  def __init__(self, type, language, file = 'hello.gif'):
    self.type = type
    self.language = language
    self.file = file

  def GetType(self):
    return self.type

  def GetLanguage(self):
    return self.language

  def GetOutputFilename(self):
    return self.file


class FormatRcUnittest(unittest.TestCase):
  def testMessages(self):
    root = util.ParseGrdForUnittest("""
        <messages>
          <message name="IDS_BTN_GO" desc="Button text" meaning="verb">Go!</message>
          <message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
            Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
          </message>
          <message name="BONGO" desc="Flippo nippo">
            Howdie "Mr. Elephant", how are you doing?   '''
          </message>
          <message name="IDS_WITH_LINEBREAKS">
Good day sir,
I am a bee
Sting sting
          </message>
        </messages>
        """)

    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    # Note the RC-style escaping: embedded quotes are doubled ("" ) and
    # literal newlines become \n escapes in the STRINGTABLE.
    self.assertEqual(_PREAMBLE + u'''\
STRINGTABLE
BEGIN
  IDS_BTN_GO      "Go!"
  IDS_GREETING    "Hello %s, how are you doing today?"
  BONGO           "Howdie ""Mr. Elephant"", how are you doing?   "
  IDS_WITH_LINEBREAKS "Good day sir,\\nI am a bee\\nSting sting"
END''', output)

  def testRcSection(self):
    root = util.ParseGrdForUnittest(r'''
        <structures>
          <structure type="menu" name="IDC_KLONKMENU" file="grit\testdata\klonk.rc" encoding="utf-16" />
          <structure type="dialog" name="IDD_ABOUTBOX" file="grit\testdata\klonk.rc" encoding="utf-16" />
          <structure type="version" name="VS_VERSION_INFO" file="grit\testdata\klonk.rc" encoding="utf-16" />
        </structures>''')
    root.SetOutputLanguage('en')
    root.RunGatherers()

    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    expected = _PREAMBLE + u'''\
IDC_KLONKMENU MENU
BEGIN
    POPUP "&File"
    BEGIN
        MENUITEM "E&xit",                       IDM_EXIT
        MENUITEM "This be ""Klonk"" me like",   ID_FILE_THISBE
        POPUP "gonk"
        BEGIN
            MENUITEM "Klonk && is [good]",      ID_GONK_KLONKIS
        END
    END
    POPUP "&Help"
    BEGIN
        MENUITEM "&About ...",                  IDM_ABOUT
    END
END

IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
    ICON            IDI_KLONK,IDC_MYICON,14,9,20,20
    LTEXT           "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
                    SS_NOPREFIX
    LTEXT           "Copyright (C) 2005",IDC_STATIC,49,20,119,8
    DEFPUSHBUTTON   "OK",IDOK,195,6,30,11,WS_GROUP
    CONTROL         "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
                    BS_AUTORADIOBUTTON,46,51,84,10
END

VS_VERSION_INFO VERSIONINFO
 FILEVERSION 1,0,0,1
 PRODUCTVERSION 1,0,0,1
 FILEFLAGSMASK 0x17L
#ifdef _DEBUG
 FILEFLAGS 0x1L
#else
 FILEFLAGS 0x0L
#endif
 FILEOS 0x4L
 FILETYPE 0x1L
 FILESUBTYPE 0x0L
BEGIN
    BLOCK "StringFileInfo"
    BEGIN
        BLOCK "040904b0"
        BEGIN
            VALUE "FileDescription", "klonk Application"
            VALUE "FileVersion", "1, 0, 0, 1"
            VALUE "InternalName", "klonk"
            VALUE "LegalCopyright", "Copyright (C) 2005"
            VALUE "OriginalFilename", "klonk.exe"
            VALUE "ProductName", " klonk Application"
            VALUE "ProductVersion", "1, 0, 0, 1"
        END
    END
    BLOCK "VarFileInfo"
    BEGIN
        VALUE "Translation", 0x409, 1200
    END
END'''.strip()
    # Comparison is token-by-token (split on whitespace), so column
    # alignment in the expected text above is not significant.
    for expected_line, output_line in zip(expected.split(), output.split()):
      self.assertEqual(expected_line, output_line)

  def testRcIncludeStructure(self):
    root = util.ParseGrdForUnittest('''
        <structures>
          <structure type="tr_html" name="IDR_HTML" file="bingo.html"/>
          <structure type="tr_html" name="IDR_HTML2" file="bingo2.html"/>
        </structures>''', base_dir = '/temp')
    # We do not run gatherers as it is not needed and wouldn't find the file
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    expected = (_PREAMBLE +
                u'IDR_HTML           HTML               "%s"\n'
                u'IDR_HTML2          HTML               "%s"'
                % (util.normpath('/temp/bingo.html').replace('\\', '\\\\'),
                   util.normpath('/temp/bingo2.html').replace('\\', '\\\\')))
    # hackety hack to work on win32&lin
    output = re.sub(r'"[c-zC-Z]:', '"', output)
    self.assertEqual(expected, output)

  def testRcIncludeFile(self):
    root = util.ParseGrdForUnittest('''
        <includes>
          <include type="TXT" name="TEXT_ONE" file="bingo.txt"/>
          <include type="TXT" name="TEXT_TWO" file="bingo2.txt" filenameonly="true" />
        </includes>''', base_dir = '/temp')
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    # TEXT_TWO uses filenameonly="true", so only the basename is emitted.
    expected = (_PREAMBLE +
                u'TEXT_ONE           TXT                "%s"\n'
                u'TEXT_TWO           TXT                "%s"'
                % (util.normpath('/temp/bingo.txt').replace('\\', '\\\\'),
                   'bingo2.txt'))
    # hackety hack to work on win32&lin
    output = re.sub(r'"[c-zC-Z]:', '"', output)
    self.assertEqual(expected, output)

  def testRcIncludeFlattenedHtmlFile(self):
    input_file = util.PathFromRoot('grit/testdata/include_test.html')
    output_file = '%s/HTML_FILE1_include_test.html' % tempfile.gettempdir()
    root = util.ParseGrdForUnittest('''
        <includes>
          <include name="HTML_FILE1" flattenhtml="true" file="%s" type="BINDATA" />
        </includes>''' % input_file)
    buf = StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en', output_file), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    expected = (_PREAMBLE +
        u'HTML_FILE1         BINDATA            "HTML_FILE1_include_test.html"')
    # hackety hack to work on win32&lin
    output = re.sub(r'"[c-zC-Z]:', '"', output)
    self.assertEqual(expected, output)

    file_contents = util.ReadFile(output_file, 'utf-8')

    # Check for the content added by the <include> tag.
    self.failUnless(file_contents.find('Hello Include!') != -1)
    # Check for the content that was removed by if tag.
    self.failUnless(file_contents.find('should be removed') == -1)
    # Check for the content that was kept in place by if.
    self.failUnless(file_contents.find('should be kept') != -1)
    self.failUnless(file_contents.find('in the middle...') != -1)
    self.failUnless(file_contents.find('at the end...') != -1)
    # Check for nested content that was kept
    self.failUnless(file_contents.find('nested true should be kept') != -1)
    self.failUnless(file_contents.find('silbing true should be kept') != -1)
    # Check for removed "<if>" and "</if>" tags.
    self.failUnless(file_contents.find('<if expr=') == -1)
    self.failUnless(file_contents.find('</if>') == -1)
    os.remove(output_file)

  def testStructureNodeOutputfile(self):
    input_file = util.PathFromRoot('grit/testdata/simple.html')
    root = util.ParseGrdForUnittest('''
        <structures>
          <structure type="tr_html" name="IDR_HTML" file="%s" />
        </structures>''' % input_file)
    struct, = root.GetChildrenOfType(structure.StructureNode)
    # We must run the gatherer since we'll be wanting the translation of the
    # file. The file exists in the location pointed to.
root.SetOutputLanguage('en') root.RunGatherers() output_dir = tempfile.gettempdir() en_file = struct.FileForLanguage('en', output_dir) self.failUnless(en_file == input_file) fr_file = struct.FileForLanguage('fr', output_dir) self.failUnless(fr_file == os.path.join(output_dir, 'fr_simple.html')) contents = util.ReadFile(fr_file, 'utf-8') self.failUnless(contents.find('<p>') != -1) # should contain the markup self.failUnless(contents.find('Hello!') == -1) # should be translated os.remove(fr_file) def testChromeHtmlNodeOutputfile(self): input_file = util.PathFromRoot('grit/testdata/chrome_html.html') output_file = '%s/HTML_FILE1_chrome_html.html' % tempfile.gettempdir() root = util.ParseGrdForUnittest(''' <structures> <structure type="chrome_html" name="HTML_FILE1" file="%s" flattenhtml="true" /> </structures>''' % input_file) struct, = root.GetChildrenOfType(structure.StructureNode) struct.gatherer.SetDefines({'scale_factors': '2x'}) # We must run the gatherers since we'll be wanting the chrome_html output. # The file exists in the location pointed to. root.SetOutputLanguage('en') root.RunGatherers() buf = StringIO() build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en', output_file), buf) output = util.StripBlankLinesAndComments(buf.getvalue()) expected = (_PREAMBLE + u'HTML_FILE1 BINDATA "HTML_FILE1_chrome_html.html"') # hackety hack to work on win32&lin output = re.sub(r'"[c-zC-Z]:', '"', output) self.assertEqual(expected, output) file_contents = util.ReadFile(output_file, 'utf-8') # Check for the content added by the <include> tag. self.failUnless(file_contents.find('Hello Include!') != -1) # Check for inserted -webkit-image-set. 
self.failUnless(file_contents.find('content: -webkit-image-set') != -1) os.remove(output_file) def testSubstitutionHtml(self): input_file = util.PathFromRoot('grit/testdata/toolbar_about.html') root = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?> <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir="."> <release seq="1" allow_pseudo="False"> <structures fallback_to_english="True"> <structure type="tr_html" name="IDR_HTML" file="%s" expand_variables="true"/> </structures> </release> </grit> ''' % input_file), util.PathFromRoot('.')) root.SetOutputLanguage('ar') # We must run the gatherers since we'll be wanting the translation of the # file. The file exists in the location pointed to. root.RunGatherers() output_dir = tempfile.gettempdir() struct, = root.GetChildrenOfType(structure.StructureNode) ar_file = struct.FileForLanguage('ar', output_dir) self.failUnless(ar_file == os.path.join(output_dir, 'ar_toolbar_about.html')) contents = util.ReadFile(ar_file, 'utf-8') self.failUnless(contents.find('dir="RTL"') != -1) os.remove(ar_file) def testFallbackToEnglish(self): root = util.ParseGrdForUnittest(r''' <structures fallback_to_english="True"> <structure type="dialog" name="IDD_ABOUTBOX" file="grit\testdata\klonk.rc" encoding="utf-16" /> </structures>''', base_dir=util.PathFromRoot('.')) root.SetOutputLanguage('en') root.RunGatherers() buf = StringIO() formatter = build.RcBuilder.ProcessNode( root, DummyOutput('rc_all', 'bingobongo'), buf) output = util.StripBlankLinesAndComments(buf.getvalue()) self.assertEqual(_PREAMBLE + '''\ IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75 STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU CAPTION "About" FONT 8, "System", 0, 0, 0x0 BEGIN ICON IDI_KLONK,IDC_MYICON,14,9,20,20 LTEXT "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8, SS_NOPREFIX LTEXT "Copyright (C) 2005",IDC_STATIC,49,20,119,8 DEFPUSHBUTTON "OK",IDOK,195,6,30,11,WS_GROUP CONTROL "Jack ""Black"" 
Daniels",IDC_RADIO1,"Button", BS_AUTORADIOBUTTON,46,51,84,10 END''', output) def testSubstitutionRc(self): root = grd_reader.Parse(StringIO(r'''<?xml version="1.0" encoding="UTF-8"?> <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir="."> <outputs> <output lang="en" type="rc_all" filename="grit\testdata\klonk_resources.rc"/> </outputs> <release seq="1" allow_pseudo="False"> <structures> <structure type="menu" name="IDC_KLONKMENU" file="grit\testdata\klonk.rc" encoding="utf-16" expand_variables="true" /> </structures> <messages> <message name="good" sub_variable="true"> excellent </message> </messages> </release> </grit> '''), util.PathFromRoot('.')) root.SetOutputLanguage('en') root.RunGatherers() buf = StringIO() build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf) output = buf.getvalue() self.assertEqual(''' // This file is automatically generated by GRIT. Do not edit. #include "resource.h" #include <winresrc.h> #ifdef IDC_STATIC #undef IDC_STATIC #endif #define IDC_STATIC (-1) LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL IDC_KLONKMENU MENU BEGIN POPUP "&File" BEGIN MENUITEM "E&xit", IDM_EXIT MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE POPUP "gonk" BEGIN MENUITEM "Klonk && is excellent", ID_GONK_KLONKIS END END POPUP "&Help" BEGIN MENUITEM "&About ...", IDM_ABOUT END END STRINGTABLE BEGIN good "excellent" END '''.strip(), output.strip()) if __name__ == '__main__': unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Generates java source files from a mojom.Module."""

import argparse
import ast
import contextlib
import os
import re
import shutil
import sys
import tempfile

from jinja2 import contextfilter

import mojom.fileutil as fileutil
import mojom.generate.generator as generator
import mojom.generate.module as mojom
from mojom.generate.template_expander import UseJinja

# Item 0 of sys.path is the directory of the main file; item 1 is PYTHONPATH
# (if set); item 2 is system libraries.
sys.path.insert(
    1,
    os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
                 os.pardir, os.pardir, 'build', 'android', 'gyp'))
from util import build_utils

# TODO(crbug.com/1174969): Remove this once Python2 is obsoleted.
if sys.version_info.major != 2:
  basestring = str
  long = int

GENERATOR_PREFIX = 'java'

# Maps a mojom type spec to the Java type used to represent it.
_spec_to_java_type = {
    mojom.BOOL.spec: 'boolean',
    mojom.DCPIPE.spec: 'org.chromium.mojo.system.DataPipe.ConsumerHandle',
    mojom.DOUBLE.spec: 'double',
    mojom.DPPIPE.spec: 'org.chromium.mojo.system.DataPipe.ProducerHandle',
    mojom.FLOAT.spec: 'float',
    mojom.HANDLE.spec: 'org.chromium.mojo.system.UntypedHandle',
    mojom.INT16.spec: 'short',
    mojom.INT32.spec: 'int',
    mojom.INT64.spec: 'long',
    mojom.INT8.spec: 'byte',
    mojom.MSGPIPE.spec: 'org.chromium.mojo.system.MessagePipeHandle',
    mojom.PLATFORMHANDLE.spec: 'org.chromium.mojo.system.UntypedHandle',
    mojom.NULLABLE_DCPIPE.spec:
    'org.chromium.mojo.system.DataPipe.ConsumerHandle',
    mojom.NULLABLE_DPPIPE.spec:
    'org.chromium.mojo.system.DataPipe.ProducerHandle',
    mojom.NULLABLE_HANDLE.spec: 'org.chromium.mojo.system.UntypedHandle',
    mojom.NULLABLE_MSGPIPE.spec: 'org.chromium.mojo.system.MessagePipeHandle',
    mojom.NULLABLE_PLATFORMHANDLE.spec:
    'org.chromium.mojo.system.UntypedHandle',
    mojom.NULLABLE_SHAREDBUFFER.spec:
    'org.chromium.mojo.system.SharedBufferHandle',
    mojom.NULLABLE_STRING.spec: 'String',
    mojom.SHAREDBUFFER.spec: 'org.chromium.mojo.system.SharedBufferHandle',
    mojom.STRING.spec: 'String',
    mojom.UINT16.spec: 'short',
    mojom.UINT32.spec: 'int',
    mojom.UINT64.spec: 'long',
    mojom.UINT8.spec: 'byte',
}

# Maps a mojom type spec to the Decoder.java method used to read it.
_spec_to_decode_method = {
    mojom.BOOL.spec: 'readBoolean',
    mojom.DCPIPE.spec: 'readConsumerHandle',
    mojom.DOUBLE.spec: 'readDouble',
    mojom.DPPIPE.spec: 'readProducerHandle',
    mojom.FLOAT.spec: 'readFloat',
    mojom.HANDLE.spec: 'readUntypedHandle',
    mojom.INT16.spec: 'readShort',
    mojom.INT32.spec: 'readInt',
    mojom.INT64.spec: 'readLong',
    mojom.INT8.spec: 'readByte',
    mojom.MSGPIPE.spec: 'readMessagePipeHandle',
    mojom.PLATFORMHANDLE.spec: 'readUntypedHandle',
    mojom.NULLABLE_DCPIPE.spec: 'readConsumerHandle',
    mojom.NULLABLE_DPPIPE.spec: 'readProducerHandle',
    mojom.NULLABLE_HANDLE.spec: 'readUntypedHandle',
    mojom.NULLABLE_MSGPIPE.spec: 'readMessagePipeHandle',
    mojom.NULLABLE_PLATFORMHANDLE.spec: 'readUntypedHandle',
    mojom.NULLABLE_SHAREDBUFFER.spec: 'readSharedBufferHandle',
    mojom.NULLABLE_STRING.spec: 'readString',
    mojom.SHAREDBUFFER.spec: 'readSharedBufferHandle',
    mojom.STRING.spec: 'readString',
    mojom.UINT16.spec: 'readShort',
    mojom.UINT32.spec: 'readInt',
    mojom.UINT64.spec: 'readLong',
    mojom.UINT8.spec: 'readByte',
}

_java_primitive_to_boxed_type = {
    'boolean': 'Boolean',
    'byte': 'Byte',
    'double': 'Double',
    'float': 'Float',
    'int': 'Integer',
    'long': 'Long',
    'short': 'Short',
}

_java_reserved_types = [
    # These two may clash with commonly used classes on Android.
    'Manifest',
    'R'
]


def UpperCamelCase(name):
  return ''.join([x.capitalize() for x in generator.SplitCamelCase(name)])


def CamelCase(name):
  uccc = UpperCamelCase(name)
  return uccc[0].lower() + uccc[1:]


def ConstantStyle(name):
  return generator.ToUpperSnakeCase(name)


def GetNameForElement(element):
  """Returns the Java identifier for a mojom AST element.

  Raises Exception for element kinds this generator does not handle.
  """
  # Structs/interfaces/enums/unions: UpperCamelCase, with a trailing '_' if
  # the name would clash with a reserved Android class name.
  if (mojom.IsEnumKind(element) or mojom.IsInterfaceKind(element)
      or mojom.IsStructKind(element) or mojom.IsUnionKind(element)):
    name = UpperCamelCase(element.name)
    if name in _java_reserved_types:
      return name + '_'
    return name
  # Request/associated/pending wrappers take the name of the wrapped kind.
  if (mojom.IsInterfaceRequestKind(element) or mojom.IsAssociatedKind(element)
      or mojom.IsPendingRemoteKind(element)
      or mojom.IsPendingReceiverKind(element)):
    return GetNameForElement(element.kind)
  if isinstance(element, (mojom.Method, mojom.Parameter, mojom.Field)):
    return CamelCase(element.name)
  if isinstance(element, mojom.EnumValue):
    return (GetNameForElement(element.enum) + '.' +
            ConstantStyle(element.name))
  if isinstance(element, (mojom.NamedValue, mojom.Constant, mojom.EnumField)):
    return ConstantStyle(element.name)
  raise Exception('Unexpected element: %s' % element)


def GetInterfaceResponseName(method):
  return UpperCamelCase(method.name) + '_Response'


# TODO(crbug.com/1093146): remove after migrating downstream references.
def GetInterfaceResponseNameOld(method):
  return UpperCamelCase(method.name + '_Response')


def ParseStringAttribute(attribute):
  assert isinstance(attribute, basestring)
  return attribute


def GetJavaTrueFalse(value):
  return 'true' if value else 'false'


def GetArrayNullabilityFlags(kind):
  """Returns nullability flags for an array type, see Decoder.java.

  As we have dedicated decoding functions for arrays, we have to pass
  nullability information about both the array itself, as well as the array
  element type there.
  """
  assert mojom.IsArrayKind(kind)
  ARRAY_NULLABLE = \
      'org.chromium.mojo.bindings.BindingsHelper.ARRAY_NULLABLE'
  ELEMENT_NULLABLE = \
      'org.chromium.mojo.bindings.BindingsHelper.ELEMENT_NULLABLE'
  NOTHING_NULLABLE = \
      'org.chromium.mojo.bindings.BindingsHelper.NOTHING_NULLABLE'

  flags_to_set = []
  if mojom.IsNullableKind(kind):
    flags_to_set.append(ARRAY_NULLABLE)
  if mojom.IsNullableKind(kind.kind):
    flags_to_set.append(ELEMENT_NULLABLE)
  if not flags_to_set:
    flags_to_set = [NOTHING_NULLABLE]
  return ' | '.join(flags_to_set)


def AppendEncodeDecodeParams(initial_params, context, kind, bit):
  """ Appends standard parameters shared between encode and decode calls. """
  params = list(initial_params)
  if (kind == mojom.BOOL):
    params.append(str(bit))
  if mojom.IsReferenceKind(kind):
    if mojom.IsArrayKind(kind):
      params.append(GetArrayNullabilityFlags(kind))
    else:
      params.append(GetJavaTrueFalse(mojom.IsNullableKind(kind)))
  if mojom.IsArrayKind(kind):
    params.append(GetArrayExpectedLength(kind))
  if mojom.IsInterfaceKind(kind):
    params.append('%s.MANAGER' % GetJavaType(context, kind))
  if mojom.IsPendingRemoteKind(kind):
    params.append('%s.MANAGER' % GetJavaType(context, kind.kind))
  if mojom.IsArrayKind(kind) and mojom.IsInterfaceKind(kind.kind):
    params.append('%s.MANAGER' % GetJavaType(context, kind.kind))
  if mojom.IsArrayKind(kind) and mojom.IsPendingRemoteKind(kind.kind):
    params.append('%s.MANAGER' % GetJavaType(context, kind.kind.kind))
  return params


@contextfilter
def DecodeMethod(context, kind, offset, bit):
  """Jinja filter: builds the Decoder.java call reading |kind| at |offset|."""
  def _DecodeMethodName(kind):
    if mojom.IsArrayKind(kind):
      # Array readers are the element reader name pluralized.
      return _DecodeMethodName(kind.kind) + 's'
    if mojom.IsEnumKind(kind):
      # Enums are transported as int32.
      return _DecodeMethodName(mojom.INT32)
    if mojom.IsInterfaceRequestKind(kind) or mojom.IsPendingReceiverKind(kind):
      return 'readInterfaceRequest'
    if mojom.IsInterfaceKind(kind) or mojom.IsPendingRemoteKind(kind):
      return 'readServiceInterface'
    if (mojom.IsAssociatedInterfaceRequestKind(kind)
        or mojom.IsPendingAssociatedReceiverKind(kind)):
      return 'readAssociatedInterfaceRequestNotSupported'
    if (mojom.IsAssociatedInterfaceKind(kind)
        or mojom.IsPendingAssociatedRemoteKind(kind)):
      return 'readAssociatedServiceInterfaceNotSupported'
    return _spec_to_decode_method[kind.spec]
  methodName = _DecodeMethodName(kind)
  params = AppendEncodeDecodeParams([ str(offset) ], context, kind, bit)
  return '%s(%s)' % (methodName, ', '.join(params))


@contextfilter
def EncodeMethod(context, kind, variable, offset, bit):
  """Jinja filter: builds the Encoder.java call writing |variable|."""
  params = AppendEncodeDecodeParams(
      [ variable, str(offset) ], context, kind, bit)
  return 'encode(%s)' % ', '.join(params)


def GetPackage(module):
  """Returns the Java package for |module| ([JavaPackage] or derived)."""
  if module.attributes and 'JavaPackage' in module.attributes:
    return ParseStringAttribute(module.attributes['JavaPackage'])
  # Default package.
  if module.namespace:
    return 'org.chromium.' + module.namespace
  return 'org.chromium'


def GetNameForKind(context, kind):
  """Returns the (possibly package-qualified) Java name for |kind|."""
  def _GetNameHierachy(kind):
    hierachy = []
    if kind.parent_kind:
      hierachy = _GetNameHierachy(kind.parent_kind)
    hierachy.append(GetNameForElement(kind))
    return hierachy

  module = context.resolve('module')
  elements = []
  # Qualify with the package only when crossing module package boundaries.
  if GetPackage(module) != GetPackage(kind.module):
    elements += [GetPackage(kind.module)]
  elements += _GetNameHierachy(kind)
  return '.'.join(elements)


@contextfilter
def GetJavaClassForEnum(context, kind):
  return GetNameForKind(context, kind)


def GetBoxedJavaType(context, kind, with_generics=True):
  unboxed_type = GetJavaType(context, kind, False, with_generics)
  if unboxed_type in _java_primitive_to_boxed_type:
    return _java_primitive_to_boxed_type[unboxed_type]
  return unboxed_type


@contextfilter
def GetJavaType(context, kind, boxed=False, with_generics=True):
  """Returns the Java type used to represent |kind|."""
  if boxed:
    return GetBoxedJavaType(context, kind)
  if (mojom.IsStructKind(kind) or mojom.IsInterfaceKind(kind)
      or mojom.IsUnionKind(kind)):
    return GetNameForKind(context, kind)
  if mojom.IsPendingRemoteKind(kind):
    return GetNameForKind(context, kind.kind)
  if mojom.IsInterfaceRequestKind(kind) or mojom.IsPendingReceiverKind(kind):
    return ('org.chromium.mojo.bindings.InterfaceRequest<%s>' %
            GetNameForKind(context, kind.kind))
  if (mojom.IsAssociatedInterfaceKind(kind)
      or mojom.IsPendingAssociatedRemoteKind(kind)):
    return 'org.chromium.mojo.bindings.AssociatedInterfaceNotSupported'
  if (mojom.IsAssociatedInterfaceRequestKind(kind)
      or mojom.IsPendingAssociatedReceiverKind(kind)):
    return 'org.chromium.mojo.bindings.AssociatedInterfaceRequestNotSupported'
  if mojom.IsMapKind(kind):
    if with_generics:
      return 'java.util.Map<%s, %s>' % (
          GetBoxedJavaType(context, kind.key_kind),
          GetBoxedJavaType(context, kind.value_kind))
    else:
      return 'java.util.Map'
  if mojom.IsArrayKind(kind):
    return '%s[]' % GetJavaType(context, kind.kind, boxed, with_generics)
  if mojom.IsEnumKind(kind):
    return 'int'
  return _spec_to_java_type[kind.spec]


@contextfilter
def DefaultValue(context, field):
  """Returns the Java expression for |field|'s mojom default value."""
  assert field.default
  if isinstance(field.kind, mojom.Struct):
    assert field.default == 'default'
    return 'new %s()' % GetJavaType(context, field.kind)
  return '(%s) %s' % (
      GetJavaType(context, field.kind),
      ExpressionToText(context, field.default, kind_spec=field.kind.spec))


@contextfilter
def ConstantValue(context, constant):
  return '(%s) %s' % (
      GetJavaType(context, constant.kind),
      ExpressionToText(context, constant.value, kind_spec=constant.kind.spec))


@contextfilter
def NewArray(context, kind, size):
  # Nested arrays are allocated as 'new T[size][]...'.
  if mojom.IsArrayKind(kind.kind):
    return NewArray(context, kind.kind, size) + '[]'
  return 'new %s[%s]' % (
      GetJavaType(context, kind.kind, boxed=False, with_generics=False), size)


@contextfilter
def ExpressionToText(context, token, kind_spec=''):
  """Translates a mojom value token into a Java expression."""
  def _TranslateNamedValue(named_value):
    entity_name = GetNameForElement(named_value)
    if named_value.parent_kind:
      return GetJavaType(context, named_value.parent_kind) + '.' + entity_name
    # Handle the case where named_value is a module level constant:
    if not isinstance(named_value, mojom.EnumValue):
      entity_name = (GetConstantsMainEntityName(named_value.module) + '.' +
                     entity_name)
    if GetPackage(named_value.module) == GetPackage(context.resolve('module')):
      return entity_name
    return GetPackage(named_value.module) + '.' + entity_name

  if isinstance(token, mojom.NamedValue):
    return _TranslateNamedValue(token)
  # Integer kinds: specs start with 'i' (signed) or 'u' (unsigned).
  if kind_spec.startswith('i') or kind_spec.startswith('u'):
    number = ast.literal_eval(token.lstrip('+ '))
    if not isinstance(number, (int, long)):
      raise ValueError('got unexpected type %r for int literal %r' % (
          type(number), token))
    # If the literal is too large to fit a signed long, convert it to the
    # equivalent signed long.
    if number >= 2 ** 63:
      number -= 2 ** 64
    # Values outside the int range need the Java 'L' long suffix.
    if number < 2 ** 31 and number >= -2 ** 31:
      return '%d' % number
    return '%dL' % number
  if isinstance(token, mojom.BuiltinValue):
    if token.value == 'double.INFINITY':
      return 'java.lang.Double.POSITIVE_INFINITY'
    if token.value == 'double.NEGATIVE_INFINITY':
      return 'java.lang.Double.NEGATIVE_INFINITY'
    if token.value == 'double.NAN':
      return 'java.lang.Double.NaN'
    if token.value == 'float.INFINITY':
      return 'java.lang.Float.POSITIVE_INFINITY'
    if token.value == 'float.NEGATIVE_INFINITY':
      return 'java.lang.Float.NEGATIVE_INFINITY'
    if token.value == 'float.NAN':
      return 'java.lang.Float.NaN'
  return token


def GetArrayKind(kind, size = None):
  if size is None:
    return mojom.Array(kind)
  else:
    # Use a fixed-length array marker carrying the requested size.
    array = mojom.Array(kind, 0)
    array.java_map_size = size
    return array


def GetArrayExpectedLength(kind):
  if mojom.IsArrayKind(kind) and kind.length is not None:
    return getattr(kind, 'java_map_size', str(kind.length))
  else:
    return 'org.chromium.mojo.bindings.BindingsHelper.UNSPECIFIED_ARRAY_LENGTH'


def IsPointerArrayKind(kind):
  if not mojom.IsArrayKind(kind):
    return False
  sub_kind = kind.kind
  return mojom.IsObjectKind(sub_kind) and not mojom.IsUnionKind(sub_kind)


def IsUnionArrayKind(kind):
  if not mojom.IsArrayKind(kind):
    return False
  sub_kind = kind.kind
  return mojom.IsUnionKind(sub_kind)


def GetConstantsMainEntityName(module):
  if module.attributes and 'JavaConstantsClassName' in module.attributes:
    return ParseStringAttribute(module.attributes['JavaConstantsClassName'])
  # This constructs the name of the embedding classes for module level
  # constants by extracting the mojom's filename and prepending it to
  # Constants.
  return (UpperCamelCase(module.path.split('/')[-1].rsplit('.', 1)[0]) +
          'Constants')


def GetMethodOrdinalName(method):
  return ConstantStyle(method.name) + '_ORDINAL'


def HasMethodWithResponse(interface):
  for method in interface.methods:
    if method.response_parameters is not None:
      return True
  return False


def HasMethodWithoutResponse(interface):
  for method in interface.methods:
    if method.response_parameters is None:
      return True
  return False


@contextlib.contextmanager
def TempDir():
  # Temporary directory removed (with contents) on exit from the with block.
  dirname = tempfile.mkdtemp()
  try:
    yield dirname
  finally:
    shutil.rmtree(dirname)


def EnumCoversContinuousRange(kind):
  """True iff the enum's values fill [min_value, max_value] with no gaps."""
  if not kind.fields:
    return False
  number_of_unique_keys = len(set(map(
      lambda field: field.numeric_value, kind.fields)))
  if kind.max_value - kind.min_value + 1 != number_of_unique_keys:
    return False
  return True


class Generator(generator.Generator):
  """Mojom-to-Java bindings generator; emits one srcjar per module."""

  def _GetJinjaExports(self):
    return {
        'package': GetPackage(self.module),
    }

  @staticmethod
  def GetTemplatePrefix():
    return "java_templates"

  def GetFilters(self):
    java_filters = {
        'array_expected_length': GetArrayExpectedLength,
        'array': GetArrayKind,
        'constant_value': ConstantValue,
        'covers_continuous_range': EnumCoversContinuousRange,
        'decode_method': DecodeMethod,
        'default_value': DefaultValue,
        'encode_method': EncodeMethod,
        'expression_to_text': ExpressionToText,
        'has_method_without_response': HasMethodWithoutResponse,
        'has_method_with_response': HasMethodWithResponse,
        'interface_response_name': GetInterfaceResponseName,
        'interface_response_name_old': GetInterfaceResponseNameOld,
        'is_array_kind': mojom.IsArrayKind,
        'is_any_handle_kind': mojom.IsAnyHandleKind,
        "is_enum_kind": mojom.IsEnumKind,
        'is_interface_request_kind': mojom.IsInterfaceRequestKind,
        'is_map_kind': mojom.IsMapKind,
        'is_nullable_kind': mojom.IsNullableKind,
        'is_pointer_array_kind': IsPointerArrayKind,
        'is_reference_kind': mojom.IsReferenceKind,
        'is_struct_kind': mojom.IsStructKind,
        'is_union_array_kind': IsUnionArrayKind,
        'is_union_kind': mojom.IsUnionKind,
        'java_class_for_enum': GetJavaClassForEnum,
        'java_true_false': GetJavaTrueFalse,
        'java_type': GetJavaType,
        'method_ordinal_name': GetMethodOrdinalName,
        'name': GetNameForElement,
        'new_array': NewArray,
        'ucc': lambda x: UpperCamelCase(x.name),
    }
    return java_filters

  def _GetJinjaExportsForInterface(self, interface):
    exports = self._GetJinjaExports()
    exports.update({'interface': interface})
    return exports

  @UseJinja('enum.java.tmpl')
  def _GenerateEnumSource(self, enum):
    exports = self._GetJinjaExports()
    exports.update({'enum': enum})
    return exports

  @UseJinja('struct.java.tmpl')
  def _GenerateStructSource(self, struct):
    exports = self._GetJinjaExports()
    exports.update({'struct': struct})
    return exports

  @UseJinja('union.java.tmpl')
  def _GenerateUnionSource(self, union):
    exports = self._GetJinjaExports()
    exports.update({'union': union})
    return exports

  @UseJinja('interface.java.tmpl')
  def _GenerateInterfaceSource(self, interface):
    return self._GetJinjaExportsForInterface(interface)

  @UseJinja('interface_internal.java.tmpl')
  def _GenerateInterfaceInternalSource(self, interface):
    return self._GetJinjaExportsForInterface(interface)

  @UseJinja('constants.java.tmpl')
  def _GenerateConstantsSource(self, module):
    exports = self._GetJinjaExports()
    exports.update({'main_entity': GetConstantsMainEntityName(module),
                    'constants': module.constants})
    return exports

  def _DoGenerateFiles(self):
    # Writes one .java file per struct/union/enum/interface (plus an
    # *_Internal.java per interface, and a *Constants.java if needed)
    # into self.output_dir.
    fileutil.EnsureDirectoryExists(self.output_dir)
    for struct in self.module.structs:
      self.WriteWithComment(self._GenerateStructSource(struct),
                            '%s.java' % GetNameForElement(struct))
    for union in self.module.unions:
      self.WriteWithComment(self._GenerateUnionSource(union),
                            '%s.java' % GetNameForElement(union))
    for enum in self.module.enums:
      self.WriteWithComment(self._GenerateEnumSource(enum),
                            '%s.java' % GetNameForElement(enum))
    for interface in self.module.interfaces:
      self.WriteWithComment(self._GenerateInterfaceSource(interface),
                            '%s.java' % GetNameForElement(interface))
      self.WriteWithComment(self._GenerateInterfaceInternalSource(interface),
                            '%s_Internal.java' % GetNameForElement(interface))
    if self.module.constants:
      self.WriteWithComment(self._GenerateConstantsSource(self.module),
                            '%s.java' % GetConstantsMainEntityName(self.module))

  def GenerateFiles(self, unparsed_args):
    # TODO(rockot): Support variant output for Java.
    if self.variant:
      raise Exception("Variants not supported in Java bindings.")

    self.module.Stylize(generator.Stylizer())

    parser = argparse.ArgumentParser()
    parser.add_argument('--java_output_directory',
                        dest='java_output_directory')
    args = parser.parse_args(unparsed_args)
    package_path = GetPackage(self.module).replace('.', '/')

    # Generate the java files in a temporary directory and place a single
    # srcjar in the output directory.
    basename = "%s.srcjar" % self.module.path
    zip_filename = os.path.join(self.output_dir, basename)
    with TempDir() as temp_java_root:
      self.output_dir = os.path.join(temp_java_root, package_path)
      self._DoGenerateFiles();
      build_utils.ZipDir(zip_filename, temp_java_root)

    if args.java_output_directory:
      # If requested, generate the java files directly into indicated
      # directory.
      self.output_dir = os.path.join(args.java_output_directory, package_path)
      self._DoGenerateFiles();

  def GetJinjaParameters(self):
    return {
        'lstrip_blocks': True,
        'trim_blocks': True,
    }

  def GetGlobals(self):
    return {
        'namespace': self.module.namespace,
        'module': self.module,
    }
# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- # # BitBake 'Build' implementation # # Core code for function execution and task handling in the # BitBake build tools. # # Copyright (C) 2003, 2004 Chris Larson # # Based on Gentoo's portage.py. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # #Based on functions from the base bb module, Copyright 2003 Holger Schurig import os import sys import logging import shlex import glob import bb import bb.msg import bb.process from contextlib import nested from bb import data, event, utils bblogger = logging.getLogger('BitBake') logger = logging.getLogger('BitBake.Build') NULL = open(os.devnull, 'r+') # When we execute a python function we'd like certain things # in all namespaces, hence we add them to __builtins__ # If we do not do this and use the exec globals, they will # not be available to subfunctions. 
__builtins__['bb'] = bb
__builtins__['os'] = os

class FuncFailed(Exception):
    # Raised when a BB function (python or shell) fails; carries the
    # function name and, when available, the path of its logfile.
    def __init__(self, name = None, logfile = None):
        self.logfile = logfile
        self.name = name
        if name:
            self.msg = 'Function failed: %s' % name
        else:
            self.msg = "Function failed"

    def __str__(self):
        if self.logfile and os.path.exists(self.logfile):
            msg = ("%s (see %s for further information)" %
                   (self.msg, self.logfile))
        else:
            msg = self.msg
        return msg

class TaskBase(event.Event):
    """Base class for task events"""

    def __init__(self, t, d ):
        self._task = t
        self._package = d.getVar("PF", True)
        event.Event.__init__(self)
        self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())

    def getTask(self):
        return self._task

    def setTask(self, task):
        self._task = task

    def getDisplayName(self):
        # Event class names are 'TaskXxx'; strip the 'Task' prefix.
        return bb.event.getName(self)[4:]

    task = property(getTask, setTask, None, "task property")

class TaskStarted(TaskBase):
    """Task execution started"""

class TaskSucceeded(TaskBase):
    """Task execution completed"""

class TaskFailed(TaskBase):
    """Task execution failed"""

    def __init__(self, task, logfile, metadata, errprinted = False):
        self.logfile = logfile
        self.errprinted = errprinted
        super(TaskFailed, self).__init__(task, metadata)

class TaskFailedSilent(TaskBase):
    """Task execution failed (silently)"""

    def __init__(self, task, logfile, metadata):
        self.logfile = logfile
        super(TaskFailedSilent, self).__init__(task, metadata)

    def getDisplayName(self):
        # Don't need to tell the user it was silent
        return "Failed"

class TaskInvalid(TaskBase):
    def __init__(self, task, metadata):
        super(TaskInvalid, self).__init__(task, metadata)
        self._message = "No such task '%s'" % task

class LogTee(object):
    # File-like object that mirrors every write to both a logger (at
    # 'plain' level) and an underlying output file.
    def __init__(self, logger, outfile):
        self.outfile = outfile
        self.logger = logger
        self.name = self.outfile.name

    def write(self, string):
        self.logger.plain(string)
        self.outfile.write(string)

    def __enter__(self):
        self.outfile.__enter__()
        return self

    def __exit__(self, *excinfo):
        self.outfile.__exit__(*excinfo)

    def __repr__(self):
        return '<LogTee {0}>'.format(self.name)

    def flush(self):
        self.outfile.flush()

def exec_func(func, d, dirs = None):
    """Execute an BB 'function'"""

    body = data.getVar(func, d)
    if not body:
        if body is None:
            logger.warn("Function %s doesn't exist", func)
        return

    flags = data.getVarFlags(func, d)
    # 'cleandirs' varflag: directories removed before execution.
    cleandirs = flags.get('cleandirs')
    if cleandirs:
        for cdir in data.expand(cleandirs, d).split():
            bb.utils.remove(cdir, True)

    if dirs is None:
        dirs = flags.get('dirs')
        if dirs:
            dirs = data.expand(dirs, d).split()

    # The last entry of 'dirs' becomes the working directory; with no
    # 'dirs', ${B} is created and used instead.
    if dirs:
        for adir in dirs:
            bb.utils.mkdirhier(adir)
        adir = dirs[-1]
    else:
        adir = data.getVar('B', d, 1)
        bb.utils.mkdirhier(adir)

    ispython = flags.get('python')

    lockflag = flags.get('lockfiles')
    if lockflag:
        lockfiles = [data.expand(f, d) for f in lockflag.split()]
    else:
        lockfiles = None

    tempdir = data.getVar('T', d, 1)

    # or func allows items to be executed outside of the normal
    # task set, such as buildhistory
    task = data.getVar('BB_RUNTASK', d, 1) or func
    if task == func:
        taskfunc = task
    else:
        taskfunc = "%s.%s" % (task, func)

    runfmt = data.getVar('BB_RUNFMT', d, 1) or "run.{func}.{pid}"
    runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
    runfile = os.path.join(tempdir, runfn)
    bb.utils.mkdirhier(os.path.dirname(runfile))

    # Hold all requested lockfiles for the duration of the function.
    with bb.utils.fileslocked(lockfiles):
        if ispython:
            exec_func_python(func, d, runfile, cwd=adir)
        else:
            exec_func_shell(func, d, runfile, cwd=adir)

# Template used to wrap a python function body for execution; also written
# to the runfile for debugging purposes.
_functionfmt = """
def {function}(d):
{body}

{function}(d)
"""
logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")

def exec_func_python(func, d, runfile, cwd=None):
    """Execute a python BB 'function'"""

    bbfile = d.getVar('FILE', True)
    code = _functionfmt.format(function=func, body=d.getVar(func, True))
    bb.utils.mkdirhier(os.path.dirname(runfile))
    with open(runfile, 'w') as script:
        script.write(code)

    if cwd:
        try:
            olddir = os.getcwd()
        except OSError:
            olddir = None
        os.chdir(cwd)

    bb.debug(2, "Executing python function %s" % func)

    try:
        comp = utils.better_compile(code, func, bbfile)
        utils.better_exec(comp, {"d": d}, code, bbfile)
    except:
        # SkipPackage / FuncFailed propagate unchanged; anything else is
        # wrapped in FuncFailed.
        if sys.exc_info()[0] in (bb.parse.SkipPackage, bb.build.FuncFailed):
            raise
        raise FuncFailed(func, None)
    finally:
        bb.debug(2, "Python function %s finished" % func)

        if cwd and olddir:
            try:
                os.chdir(olddir)
            except OSError:
                pass

def exec_func_shell(func, d, runfile, cwd=None):
    """Execute a shell function from the metadata

    Note on directory behavior. The 'dirs' varflag should contain a list
    of the directories you need created prior to execution. The last
    item in the list is where we will chdir/cd to.
    """

    # Don't let the emitted shell script override PWD
    d.delVarFlag('PWD', 'export')

    with open(runfile, 'w') as script:
        script.write('#!/bin/sh -e\n')
        data.emit_func(func, script, d)

        if bb.msg.loggerVerboseLogs:
            script.write("set -x\n")
        if cwd:
            script.write("cd %s\n" % cwd)
        script.write("%s\n" % func)

    os.chmod(runfile, 0775)

    cmd = runfile
    # fakeroot functions are run via the FAKEROOT wrapper command.
    if d.getVarFlag(func, 'fakeroot'):
        fakerootcmd = d.getVar('FAKEROOT', True)
        if fakerootcmd:
            cmd = [fakerootcmd, runfile]

    if bb.msg.loggerDefaultVerbose:
        logfile = LogTee(logger, sys.stdout)
    else:
        logfile = sys.stdout

    bb.debug(2, "Executing shell function %s" % func)

    try:
        bb.process.run(cmd, shell=False, stdin=NULL, log=logfile)
    except bb.process.CmdError:
        logfn = d.getVar('BB_LOGFILE', True)
        raise FuncFailed(func, logfn)

    bb.debug(2, "Shell function %s finished" % func)

def _task_data(fn, task, d):
    # Builds the task-local datastore: copy of d with BB_FILENAME,
    # BB_CURRENTTASK (task name minus the 'do_' prefix) and a task-specific
    # OVERRIDES entry, finalized and with its keys expanded.
    localdata = data.createCopy(d)
    localdata.setVar('BB_FILENAME', fn)
    localdata.setVar('BB_CURRENTTASK', task[3:])
    localdata.setVar('OVERRIDES', 'task-%s:%s' %
                     (task[3:], d.getVar('OVERRIDES', False)))
    localdata.finalize()
    data.expandKeys(localdata)
    return localdata

def _exec_task(fn, task, d, quieterr):
    """Execute a BB 'task'

    Execution of a task involves a bit more setup than executing a function,
    running it with its own local metadata, and with some useful variables set.
    """
    if not data.getVarFlag(task, 'task', d):
        event.fire(TaskInvalid(task, d), d)
        logger.error("No such task: %s" % task)
        return 1

    logger.debug(1, "Executing task %s", task)

    localdata = _task_data(fn, task, d)
    tempdir = localdata.getVar('T', True)
    if not tempdir:
        bb.fatal("T variable not set, unable to build")

    bb.utils.mkdirhier(tempdir)

    # Determine the logfile to generate
    logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
    logbase = logfmt.format(task=task, pid=os.getpid())

    # Document the order of the tasks...
    logorder = os.path.join(tempdir, 'log.task_order')
    try:
        logorderfile = file(logorder, 'a')
    except OSError:
        logger.exception("Opening log file '%s'", logorder)
        pass
    logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
    logorderfile.close()

    # Setup the courtesy link to the logfn
    loglink = os.path.join(tempdir, 'log.{0}'.format(task))
    logfn = os.path.join(tempdir, logbase)
    if loglink:
        bb.utils.remove(loglink)

        try:
            os.symlink(logbase, loglink)
        except OSError:
            pass

    prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
    postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)

    class ErrorCheckHandler(logging.Handler):
        # Records whether any ERROR-level message was logged during the task.
        def __init__(self):
            self.triggered = False
            logging.Handler.__init__(self, logging.ERROR)
        def emit(self, record):
            self.triggered = True

    # Handle logfiles
    si = file('/dev/null', 'r')
    try:
        bb.utils.mkdirhier(os.path.dirname(logfn))
        logfile = file(logfn, 'w')
    except OSError:
        logger.exception("Opening log file '%s'", logfn)
        pass

    # Dup the existing fds so we dont lose them
    osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
    oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own
    os.dup2(si.fileno(), osi[1])
    os.dup2(logfile.fileno(), oso[1])
    os.dup2(logfile.fileno(), ose[1])

    # Ensure python logging goes to the logfile
    handler = logging.StreamHandler(logfile)
    handler.setFormatter(logformatter)
    # Always enable full
debug output into task logfiles handler.setLevel(logging.DEBUG - 2) bblogger.addHandler(handler) errchk = ErrorCheckHandler() bblogger.addHandler(errchk) localdata.setVar('BB_LOGFILE', logfn) localdata.setVar('BB_RUNTASK', task) event.fire(TaskStarted(task, localdata), localdata) try: for func in (prefuncs or '').split(): exec_func(func, localdata) exec_func(task, localdata) for func in (postfuncs or '').split(): exec_func(func, localdata) except FuncFailed as exc: if quieterr: event.fire(TaskFailedSilent(task, logfn, localdata), localdata) else: errprinted = errchk.triggered logger.error(str(exc)) event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata) return 1 finally: sys.stdout.flush() sys.stderr.flush() bblogger.removeHandler(handler) # Restore the backup fds os.dup2(osi[0], osi[1]) os.dup2(oso[0], oso[1]) os.dup2(ose[0], ose[1]) # Close the backup fds os.close(osi[0]) os.close(oso[0]) os.close(ose[0]) si.close() logfile.close() if os.path.exists(logfn) and os.path.getsize(logfn) == 0: logger.debug(2, "Zero size logfn %s, removing", logfn) bb.utils.remove(logfn) bb.utils.remove(loglink) event.fire(TaskSucceeded(task, localdata), localdata) if not localdata.getVarFlag(task, 'nostamp') and not localdata.getVarFlag(task, 'selfstamp'): make_stamp(task, localdata) return 0 def exec_task(fn, task, d, profile = False): try: quieterr = False if d.getVarFlag(task, "quieterrors") is not None: quieterr = True if profile: profname = "profile-%s.log" % (os.path.basename(fn) + "-" + task) try: import cProfile as profile except: import profile prof = profile.Profile() ret = profile.Profile.runcall(prof, _exec_task, fn, task, d, quieterr) prof.dump_stats(profname) bb.utils.process_profilelog(profname) return ret else: return _exec_task(fn, task, d, quieterr) except Exception: from traceback import format_exc if not quieterr: logger.error("Build of %s failed" % (task)) logger.error(format_exc()) failedevent = TaskFailed(task, None, d, True) 
event.fire(failedevent, d) return 1 def stamp_internal(taskname, d, file_name): """ Internal stamp helper function Makes sure the stamp directory exists Returns the stamp path+filename In the bitbake core, d can be a CacheData and file_name will be set. When called in task context, d will be a data store, file_name will not be set """ taskflagname = taskname if taskname.endswith("_setscene") and taskname != "do_setscene": taskflagname = taskname.replace("_setscene", "") if file_name: stamp = d.stamp_base[file_name].get(taskflagname) or d.stamp[file_name] extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or "" else: stamp = d.getVarFlag(taskflagname, 'stamp-base', True) or d.getVar('STAMP', True) file_name = d.getVar('BB_FILENAME', True) extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or "" if not stamp: return stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo) stampdir = os.path.dirname(stamp) if bb.parse.cached_mtime_noerror(stampdir) == 0: bb.utils.mkdirhier(stampdir) return stamp def stamp_cleanmask_internal(taskname, d, file_name): """ Internal stamp helper function to generate stamp cleaning mask Returns the stamp path+filename In the bitbake core, d can be a CacheData and file_name will be set. 
When called in task context, d will be a data store, file_name will not be set """ taskflagname = taskname if taskname.endswith("_setscene") and taskname != "do_setscene": taskflagname = taskname.replace("_setscene", "") if file_name: stamp = d.stamp_base_clean[file_name].get(taskflagname) or d.stampclean[file_name] extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or "" else: stamp = d.getVarFlag(taskflagname, 'stamp-base-clean', True) or d.getVar('STAMPCLEAN', True) file_name = d.getVar('BB_FILENAME', True) extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or "" if not stamp: return [] cleanmask = bb.parse.siggen.stampcleanmask(stamp, file_name, taskname, extrainfo) return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")] def make_stamp(task, d, file_name = None): """ Creates/updates a stamp for a given task (d can be a data dict or dataCache) """ cleanmask = stamp_cleanmask_internal(task, d, file_name) for mask in cleanmask: for name in glob.glob(mask): # Preserve sigdata files in the stamps directory if "sigdata" in name: continue # Preserve taint files in the stamps directory if name.endswith('.taint'): continue os.unlink(name) stamp = stamp_internal(task, d, file_name) # Remove the file and recreate to force timestamp # change on broken NFS filesystems if stamp: bb.utils.remove(stamp) f = open(stamp, "w") f.close() # If we're in task context, write out a signature file for each task # as it completes if not task.endswith("_setscene") and task != "do_setscene" and not file_name: file_name = d.getVar('BB_FILENAME', True) bb.parse.siggen.dump_sigtask(file_name, task, d.getVar('STAMP', True), True) def del_stamp(task, d, file_name = None): """ Removes a stamp for a given task (d can be a data dict or dataCache) """ stamp = stamp_internal(task, d, file_name) bb.utils.remove(stamp) def write_taint(task, d, file_name = None): """ Creates a "taint" file which will force the specified task and its dependents to be re-run 
the next time by influencing the value of its taskhash. (d can be a data dict or dataCache) """ import uuid if file_name: taintfn = d.stamp[file_name] + '.' + task + '.taint' else: taintfn = d.getVar('STAMP', True) + '.' + task + '.taint' bb.utils.mkdirhier(os.path.dirname(taintfn)) # The specific content of the taint file is not really important, # we just need it to be random, so a random UUID is used with open(taintfn, 'w') as taintf: taintf.write(str(uuid.uuid4())) def stampfile(taskname, d, file_name = None): """ Return the stamp for a given task (d can be a data dict or dataCache) """ return stamp_internal(taskname, d, file_name) def add_tasks(tasklist, d): task_deps = data.getVar('_task_deps', d) if not task_deps: task_deps = {} if not 'tasks' in task_deps: task_deps['tasks'] = [] if not 'parents' in task_deps: task_deps['parents'] = {} for task in tasklist: task = data.expand(task, d) data.setVarFlag(task, 'task', 1, d) if not task in task_deps['tasks']: task_deps['tasks'].append(task) flags = data.getVarFlags(task, d) def getTask(name): if not name in task_deps: task_deps[name] = {} if name in flags: deptask = data.expand(flags[name], d) task_deps[name][task] = deptask getTask('depends') getTask('rdepends') getTask('deptask') getTask('rdeptask') getTask('recrdeptask') getTask('nostamp') getTask('fakeroot') getTask('noexec') getTask('umask') task_deps['parents'][task] = [] if 'deps' in flags: for dep in flags['deps']: dep = data.expand(dep, d) task_deps['parents'][task].append(dep) # don't assume holding a reference data.setVar('_task_deps', task_deps, d) def remove_task(task, kill, d): """Remove an BB 'task'. If kill is 1, also remove tasks that depend on this task.""" data.delVarFlag(task, 'task', d)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from collections import OrderedDict
import argparse
import binascii
import contextlib
import glob
import itertools
import json
import os
import random
import six
import string
import subprocess
import sys
import tempfile
import traceback
import uuid
import errno

import numpy as np

# Repository root, derived from this file's location (two levels up).
ARROW_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]

# Control for flakiness
np.random.seed(12345)


def load_version_from_pom():
    """Read the project version out of java/pom.xml."""
    import xml.etree.ElementTree as ET
    tree = ET.parse(os.path.join(ARROW_HOME, 'java', 'pom.xml'))
    tag_pattern = '{http://maven.apache.org/POM/4.0.0}version'
    version_tag = list(tree.getroot().findall(tag_pattern))[0]
    return version_tag.text


def guid():
    # Random hex identifier (32 chars).
    return uuid.uuid4().hex


# from pandas
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
                       dtype=(np.str_, 1))


def rands(nchars):
    """
    Generate one random byte string.

    See `rands_array` if you want to create an array of random strings.

    """
    return ''.join(np.random.choice(RANDS_CHARS, nchars))


def tobytes(o):
    # Encode text to UTF-8 bytes; pass bytes through unchanged.
    if isinstance(o, six.text_type):
        return o.encode('utf8')
    return o


def frombytes(o):
    # Decode UTF-8 bytes to text; pass text through unchanged.
    if isinstance(o, six.binary_type):
        return o.decode('utf8')
    return o


# from the merge_arrow_pr.py script
def run_cmd(cmd):
    """Run a command, returning its decoded output; echo output on failure."""
    if isinstance(cmd, six.string_types):
        cmd = cmd.split(' ')

    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # this avoids hiding the stdout / stderr of failed processes
        print('Command failed: %s' % ' '.join(cmd))
        print('With output:')
        print('--------------')
        print(frombytes(e.output))
        print('--------------')
        raise e

    return frombytes(output)

# ----------------------------------------------------------------------
# Data generation


class DataType(object):
    """Base for JSON-integration-format field types."""

    def __init__(self, name, nullable=True):
        self.name = name
        self.nullable = nullable

    def get_json(self):
        # Field entry for the "schema" section of the JSON file.
        return OrderedDict([
            ('name', self.name),
            ('type', self._get_type()),
            ('nullable', self.nullable),
            ('children', self._get_children())
        ])

    def _make_is_valid(self, size):
        # Random validity bitmap (0/1 per slot); all-valid when not nullable.
        if self.nullable:
            return np.random.randint(0, 2, size=size)
        else:
            return np.ones(size)


class Column(object):
    """Base for generated column data (one field of one record batch)."""

    def __init__(self, name, count):
        self.name = name
        self.count = count

    def __len__(self):
        return self.count

    def _get_children(self):
        return []

    def _get_buffers(self):
        return []

    def get_json(self):
        entries = [
            ('name', self.name),
            ('count', self.count)
        ]

        buffers = self._get_buffers()
        entries.extend(buffers)

        children = self._get_children()
        if len(children) > 0:
            entries.append(('children', children))

        return OrderedDict(entries)


class PrimitiveType(DataType):

    def _get_children(self):
        return []


class PrimitiveColumn(Column):
    """Flat column: VALIDITY + DATA buffers."""

    def __init__(self, name, count, is_valid, values):
        super(PrimitiveColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.values = values

    def _encode_value(self, x):
        # Hook for subclasses that need JSON-safe encodings (e.g. hex).
        return x

    def _get_buffers(self):
        return [
            ('VALIDITY', [int(v) for v in self.is_valid]),
            ('DATA', list([self._encode_value(x) for x in self.values]))
        ]


# Default generation bounds: 32-bit signed range.
TEST_INT_MAX = 2 ** 31 - 1
TEST_INT_MIN = ~TEST_INT_MAX


class IntegerType(PrimitiveType):

    def __init__(self, name, is_signed, bit_width, nullable=True,
                 min_value=TEST_INT_MIN,
                 max_value=TEST_INT_MAX):
        super(IntegerType, self).__init__(name, nullable=nullable)
        self.is_signed = is_signed
        self.bit_width = bit_width
        self.min_value = min_value
        self.max_value = max_value

    def _get_generated_data_bounds(self):
        # Clamp the requested bounds to what the physical type can hold.
        if self.is_signed:
            signed_iinfo = np.iinfo('int' + str(self.bit_width))
            min_value, max_value = signed_iinfo.min, signed_iinfo.max
        else:
            unsigned_iinfo = np.iinfo('uint' + str(self.bit_width))
            min_value, max_value = 0, unsigned_iinfo.max

        lower_bound = max(min_value, self.min_value)
        upper_bound = min(max_value, self.max_value)
        return lower_bound, upper_bound

    def _get_type(self):
        return OrderedDict([
            ('name', 'int'),
            ('isSigned', self.is_signed),
            ('bitWidth', self.bit_width)
        ])

    def generate_column(self, size, name=None):
        lower_bound, upper_bound = self._get_generated_data_bounds()
        return self.generate_range(size, lower_bound, upper_bound,
                                   name=name)

    def generate_range(self, size, lower, upper, name=None):
        values = [int(x) for x in
                  np.random.randint(lower, upper, size=size)]

        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return PrimitiveColumn(name, size, is_valid, values)


class DateType(IntegerType):

    DAY = 0
    MILLISECOND = 1

    # 1/1/1 to 12/31/9999
    _ranges = {
        DAY: [-719162, 2932896],
        MILLISECOND: [-62135596800000, 253402214400000]
    }

    def __init__(self, name, unit, nullable=True):
        # DAY dates are 32-bit, MILLISECOND dates are 64-bit.
        bit_width = 32 if unit == self.DAY else 64

        min_value, max_value = self._ranges[unit]
        super(DateType, self).__init__(
            name, True, bit_width,
            nullable=nullable,
            min_value=min_value,
            max_value=max_value
        )
        self.unit = unit

    def _get_type(self):
        return OrderedDict([
            ('name', 'date'),
            ('unit', 'DAY' if self.unit == self.DAY else 'MILLISECOND')
        ])


TIMEUNIT_NAMES = {
    's': 'SECOND',
    'ms': 'MILLISECOND',
    'us': 'MICROSECOND',
    'ns': 'NANOSECOND'
}


class TimeType(IntegerType):

    BIT_WIDTHS = {
        's': 32,
        'ms': 32,
        'us': 64,
        'ns': 64
    }

    # One day expressed in each unit.
    _ranges = {
        's': [0, 86400],
        'ms': [0, 86400000],
        'us': [0, 86400000000],
        'ns': [0, 86400000000000]
    }

    def __init__(self, name, unit='s', nullable=True):
        min_val, max_val = self._ranges[unit]
        super(TimeType, self).__init__(name, True, self.BIT_WIDTHS[unit],
                                       nullable=nullable,
                                       min_value=min_val,
                                       max_value=max_val)
        self.unit = unit

    def _get_type(self):
        return OrderedDict([
            ('name', 'time'),
            ('unit', TIMEUNIT_NAMES[self.unit]),
            ('bitWidth', self.bit_width)
        ])


class TimestampType(IntegerType):

    # 1/1/1 to 12/31/9999
    _ranges = {
        's': [-62135596800, 253402214400],
        'ms': [-62135596800000, 253402214400000],
        'us': [-62135596800000000, 253402214400000000],

        # Physical range for int64, ~584 years and change
        'ns': [np.iinfo('int64').min, np.iinfo('int64').max]
    }

    def __init__(self, name, unit='s', tz=None, nullable=True):
        min_val, max_val = self._ranges[unit]
        super(TimestampType, self).__init__(name, True, 64,
                                            nullable=nullable,
                                            min_value=min_val,
                                            max_value=max_val)
        self.unit = unit
        self.tz = tz

    def _get_type(self):
        fields = [
            ('name', 'timestamp'),
            ('unit', TIMEUNIT_NAMES[self.unit])
        ]

        if self.tz is not None:
            # "timezone" is only emitted when set.
            fields.append(('timezone', self.tz))

        return OrderedDict(fields)


class DurationIntervalType(IntegerType):
    def __init__(self, name, unit='s', nullable=True):
        min_val, max_val = np.iinfo('int64').min, np.iinfo('int64').max,
        super(DurationIntervalType, self).__init__(
            name, True, 64,
            nullable=nullable, min_value=min_val, max_value=max_val)
        self.unit = unit

    def _get_type(self):
        fields = [
            ('name', 'duration'),
            ('unit', TIMEUNIT_NAMES[self.unit])
        ]

        return OrderedDict(fields)


class YearMonthIntervalType(IntegerType):
    def __init__(self, name, nullable=True):
        min_val, max_val = [-10000*12, 10000*12]  # +/- 10000 years.
        super(YearMonthIntervalType, self).__init__(
            name, True, 32,
            nullable=nullable, min_value=min_val, max_value=max_val)

    def _get_type(self):
        fields = [
            ('name', 'interval'),
            ('unit', 'YEAR_MONTH'),
        ]

        return OrderedDict(fields)


class DayTimeIntervalType(PrimitiveType):
    def __init__(self, name, nullable=True):
        # NOTE(review): passes nullable=True to the base class, ignoring the
        # 'nullable' parameter — looks like a bug; confirm against upstream.
        super(DayTimeIntervalType, self).__init__(name, nullable=True)

    @property
    def numpy_type(self):
        return object

    def _get_type(self):

        return OrderedDict([
            ('name', 'interval'),
            ('unit', 'DAY_TIME'),
        ])

    def generate_column(self, size, name=None):
        # +/- 10000 years in days; milliseconds within +/- one day.
        min_day_value, max_day_value = -10000*366, 10000*366
        values = [{'days': random.randint(min_day_value, max_day_value),
                   'milliseconds': random.randint(-86400000, +86400000)}
                  for _ in range(size)]

        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return PrimitiveColumn(name, size, is_valid, values)


class FloatingPointType(PrimitiveType):

    def __init__(self, name, bit_width, nullable=True):
        super(FloatingPointType, self).__init__(name,
                                                nullable=nullable)

        self.bit_width = bit_width
        self.precision = {
            16: 'HALF',
            32: 'SINGLE',
            64: 'DOUBLE'
        }[self.bit_width]

    @property
    def numpy_type(self):
        return 'float' + str(self.bit_width)

    def _get_type(self):
        return OrderedDict([
            ('name', 'floatingpoint'),
            ('precision', self.precision)
        ])

    def generate_column(self, size, name=None):
        values = np.random.randn(size) * 1000
        # Round so the JSON round-trips without precision surprises.
        values = np.round(values, 3)

        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return PrimitiveColumn(name, size, is_valid, values)


# Largest value representable in the number of decimal digits at each
# precision step (keys are digit counts, values are byte-boundary maxima).
DECIMAL_PRECISION_TO_VALUE = {
    key: (1 << (8 * i - 1)) - 1 for i, key in enumerate(
        [1, 3, 5, 7, 10, 12, 15, 17, 19, 22, 24, 27, 29, 32, 34, 36],
        start=1,
    )
}


def decimal_range_from_precision(precision):
    """Return the (min, max) generated value range for a decimal precision."""
    assert 1 <= precision <= 38
    try:
        max_value = DECIMAL_PRECISION_TO_VALUE[precision]
    except KeyError:
        # Fall back to the next smaller precision that has an entry.
        return decimal_range_from_precision(precision - 1)
    else:
        return ~max_value, max_value


class DecimalType(PrimitiveType):
    def __init__(self, name, precision, scale, bit_width=128, nullable=True):
        # NOTE(review): passes nullable=True to the base class, ignoring the
        # 'nullable' parameter — looks like a bug; confirm against upstream.
        super(DecimalType, self).__init__(name, nullable=True)
        self.precision = precision
        self.scale = scale
        self.bit_width = bit_width

    @property
    def numpy_type(self):
        return object

    def _get_type(self):
        return OrderedDict([
            ('name', 'decimal'),
            ('precision', self.precision),
            ('scale', self.scale),
        ])

    def generate_column(self, size, name=None):
        min_value, max_value = decimal_range_from_precision(self.precision)
        values = [random.randint(min_value, max_value) for _ in range(size)]

        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return DecimalColumn(name, size, is_valid, values, self.bit_width)


class DecimalColumn(PrimitiveColumn):

    def __init__(self, name, count, is_valid, values, bit_width=128):
        super(DecimalColumn, self).__init__(name, count, is_valid, values)
        self.bit_width = bit_width

    def _encode_value(self, x):
        # Decimals are serialized as strings in the JSON format.
        return str(x)


class BooleanType(PrimitiveType):
    bit_width = 1

    def _get_type(self):
        return OrderedDict([('name', 'bool')])

    @property
    def numpy_type(self):
        return 'bool'

    def generate_column(self, size, name=None):
        values = list(map(bool, np.random.randint(0, 2, size=size)))
        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return PrimitiveColumn(name, size, is_valid, values)


class BinaryType(PrimitiveType):

    @property
    def numpy_type(self):
        return object

    @property
    def column_class(self):
        return BinaryColumn

    def _get_type(self):
        return OrderedDict([('name', 'binary')])

    def generate_column(self, size, name=None):
        K = 7
        is_valid = self._make_is_valid(size)
        values = []

        for i in range(size):
            if is_valid[i]:
                draw = (np.random.randint(0, 255, size=K)
                        .astype(np.uint8)
                        .tostring())
                values.append(draw)
            else:
                # Null slots still get an (empty) value entry.
                values.append(b"")

        if name is None:
            name = self.name
        return self.column_class(name, size, is_valid, values)


class FixedSizeBinaryType(PrimitiveType):

    def __init__(self, name, byte_width, nullable=True):
        super(FixedSizeBinaryType, self).__init__(name, nullable=nullable)
        self.byte_width = byte_width

    @property
    def numpy_type(self):
        return object

    @property
    def column_class(self):
        return FixedSizeBinaryColumn

    def _get_type(self):
        return OrderedDict([('name', 'fixedsizebinary'),
                            ('byteWidth', self.byte_width)])

    def _get_type_layout(self):
        return OrderedDict([
            ('vectors',
             [OrderedDict([('type', 'VALIDITY'),
                           ('typeBitWidth', 1)]),
              OrderedDict([('type', 'DATA'),
                           ('typeBitWidth', self.byte_width)])])])

    def generate_column(self, size, name=None):
        is_valid = self._make_is_valid(size)
        values = []

        # Fixed-size layout: every slot (valid or not) gets byte_width bytes.
        for i in range(size):
            draw = (np.random.randint(0, 255, size=self.byte_width)
                    .astype(np.uint8)
                    .tostring())
            values.append(draw)

        if name is None:
            name = self.name
        return self.column_class(name, size, is_valid, values)


class StringType(BinaryType):

    @property
    def column_class(self):
        return StringColumn

    def _get_type(self):
        return OrderedDict([('name', 'utf8')])

    def generate_column(self, size, name=None):
        K = 7
        is_valid = self._make_is_valid(size)
        values = []

        for i in range(size):
            if is_valid[i]:
                values.append(tobytes(rands(K)))
            else:
                values.append(b"")

        if name is None:
            name = self.name
        return self.column_class(name, size, is_valid, values)


class JsonSchema(object):
    """"schema" section of a JSON integration file."""

    def __init__(self, fields):
        self.fields = fields

    def get_json(self):
        return OrderedDict([
            ('fields', [field.get_json() for field in self.fields])
        ])


class BinaryColumn(PrimitiveColumn):
    """Variable-length binary column: VALIDITY + OFFSET + DATA buffers."""

    def _encode_value(self, x):
        # Bytes are serialized as uppercase hex strings.
        return frombytes(binascii.hexlify(x).upper())

    def _get_buffers(self):
        offset = 0
        offsets = [0]

        data = []
        for i, v in enumerate(self.values):
            if self.is_valid[i]:
                offset += len(v)
            else:
                v = b""

            offsets.append(offset)
            data.append(self._encode_value(v))

        return [
            ('VALIDITY', [int(x) for x in self.is_valid]),
            ('OFFSET', offsets),
            ('DATA', data)
        ]


class FixedSizeBinaryColumn(PrimitiveColumn):

    def _encode_value(self, x):
        return ''.join('{:02x}'.format(c).upper() for c in x)

    def _get_buffers(self):
        data = []
        for i, v in enumerate(self.values):
            data.append(self._encode_value(v))

        return [
            ('VALIDITY', [int(x) for x in self.is_valid]),
            ('DATA', data)
        ]


class StringColumn(BinaryColumn):

    def _encode_value(self, x):
        # utf8 values are serialized as plain text, not hex.
        return frombytes(x)


class ListType(DataType):

    def __init__(self, name, value_type, nullable=True):
        super(ListType, self).__init__(name, nullable=nullable)
        self.value_type = value_type

    def _get_type(self):
        return OrderedDict([
            ('name', 'list')
        ])

    def _get_children(self):
        return [self.value_type.get_json()]

    def generate_column(self, size, name=None):
        MAX_LIST_SIZE = 4

        is_valid = self._make_is_valid(size)
        list_sizes = np.random.randint(0, MAX_LIST_SIZE + 1, size=size)
        offsets = [0]

        offset = 0
        for i in range(size):
            if is_valid[i]:
                offset += int(list_sizes[i])
            offsets.append(offset)

        # The offset now is the total number of elements in the child array
        values = self.value_type.generate_column(offset)

        if name is None:
            name = self.name
        return ListColumn(name, size, is_valid, offsets, values)


class ListColumn(Column):

    def __init__(self, name, count, is_valid, offsets, values):
        super(ListColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.offsets = offsets
        self.values = values

    def _get_buffers(self):
        return [
            ('VALIDITY', [int(v) for v in self.is_valid]),
            ('OFFSET', list(self.offsets))
        ]

    def _get_children(self):
        return [self.values.get_json()]


class MapType(DataType):

    def __init__(self, name, key_type, item_type, nullable=True,
                 keysSorted=False):
        super(MapType, self).__init__(name, nullable=nullable)

        # Map keys must be non-nullable per the Arrow spec.
        assert not key_type.nullable
        self.key_type = key_type
        self.item_type = item_type
        self.pair_type = StructType('entries', [key_type, item_type], False)
        self.keysSorted = keysSorted

    def _get_type(self):
        return OrderedDict([
            ('name', 'map'),
            ('keysSorted', self.keysSorted)
        ])

    def _get_children(self):
        return [self.pair_type.get_json()]

    def generate_column(self, size, name=None):
        MAX_MAP_SIZE = 4

        is_valid = self._make_is_valid(size)
        map_sizes = np.random.randint(0, MAX_MAP_SIZE + 1, size=size)
        offsets = [0]

        offset = 0
        for i in range(size):
            if is_valid[i]:
                offset += int(map_sizes[i])
            offsets.append(offset)

        # The offset now is the total number of elements in the child array
        pairs = self.pair_type.generate_column(offset)
        if name is None:
            name = self.name

        return MapColumn(name, size, is_valid, offsets, pairs)


class MapColumn(Column):

    def __init__(self, name, count, is_valid, offsets, pairs):
        super(MapColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.offsets = offsets
        self.pairs = pairs

    def _get_buffers(self):
        return [
            ('VALIDITY', [int(v) for v in self.is_valid]),
            ('OFFSET', list(self.offsets))
        ]

    def _get_children(self):
        return [self.pairs.get_json()]


class FixedSizeListType(DataType):

    def __init__(self, name, value_type, list_size, nullable=True):
        super(FixedSizeListType, self).__init__(name, nullable=nullable)
        self.value_type = value_type
        self.list_size = list_size

    def _get_type(self):
        return OrderedDict([
            ('name', 'fixedsizelist'),
            ('listSize', self.list_size)
        ])

    def _get_children(self):
        return [self.value_type.get_json()]

    def generate_column(self, size, name=None):
        is_valid = self._make_is_valid(size)
        # Child array holds exactly list_size values per slot; no offsets.
        values = self.value_type.generate_column(size * self.list_size)

        if name is None:
            name = self.name
        return FixedSizeListColumn(name, size, is_valid, values)


class FixedSizeListColumn(Column):

    def __init__(self, name, count, is_valid, values):
        super(FixedSizeListColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.values = values

    def _get_buffers(self):
        return [
            ('VALIDITY', [int(v) for v in self.is_valid])
        ]

    def _get_children(self):
        return [self.values.get_json()]


class StructType(DataType):

    def __init__(self, name, field_types, nullable=True):
        super(StructType, self).__init__(name, nullable=nullable)
        self.field_types = field_types

    def _get_type(self):
        return OrderedDict([
            ('name', 'struct')
        ])

    def _get_children(self):
        return [type_.get_json() for type_ in self.field_types]

    def generate_column(self, size, name=None):
        is_valid = self._make_is_valid(size)

        field_values = [type_.generate_column(size)
                        for type_ in self.field_types]
        if name is None:
            name = self.name
        return StructColumn(name, size, is_valid, field_values)


class Dictionary(object):
    """A dictionary (id + values) shared by DictionaryType fields."""

    def __init__(self, id_, field, values, ordered=False):
        self.id_ = id_
        self.field = field
        self.values = values
        self.ordered = ordered

    def __len__(self):
        return len(self.values)

    def get_json(self):
        # Dictionary values are serialized as a single-column record batch.
        dummy_batch = JsonRecordBatch(len(self.values), [self.values])
        return OrderedDict([
            ('id', self.id_),
            ('data', dummy_batch.get_json())
        ])


class DictionaryType(DataType):

    def __init__(self, name, index_type, dictionary, nullable=True):
        super(DictionaryType, self).__init__(name, nullable=nullable)
        assert isinstance(index_type, IntegerType)
        assert isinstance(dictionary, Dictionary)

        self.index_type = index_type
        self.dictionary = dictionary

    def get_json(self):
        dict_field = self.dictionary.field
        return OrderedDict([
            ('name', self.name),
            ('type', dict_field._get_type()),
            ('nullable', self.nullable),
            ('children', dict_field._get_children()),
            ('dictionary', OrderedDict([
                ('id', self.dictionary.id_),
                ('indexType', self.index_type._get_type()),
                ('isOrdered', self.dictionary.ordered)
            ]))
        ])

    def generate_column(self, size, name=None):
        if name is None:
            name = self.name
        # Only indices into the dictionary are generated for this column.
        return self.index_type.generate_range(size, 0, len(self.dictionary),
                                              name=name)


class StructColumn(Column):

    def __init__(self, name, count, is_valid, field_values):
        super(StructColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.field_values = field_values

    def _get_buffers(self):
        return [
            ('VALIDITY', [int(v) for v in self.is_valid])
        ]

    def _get_children(self):
        return [field.get_json() for field in self.field_values]


class JsonRecordBatch(object):
    """One "batches" entry of a JSON integration file."""

    def __init__(self, count, columns):
        self.count = count
        self.columns = columns

    def get_json(self):
        return OrderedDict([
            ('count', self.count),
            ('columns', [col.get_json() for col in self.columns])
        ])


# SKIP categories
SKIP_ARROW = 'arrow'
SKIP_FLIGHT = 'flight'


class JsonFile(object):
    """A complete JSON integration test file (schema + dictionaries + batches)."""

    def __init__(self, name, schema, batches, dictionaries=None,
                 skip=None, path=None):
        self.name = name
        self.schema = schema
        self.dictionaries = dictionaries or []
        self.batches = batches
        self.skip = set()
        self.path = path
        if skip:
            self.skip.update(skip)

    def get_json(self):
        entries = [
            ('schema', self.schema.get_json())
        ]

        if len(self.dictionaries) > 0:
            entries.append(('dictionaries',
                            [dictionary.get_json()
                             for dictionary in self.dictionaries]))

        entries.append(('batches', [batch.get_json()
                                    for batch in self.batches]))
        return OrderedDict(entries)

    def write(self, path):
        with open(path, 'wb') as f:
            f.write(json.dumps(self.get_json(), indent=2).encode('utf-8'))
        self.path = path

    def skip_category(self, category):
        """Skip this test for the given category.

        Category should be SKIP_ARROW or SKIP_FLIGHT.
        """
        self.skip.add(category)
        return self


def get_field(name, type_, nullable=True):
    """Construct a field type instance from a short type-name string."""
    if type_ == 'binary':
        return BinaryType(name, nullable=nullable)
    elif type_ == 'utf8':
        return StringType(name, nullable=nullable)
    elif type_.startswith('fixedsizebinary_'):
        byte_width = int(type_.split('_')[1])
        return FixedSizeBinaryType(name,
                                   byte_width=byte_width, nullable=nullable)

    # Everything else is resolved through a numpy dtype.
    dtype = np.dtype(type_)

    if dtype.kind in ('i', 'u'):
        return IntegerType(name, dtype.kind == 'i', dtype.itemsize * 8,
                           nullable=nullable)
    elif dtype.kind == 'f':
        return FloatingPointType(name, dtype.itemsize * 8,
                                 nullable=nullable)
    elif dtype.kind == 'b':
        return BooleanType(name, nullable=nullable)
    else:
        raise TypeError(dtype)


def _generate_file(name, fields, batch_sizes, dictionaries=None, skip=None):
    """Generate a JsonFile with one record batch per entry in batch_sizes."""
    schema = JsonSchema(fields)
    batches = []
    for size in batch_sizes:
        columns = []
        for field in fields:
            col = field.generate_column(size)
            columns.append(col)

        batches.append(JsonRecordBatch(size, columns))

    return JsonFile(name, schema, batches, dictionaries, skip=skip)


def generate_primitive_case(batch_sizes, name='primitive'):
    # Each type appears twice: once nullable, once non-nullable.
    types = ['bool', 'int8', 'int16', 'int32', 'int64',
             'uint8', 'uint16', 'uint32', 'uint64',
             'float32', 'float64', 'binary', 'utf8',
             'fixedsizebinary_19', 'fixedsizebinary_120']

    fields = []

    for type_ in types:
        fields.append(get_field(type_ + "_nullable", type_, True))
        fields.append(get_field(type_ + "_nonnullable", type_, False))

    return _generate_file(name, fields, batch_sizes)


def generate_decimal_case():
    # One field per precision from 3 through 38, all at scale 2.
    fields = [
        DecimalType(name='f{}'.format(i), precision=precision, scale=2)
        for i, precision in enumerate(range(3, 39))
    ]

    possible_batch_sizes = 7, 10
    batch_sizes = [possible_batch_sizes[i % 2] for i in range(len(fields))]

    return _generate_file('decimal', fields, batch_sizes)


def generate_datetime_case():
    fields = [
        DateType('f0', DateType.DAY),
        DateType('f1', DateType.MILLISECOND),
        TimeType('f2', 's'),
        TimeType('f3', 'ms'),
        TimeType('f4', 'us'),
        TimeType('f5', 'ns'),
        TimestampType('f6', 's'),
        TimestampType('f7', 'ms'),
        TimestampType('f8', 'us'),
        TimestampType('f9', 'ns'),
        TimestampType('f10', 'ms', tz=None),
        TimestampType('f11', 's', tz='UTC'),
        TimestampType('f12', 'ms', tz='US/Eastern'),
        TimestampType('f13', 'us', tz='Europe/Paris'),
        TimestampType('f14', 'ns', tz='US/Pacific'),
    ]

    batch_sizes = [7, 10]
    return _generate_file("datetime", fields, batch_sizes)


def generate_interval_case():
    fields = [
        DurationIntervalType('f1', 's'),
        DurationIntervalType('f2', 'ms'),
        DurationIntervalType('f3', 'us'),
        DurationIntervalType('f4', 'ns'),
        YearMonthIntervalType('f5'),
        DayTimeIntervalType('f6'),
    ]

    batch_sizes = [7, 10]
    return _generate_file("interval", fields, batch_sizes)


def generate_map_case():
    # TODO(bkietz): separated from nested_case so it can be
    # independently skipped, consolidate after JS supports map
    fields = [
        MapType('map_nullable', get_field('key', 'utf8', False),
                get_field('value', 'int32')),
    ]

    batch_sizes = [7, 10]
    return _generate_file("map", fields, batch_sizes)


def generate_nested_case():
    fields = [
        ListType('list_nullable', get_field('item', 'int32')),
        FixedSizeListType('fixedsizelist_nullable',
                          get_field('item', 'int32'), 4),
        StructType('struct_nullable', [get_field('f1', 'int32'),
                                       get_field('f2', 'utf8')]),
        # TODO(wesm): this causes segfault
        # ListType('list_nonnullable', get_field('item', 'int32'), False),
    ]

    batch_sizes = [7, 10]
    return _generate_file("nested", fields, batch_sizes)


def generate_dictionary_case():
    # NOTE(review): dict_type0 and dict_type1 are both named 'dictionary1';
    # possibly a typo for 'dictionary0' — confirm against upstream.
    dict_type0 = StringType('dictionary1')
    dict_type1 = StringType('dictionary1')
    dict_type2 = get_field('dictionary2', 'int64')

    dict0 = Dictionary(0, dict_type0,
                       dict_type0.generate_column(10, name='DICT0'))
    dict1 = Dictionary(1, dict_type1,
                       dict_type1.generate_column(5, name='DICT1'))
    dict2 = Dictionary(2, dict_type2,
                       dict_type2.generate_column(50, name='DICT2'))

    fields = [
        DictionaryType('dict0', get_field('', 'int8'), dict0),
        DictionaryType('dict1', get_field('', 'int32'), dict1),
        DictionaryType('dict2', get_field('', 'int16'), dict2)
    ]
    batch_sizes = [7, 10]
    return _generate_file("dictionary", fields, batch_sizes,
                          dictionaries=[dict0, dict1, dict2])


def generate_nested_dictionary_case():
    str_type = StringType('str')
    dict0 = Dictionary(0, str_type, str_type.generate_column(10, name='DICT0'))

    list_type = ListType(
        'list',
        DictionaryType('str_dict', get_field('', 'int8'), dict0))
    dict1 = Dictionary(1,
                       list_type,
                       list_type.generate_column(30, name='DICT1'))

    struct_type = StructType('struct', [
        DictionaryType('str_dict_a', get_field('', 'int8'), dict0),
        DictionaryType('str_dict_b', get_field('', 'int8'), dict0)
    ])
    dict2 = Dictionary(2,
                       struct_type,
                       struct_type.generate_column(30, name='DICT2'))

    fields = [
        DictionaryType('list_dict', get_field('', 'int8'), dict1),
        DictionaryType('struct_dict', get_field('', 'int8'), dict2)
    ]

    batch_sizes = [10, 13]
    return _generate_file("nested_dictionary", fields, batch_sizes,
                          dictionaries=[dict0, dict1, dict2])


def get_generated_json_files(tempdir=None, flight=False):
    tempdir = tempdir or tempfile.mkdtemp()

    def _temp_path():
        return

    file_objs = [
        generate_primitive_case([], name='primitive_no_batches'),
        generate_primitive_case([17, 20], name='primitive'),
        generate_primitive_case([0, 0, 0],
name='primitive_zerolength'), generate_decimal_case(), generate_datetime_case(), generate_interval_case(), generate_map_case(), generate_nested_case(), generate_dictionary_case(), generate_nested_dictionary_case().skip_category(SKIP_ARROW) .skip_category(SKIP_FLIGHT), ] if flight: file_objs.append(generate_primitive_case([24 * 1024], name='large_batch')) generated_paths = [] for file_obj in file_objs: out_path = os.path.join(tempdir, 'generated_' + file_obj.name + '.json') file_obj.write(out_path) generated_paths.append(file_obj) return generated_paths # ---------------------------------------------------------------------- # Testing harness class IntegrationRunner(object): def __init__(self, json_files, testers, tempdir=None, debug=False): self.json_files = json_files self.testers = testers self.temp_dir = tempdir or tempfile.mkdtemp() self.debug = debug def run(self): failures = [] for producer, consumer in itertools.product( filter(lambda t: t.PRODUCER, self.testers), filter(lambda t: t.CONSUMER, self.testers)): for failure in self._compare_implementations(producer, consumer): failures.append(failure) return failures def run_flight(self): failures = [] servers = filter(lambda t: t.FLIGHT_SERVER, self.testers) clients = filter(lambda t: (t.FLIGHT_CLIENT and t.CONSUMER), self.testers) for server, client in itertools.product(servers, clients): for failure in self._compare_flight_implementations(server, client): failures.append(failure) return failures def _compare_implementations(self, producer, consumer): print('##########################################################') print( '{0} producing, {1} consuming'.format(producer.name, consumer.name) ) print('##########################################################') for test_case in self.json_files: json_path = test_case.path print('==========================================================') print('Testing file {0}'.format(json_path)) print('==========================================================') name = 
os.path.splitext(os.path.basename(json_path))[0] file_id = guid()[:8] if ('JS' in (producer.name, consumer.name) and "map" in test_case.name): print('TODO(ARROW-1279): Enable map tests ' + ' for JS once they are unbroken') continue if ('JS' in (producer.name, consumer.name) and "interval" in test_case.name): print('TODO(ARROW-5239): Enable interval tests ' + ' for JS once JS supports them') continue if ('Go' in (producer.name, consumer.name) and "decimal" in test_case.name): print('TODO(ARROW-3676): Enable decimal tests ' + ' for Go') continue if ('Go' in (producer.name, consumer.name) and "map" in test_case.name): print('TODO(ARROW-3679): Enable map tests ' + ' for Go') continue if ('Go' in (producer.name, consumer.name) and "dictionary" in test_case.name): print('TODO(ARROW-3039): Enable dictionary tests ' + ' for Go') continue # Make the random access file producer_file_path = os.path.join(self.temp_dir, file_id + '_' + name + '.json_as_file') producer_stream_path = os.path.join(self.temp_dir, file_id + '_' + name + '.producer_file_as_stream') consumer_file_path = os.path.join(self.temp_dir, file_id + '_' + name + '.consumer_stream_as_file') if producer.name in test_case.skip: print('-- Skipping test because producer {0} does ' 'not support'.format(producer.name)) continue if consumer.name in test_case.skip: print('-- Skipping test because consumer {0} does ' 'not support'.format(consumer.name)) continue if SKIP_ARROW in test_case.skip: print('-- Skipping test') continue try: print('-- Creating binary inputs') producer.json_to_file(json_path, producer_file_path) # Validate the file print('-- Validating file') consumer.validate(json_path, producer_file_path) print('-- Validating stream') producer.file_to_stream(producer_file_path, producer_stream_path) consumer.stream_to_file(producer_stream_path, consumer_file_path) consumer.validate(json_path, consumer_file_path) except Exception: traceback.print_exc() yield (test_case, producer, consumer, sys.exc_info()) 
continue def _compare_flight_implementations(self, producer, consumer): print('##########################################################') print( '{0} serving, {1} requesting'.format(producer.name, consumer.name) ) print('##########################################################') for test_case in self.json_files: json_path = test_case.path print('=' * 58) print('Testing file {0}'.format(json_path)) print('=' * 58) if ('Java' in (producer.name, consumer.name) and "map" in test_case.name): print('TODO(ARROW-1279): Enable map tests ' + ' for Java and JS once Java supports them and JS\'' + ' are unbroken') continue if SKIP_FLIGHT in test_case.skip: print('-- Skipping test') continue try: with producer.flight_server(): # Have the client upload the file, then download and # compare consumer.flight_request(producer.FLIGHT_PORT, json_path) except Exception: traceback.print_exc() yield (test_case, producer, consumer, sys.exc_info()) continue class Tester(object): PRODUCER = False CONSUMER = False FLIGHT_SERVER = False FLIGHT_CLIENT = False FLIGHT_PORT = 31337 def __init__(self, args): self.args = args self.debug = args.debug def json_to_file(self, json_path, arrow_path): raise NotImplementedError def stream_to_file(self, stream_path, file_path): raise NotImplementedError def file_to_stream(self, file_path, stream_path): raise NotImplementedError def validate(self, json_path, arrow_path): raise NotImplementedError def flight_server(self): raise NotImplementedError def flight_request(self, port, json_path): raise NotImplementedError class JavaTester(Tester): PRODUCER = True CONSUMER = True FLIGHT_SERVER = True FLIGHT_CLIENT = True FLIGHT_PORT = 31338 JAVA_OPTS = ['-Dio.netty.tryReflectionSetAccessible=true'] _arrow_version = load_version_from_pom() ARROW_TOOLS_JAR = os.environ.get( 'ARROW_JAVA_INTEGRATION_JAR', os.path.join(ARROW_HOME, 'java/tools/target/arrow-tools-{}-' 'jar-with-dependencies.jar'.format(_arrow_version))) ARROW_FLIGHT_JAR = os.environ.get( 
'ARROW_FLIGHT_JAVA_INTEGRATION_JAR', os.path.join(ARROW_HOME, 'java/flight/target/arrow-flight-{}-' 'jar-with-dependencies.jar'.format(_arrow_version))) ARROW_FLIGHT_SERVER = ('org.apache.arrow.flight.example.integration.' 'IntegrationTestServer') ARROW_FLIGHT_CLIENT = ('org.apache.arrow.flight.example.integration.' 'IntegrationTestClient') name = 'Java' def _run(self, arrow_path=None, json_path=None, command='VALIDATE'): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_TOOLS_JAR, 'org.apache.arrow.tools.Integration'] if arrow_path is not None: cmd.extend(['-a', arrow_path]) if json_path is not None: cmd.extend(['-j', json_path]) cmd.extend(['-c', command]) if self.debug: print(' '.join(cmd)) run_cmd(cmd) def validate(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'VALIDATE') def json_to_file(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'JSON_TO_ARROW') def stream_to_file(self, stream_path, file_path): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_TOOLS_JAR, 'org.apache.arrow.tools.StreamToFile', stream_path, file_path] if self.debug: print(' '.join(cmd)) run_cmd(cmd) def file_to_stream(self, file_path, stream_path): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_TOOLS_JAR, 'org.apache.arrow.tools.FileToStream', file_path, stream_path] if self.debug: print(' '.join(cmd)) run_cmd(cmd) def flight_request(self, port, json_path): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_FLIGHT_JAR, self.ARROW_FLIGHT_CLIENT, '-port', str(port), '-j', json_path] if self.debug: print(' '.join(cmd)) run_cmd(cmd) @contextlib.contextmanager def flight_server(self): cmd = ['java'] + self.JAVA_OPTS + \ ['-cp', self.ARROW_FLIGHT_JAR, self.ARROW_FLIGHT_SERVER, '-port', str(self.FLIGHT_PORT)] if self.debug: print(' '.join(cmd)) server = subprocess.Popen(cmd, stdout=subprocess.PIPE) try: output = server.stdout.readline().decode() if not output.startswith("Server listening on localhost"): raise RuntimeError( 
"Flight-Java server did not start properly, output: " + output) yield finally: server.kill() server.wait(5) class CPPTester(Tester): PRODUCER = True CONSUMER = True FLIGHT_SERVER = True FLIGHT_CLIENT = True EXE_PATH = os.environ.get( 'ARROW_CPP_EXE_PATH', os.path.join(ARROW_HOME, 'cpp/build/debug')) CPP_INTEGRATION_EXE = os.path.join(EXE_PATH, 'arrow-json-integration-test') STREAM_TO_FILE = os.path.join(EXE_PATH, 'arrow-stream-to-file') FILE_TO_STREAM = os.path.join(EXE_PATH, 'arrow-file-to-stream') FLIGHT_PORT = 31337 FLIGHT_SERVER_CMD = [ os.path.join(EXE_PATH, 'flight-test-integration-server'), "-port", str(FLIGHT_PORT)] FLIGHT_CLIENT_CMD = [ os.path.join(EXE_PATH, 'flight-test-integration-client'), "-host", "localhost"] name = 'C++' def _run(self, arrow_path=None, json_path=None, command='VALIDATE'): cmd = [self.CPP_INTEGRATION_EXE, '--integration'] if arrow_path is not None: cmd.append('--arrow=' + arrow_path) if json_path is not None: cmd.append('--json=' + json_path) cmd.append('--mode=' + command) if self.debug: print(' '.join(cmd)) run_cmd(cmd) def validate(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'VALIDATE') def json_to_file(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'JSON_TO_ARROW') def stream_to_file(self, stream_path, file_path): cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path] cmd = ' '.join(cmd) if self.debug: print(cmd) os.system(cmd) def file_to_stream(self, file_path, stream_path): cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path] cmd = ' '.join(cmd) if self.debug: print(cmd) os.system(cmd) @contextlib.contextmanager def flight_server(self): if self.debug: print(' '.join(self.FLIGHT_SERVER_CMD)) server = subprocess.Popen(self.FLIGHT_SERVER_CMD, stdout=subprocess.PIPE) try: output = server.stdout.readline().decode() if not output.startswith("Server listening on localhost"): raise RuntimeError( "Flight-C++ server did not start properly, output: " + output) yield 
finally: server.kill() server.wait(5) def flight_request(self, port, json_path): cmd = self.FLIGHT_CLIENT_CMD + [ '-port=' + str(port), '-path=' + json_path, ] if self.debug: print(' '.join(cmd)) run_cmd(cmd) class JSTester(Tester): PRODUCER = True CONSUMER = True EXE_PATH = os.path.join(ARROW_HOME, 'js/bin') VALIDATE = os.path.join(EXE_PATH, 'integration.js') JSON_TO_ARROW = os.path.join(EXE_PATH, 'json-to-arrow.js') STREAM_TO_FILE = os.path.join(EXE_PATH, 'stream-to-file.js') FILE_TO_STREAM = os.path.join(EXE_PATH, 'file-to-stream.js') name = 'JS' def _run(self, exe_cmd, arrow_path=None, json_path=None, command='VALIDATE'): cmd = [exe_cmd] if arrow_path is not None: cmd.extend(['-a', arrow_path]) if json_path is not None: cmd.extend(['-j', json_path]) cmd.extend(['--mode', command]) if self.debug: print(' '.join(cmd)) run_cmd(cmd) def validate(self, json_path, arrow_path): return self._run(self.VALIDATE, arrow_path, json_path, 'VALIDATE') def json_to_file(self, json_path, arrow_path): cmd = ['node', '--no-warnings', self.JSON_TO_ARROW, '-a', arrow_path, '-j', json_path] cmd = ' '.join(cmd) if self.debug: print(cmd) os.system(cmd) def stream_to_file(self, stream_path, file_path): cmd = ['cat', stream_path, '|', 'node', '--no-warnings', self.STREAM_TO_FILE, '>', file_path] cmd = ' '.join(cmd) if self.debug: print(cmd) os.system(cmd) def file_to_stream(self, file_path, stream_path): cmd = ['cat', file_path, '|', 'node', '--no-warnings', self.FILE_TO_STREAM, '>', stream_path] cmd = ' '.join(cmd) if self.debug: print(cmd) os.system(cmd) class GoTester(Tester): PRODUCER = True CONSUMER = True # FIXME(sbinet): revisit for Go modules GOPATH = os.getenv('GOPATH', '~/go') GOBIN = os.environ.get('GOBIN', os.path.join(GOPATH, 'bin')) GO_INTEGRATION_EXE = os.path.join(GOBIN, 'arrow-json-integration-test') STREAM_TO_FILE = os.path.join(GOBIN, 'arrow-stream-to-file') FILE_TO_STREAM = os.path.join(GOBIN, 'arrow-file-to-stream') name = 'Go' def _run(self, arrow_path=None, 
json_path=None, command='VALIDATE'): cmd = [self.GO_INTEGRATION_EXE] if arrow_path is not None: cmd.extend(['-arrow', arrow_path]) if json_path is not None: cmd.extend(['-json', json_path]) cmd.extend(['-mode', command]) if self.debug: print(' '.join(cmd)) run_cmd(cmd) def validate(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'VALIDATE') def json_to_file(self, json_path, arrow_path): return self._run(arrow_path, json_path, 'JSON_TO_ARROW') def stream_to_file(self, stream_path, file_path): cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path] cmd = ' '.join(cmd) if self.debug: print(cmd) os.system(cmd) def file_to_stream(self, file_path, stream_path): cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path] cmd = ' '.join(cmd) if self.debug: print(cmd) os.system(cmd) def get_static_json_files(): glob_pattern = os.path.join(ARROW_HOME, 'integration', 'data', '*.json') return [JsonFile(name=os.path.basename(p), path=p, skip=set(), schema=None, batches=None) for p in glob.glob(glob_pattern)] def run_all_tests(args): testers = [] if args.enable_cpp: testers.append(CPPTester(args)) if args.enable_java: testers.append(JavaTester(args)) if args.enable_js: testers.append(JSTester(args)) if args.enable_go: testers.append(GoTester(args)) static_json_files = get_static_json_files() generated_json_files = get_generated_json_files(tempdir=args.tempdir, flight=args.run_flight) json_files = static_json_files + generated_json_files runner = IntegrationRunner(json_files, testers, tempdir=args.tempdir, debug=args.debug) failures = [] failures.extend(runner.run()) if args.run_flight: failures.extend(runner.run_flight()) fail_count = 0 if failures: print("################# FAILURES #################") for test_case, producer, consumer, exc_info in failures: fail_count += 1 print("FAILED TEST:", end=" ") print(test_case.name, producer.name, "producing, ", consumer.name, "consuming") if exc_info: traceback.print_exception(*exc_info) print() 
print(fail_count, "failures") if fail_count > 0: sys.exit(1) def write_js_test_json(directory): generate_map_case().write(os.path.join(directory, 'map.json')) generate_nested_case().write(os.path.join(directory, 'nested.json')) generate_decimal_case().write(os.path.join(directory, 'decimal.json')) generate_datetime_case().write(os.path.join(directory, 'datetime.json')) (generate_dictionary_case() .write(os.path.join(directory, 'dictionary.json'))) (generate_primitive_case([]) .write(os.path.join(directory, 'primitive_no_batches.json'))) (generate_primitive_case([7, 10]) .write(os.path.join(directory, 'primitive.json'))) (generate_primitive_case([0, 0, 0]) .write(os.path.join(directory, 'primitive-empty.json'))) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Arrow integration test CLI') parser.add_argument('--enable-c++', dest='enable_cpp', action='store', type=int, default=1, help='Include C++ in integration tests') parser.add_argument('--enable-java', dest='enable_java', action='store', type=int, default=1, help='Include Java in integration tests') parser.add_argument('--enable-js', dest='enable_js', action='store', type=int, default=1, help='Include JavaScript in integration tests') parser.add_argument('--enable-go', dest='enable_go', action='store', type=int, default=1, help='Include Go in integration tests') parser.add_argument('--write_generated_json', dest='generated_json_path', action='store', default=False, help='Generate test JSON') parser.add_argument('--run_flight', dest='run_flight', action='store_true', default=False, help='Run Flight integration tests') parser.add_argument('--debug', dest='debug', action='store_true', default=False, help='Run executables in debug mode as relevant') parser.add_argument('--tempdir', dest='tempdir', default=tempfile.mkdtemp(), help=('Directory to use for writing ' 'integration test temporary files')) args = parser.parse_args() if args.generated_json_path: try: 
os.makedirs(args.generated_json_path) except OSError as e: if e.errno != errno.EEXIST: raise write_js_test_json(args.generated_json_path) else: run_all_tests(args)
"""This module contains methods and classes for making parallel ETL flows. Warning: This is still experimental and things may be changed drastically. If you have ideas, comments, bug reports, etc., please report them to Christian Thomsen (chr@cs.aau.dk) """ # Copyright (c) 2011, Christian Thomsen (chr@cs.aau.dk) # All rights reserved. # Redistribution and use in source anqd binary forms, with or without # modification, are permitted provided that the following conditions are met: # - Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
__author__ = "Christian Thomsen" __maintainer__ = "Christian Thomsen" __version__ = '0.2.0' __all__ = ['splitpoint', 'endsplits', 'createflow', 'Decoupled', \ 'shareconnectionwrapper', 'getsharedsequencefactory'] import copy import os from Queue import Empty import sys if sys.platform.startswith('java'): # Jython specific code in jythonmultiprocessing import pygrametl.jythonmultiprocessing as multiprocessing else: # Use (C)Python's std. lib. import multiprocessing import pygrametl # Support for spawned processes to be able to terminate all related processes # in case of an uncaught exception _masterpid = os.getpid() # the first to import parallel _toterminator = None def _getexitfunction(): """Return a function that halts the execution of pygrametl. pygrametl uses the function as excepthook in spawned processes such that an uncaught exception halts the entire execution. """ # On Java, System.exit will do as there are no separate processes if sys.platform.startswith('java'): def javaexitfunction(): import java.lang.System java.lang.System.exit(1) return javaexitfunction # else see if the os module provides functions to kill process groups; # this should be the case on UNIX. import signal if hasattr(os, 'getpgrp') and hasattr(os, 'killpg'): def unixexitfunction(): procgrp = os.getpgrp() os.killpg(procgrp, signal.SIGTERM) return unixexitfunction # else, we are on a platform that does not allow us to kill a group. # We make a special process that gets the pids of all calls to # this procedure. The function we return, informs this process to kill # all processes it knows about. 
# set up the terminator global _toterminator if _toterminator is None: _toterminator = multiprocessing.Queue() def terminatorfunction(): pids = set([_masterpid]) while True: item = _toterminator.get() if type(item) == int: pids.add(item) else: # We take it as a signal to kill all for p in pids: os.kill(p, 9) # we don't know which signals exist; use 9 return terminatorprocess = multiprocessing.Process(target=terminatorfunction) terminatorprocess.daemon = True terminatorprocess.start() # tell the terminator about this process _toterminator.put(os.getpid()) # return a function that tells the terminator to kill all known processes def exitfunction(): _toterminator.put('TERMINATE') return exitfunction def _getexcepthook(): "Return a function that can be used as except hook for uncaught exceptions." if not sys.argv[0]: # We are in interactive mode and don't want to terminate return sys.excepthook # else create a function that terminates all spawned processes and this # in case of an uncaught exception exit = _getexitfunction() def excepthook(exctype, excvalue, exctraceback): import traceback sys.stderr.write( "An uncaught exception occured. Terminating pygrametl.\n") traceback.print_exception(exctype, excvalue, exctraceback) exit() return excepthook # Stuff for @splitpoint def _splitprocess(func, input, output): # The target of a process created for a splitpoint sys.excepthook = _getexcepthook() # To handle uncaught exceptions and halt (args, kw) = input.get() while True: res = func(*args, **kw) if output is not None: output.put(res) input.task_done() (args, kw) = input.get() _splitpointqueues = [] def splitpoint(*arg, **kwargs): """To be used as an annotation to make a function run in a separate process. Each call of a @splitpoint annotated function f involves adding the request (and arguments, if any) to a shared queue. This can be relatively expensive if f only uses little computation time. 
The benefits from @splitpoint are thus best obtained for a function f which is time-consuming. To wait for all splitpoints to finish their computations, call endsplits(). @splitpoint can be used as in the following examples: @splitpoint def f(args): # The simplest case. Makes f run in a separate process. # All calls of f will return None immediately and f will be # invoked in the separate process. ... @splitpoint() def g(args): # With parentheses. Has the same effect as the previous example. ... @splitpoint(output=queue, instances=2, queuesize=200) def h(args): # With keyword arguments. It is not required that # all of keyword arguments above are given. ... Keyword arguments: - output: If given, it should be a queue-like object (offering the .put(obj) method). The annotated function's results will then be put in the output - instances: Determines how many processes should run the function. - queuesize: Given as an argument to a multiprocessing.JoinableQueue which holds arguments to the annotated function while they wait for an idle process that will pass them on to the annotated function. The argument decides the maximum number of calls that can wait in the queue. 0 means unlimited. Default: 0 """ # We construct a function called decorator. We either return this # decorator or the decorator applied to the function to decorate. # It depends on the user's arguments what to do: # # When the user uses parentheses after the annotation (as in # "@splitpoint(output=x)" or even "@splitpoint()"), arg automatically # becomes empty, i.e., arg == (). In that case we return the created # decorator such that Python can use it to decorate some function (which # we don't know. # # When no arguments are given (as in "@splitpoint"), arg has a # single element, namely the function to annotate, arg == (<function>,). # We then return decorator(function). 
for kw in kwargs.keys(): if kw not in ('instances', 'output', 'queuesize'): raise TypeError, \ "'%s' is an invalid keyword argument for splitpoint" % kw output = kwargs.get('output', None) instances = kwargs.get('instances', 1) queuesize = kwargs.get('queuesize', 0) def decorator(func): global _splitpointqueues if instances < 1: # A special case where there is no process so # we just call func directly def sillywrapper(*args, **kw): res = func(*args, **kw) if output is not None: output.put(res) return sillywrapper # Else set up processes input = multiprocessing.JoinableQueue(queuesize) for n in range(instances): p = multiprocessing.Process(target=_splitprocess,\ args=(func, input, output)) p.name = 'Process-%d for %s' % (n, func.__name__) p.daemon = True p.start() _splitpointqueues.append(input) def wrapper(*args,**kw): input.put((args, kw)) return wrapper if len(arg) == 0: return decorator elif len(arg) == 1: return decorator(*arg) else: raise ValueError, 'More than one *arg given' def endsplits(): """Wait for all splitpoints to finish""" global _splitpointqueues for q in _splitpointqueues: q.join() # Stuff for (function) flows def _flowprocess(func, input, output, inclosed, outclosed): sys.excepthook = _getexcepthook() # To handle uncaught exceptions and halt retryingafterclose = False while True: try: batch = input.get(True, 0.1) for args in batch: func(*args) input.task_done() output.put(batch) except Empty: if not inclosed.value: # A new item may be on its way, so try again continue elif not retryingafterclose: # After the get operation timed out, but before we got to here, # an item may have been added before the closed mark got set, # so we have to try again retryingafterclose = True continue else: # We have now tried get again after we saw the closed mark. # There is no more data. break output.close() outclosed.value = 1 class Flow(object): """A Flow consists of different functions running in different processes. 
A Flow should be created by calling createflow. """ def __init__(self, queues, closedarray, batchsize=1000): self.__queues = queues self.__closed = closedarray self.__batchsize = batchsize self.__batch = [] self.__resultbatch = [] def __iter__(self): try: while True: yield self.get() except Empty: return def __call__(self, *args): self.process(*args) def process(self, *args): "Insert arguments into the flow" self.__batch.append(args) if len(self.__batch) == self.__batchsize: self.__queues[0].put(self.__batch) self.__batch = [] def __oneortuple(self, thetuple): # If there is only one element in the given tuple, return that element; # otherwise return the full tuple. if len(thetuple) == 1: return thetuple[0] return thetuple def get(self): """Return the result of a single call of the flow. If the flow was called with a single argument -- as in flow({'foo':0, 'bar':1}) -- that single argument is returned (with the side-effects of the flow preserved). If the flow was called with multiple arguments -- as in flow({'foo'0}, {'bar':1}) -- a tuple with those arguments is returned (with the side-effects of the flow preserved). """ if self.__resultbatch: return self.__oneortuple(self.__resultbatch.pop()) # Else fetch new data from the queue retryingafterclose = False while True: try: tmp = self.__queues[-1].get(True, 0.1) self.__queues[-1].task_done() tmp.reverse() self.__resultbatch = tmp return self.__oneortuple(self.__resultbatch.pop()) except Empty: # See explanation in _flowprocess if not self.__closed[-1].value: continue elif not retryingafterclose: retryingafterclose = True continue else: raise Empty def getall(self): """Return all results in a single list. The results are of the same form as those returned by get. """ res = [] try: while True: res.append(self.get()) except Empty: pass return res def join(self): "Wait for all queues to be empty, i.e., for all computations to be done" for q in self.__queues: q.join() def close(self): "Close the flow. 
New entries can't be added, but computations continue." if self.__batch: self.__queues[0].put(self.__batch) self.__batch = [] # Not really necessary, but ... self.__queues[0].close() self.__closed[0].value = 1 @property def finished(self): "Tells if the flow is closed and all computations have finished" for v in self.__closed: if not v.value: return False return True def _buildgroupfunction(funcseq): def groupfunc(*args): for f in funcseq: f(*args) groupfunc.__doc__ = 'group function calling ' + \ (', '.join([f.__name__ for f in funcseq])) return groupfunc def createflow(*functions, **options): """Create a flow of functions running in different processes. A Flow object ready for use is returned. A flow consists of several functions running in several processes. A flow created by flow = createflow(f1, f2, f3) uses three processes. Data can be inserted into the flow by calling it as in flow(data). The argument data is then first processed by f1(data), then f2(data), and finally f3(data). Return values from f1, f2, and f3 are *not* preserved, but their side-effects are. The functions in a flow should all accept the same number of arguments (*args are also okay). Internally, a Flow object groups calls together in batches to reduce communication costs (see also the description of arguments below). In the example above, f1 could thus work on one batch, while f2 works on another batch and so on. Flows are thus good to use even if there are many calls of relatively fast functions. When no more data is to be inserted into a flow, it should be closed by calling its close method. Data processed by a flow can be fetched by calling get/getall or simply iterating the flow. This can both be done by the process that inserted data into the flow or by another (possibly concurrent) process. All data in a flow should be fetched again as it otherwise will remain in memory . Arguments: - *functions: A sequence of functions of sequences of functions. 
Each element in the sequence will be executed in a separate process. For example, the argument (f1, (f2, f3), f4) leads to that f1 executes in process-1, f2 and f3 execute in process-2, and f4 executes in process-3. The functions in the sequence should all accept the same number of arguments. - **options: keyword arguments configuring details. The considered options are: - batchsize: an integer deciding how many function calls are "grouped together" before they are passed on between processes. The default is 500. - queuesize: an integer deciding the maximum number of batches that can wait in a JoinableQueue between two different processes. 0 means that there is no limit. The default is 25. """ # A special case if not functions: return Flow([multiprocessing.JoinableQueue()],\ [multiprocessing.Value('b', 0)], 1) # Create functions that invoke a group of functions if needed resultfuncs = [] for item in functions: if callable(item): resultfuncs.append(item) else: # Check the arguments if not hasattr(item, '__iter__'): raise ValueError, \ 'An element is neither iterable nor callable' for f in item: if not callable(f): raise ValueError, \ 'An element in a sequence is not callable' # We can - finally - create the function groupfunc = _buildgroupfunction(item) resultfuncs.append(groupfunc) # resultfuncs are now the functions we need to deal with. 
# Each function in resultfuncs should run in a separate process queuesize = ('queuesize' in options and options['queuesize']) or 0 batchsize = ('batchsize' in options and options['batchsize']) or 25 if batchsize < 1: batchsize = 25 queues = [multiprocessing.JoinableQueue(queuesize) for f in resultfuncs] queues.append(multiprocessing.JoinableQueue(queuesize)) # for the results closed = [multiprocessing.Value('b', 0) for q in queues] # in shared mem for i in range(len(resultfuncs)): p = multiprocessing.Process(target=_flowprocess, \ args=(resultfuncs[i], \ queues[i], queues[i+1], \ closed[i], closed[i+1])) p.start() # Now create and return the object which allows data to enter the flow return Flow(queues, closed, batchsize) ### Stuff for Decoupled objects class FutureResult(object): """Represent a value that may or may not be computed yet. FutureResults are created by Decoupled objects. """ def __init__(self, creator, id): """Arguments: - creator: a value that identifies the creator of the FutureResult. Use a primitive value. - id: a unique identifier for the FutureResult. """ self.__creator = creator self.__id = id @property def creator(self): return self.__creator @property def id(self): return self.__id def __setstate__(self, state): self.__creator = state[0] self.__id = state[1] def __getstate__(self): return (self.__creator, self.__id) # TODO: Add more documentation for developers. 
Users should use Decoupled # through its subclasses DecoupledDimension and DecoupledFactTable in # pygrametl.tables class Decoupled(object): __instances = [] def __init__(self, obj, returnvalues=True, consumes=(), directupdatepositions=(), batchsize=500, queuesize=200, autowrap=True): self.__instancenumber = len(Decoupled.__instances) self.__futurecnt = 0 Decoupled.__instances.append(self) self._obj = obj if hasattr(obj, '_decoupling') and callable(obj._decoupling): obj._decoupling() self.batchsize = batchsize self.__batch = [] self.__results = {} self.autowrap = autowrap self.__toworker = multiprocessing.JoinableQueue(queuesize) if returnvalues: self.__fromworker = multiprocessing.JoinableQueue(queuesize) else: self.__fromworker = None self.__otherqueues = dict([(dcpld.__instancenumber, dcpld.__fromworker)\ for dcpld in consumes]) self.__otherresults = {} # Will store dicts - see also __decoupledworker self.__directupdates = directupdatepositions self.__worker = multiprocessing.Process(target=self.__decoupledworker) self.__worker.daemon = True self.__worker.name = 'Process for %s object for %s' % \ (self.__class__.__name__, getattr(obj, 'name', 'an unnamed object')) self.__worker.start() ### Stuff for the forked process def __getresultfromother(self, queuenumber, id): while True: if id in self.__otherresults[queuenumber]: return self.__otherresults[queuenumber].pop(id) # else wait for more results to become available self.__otherresults[queuenumber].update( self.__otherqueues[queuenumber].get()) def __replacefuturesindict(self, dct): res = {} for (k, v) in dct.items(): if isinstance(v, FutureResult) and v.creator in self.__otherqueues: res[k] = self.__getresultfromother(v.creator, v.id) elif isinstance(v, list): res[k] = self.__replacefuturesinlist(v) elif isinstance(v, tuple): res[k] = self.__replacefuturesintuple(v) elif isinstance(v, dict): res[k] = self.__replacefuturesindict(v) else: res[k] = v return res def __replacefuturesinlist(self, lst): res = [] for e 
in lst: if isinstance(e, FutureResult) and e.creator in self.__otherqueues: res.append(self.__getresultfromother(e.creator, e.id)) elif isinstance(e, list): res.append(self.__replacefuturesinlist(e)) elif isinstance(e, tuple): res.append(self.__replacefuturesintuple(e)) elif isinstance(e, dict): res.append(self.__replacefuturesindict(e)) else: res.append(e) return res def __replacefuturesintuple(self, tpl): return tuple(self.__replacefuturesinlist(tpl)) def __replacefuturesdirectly(self, args): for pos in self.__directupdates: if len(pos) == 2: x, y = pos fut = args[x][y] args[x][y] = self.__getresultfromother(fut.creator, fut.id) elif len(pos) == 3: x, y, z = pos fut = args[x][y][z] args[x][y][z] = self.__getresultfromother(fut.creator, fut.id) else: raise ValueError, 'Positions must be of length 2 or 3' def __decoupledworker(self): sys.excepthook = _getexcepthook() if hasattr(self._obj, '_decoupled') and callable(self._obj._decoupled): self._obj._decoupled() for (creatorid, queue) in self.__otherqueues.items(): self.__otherresults[creatorid] = {} while True: batch = self.__toworker.get() resbatch = [] for [id, funcname, args] in batch: if self.__otherqueues and args: if self.__directupdates: try: self.__replacefuturesdirectly(args) except KeyError: args = self.__replacefuturesintuple(args) except IndexError: args = self.__replacefuturesintuple(args) else: args = self.__replacefuturesintuple(args) func = getattr(self._obj, funcname) res = func(*args) # NB: func's side-effects on args are ignored if id is not None: resbatch.append((id, res)) if self.__fromworker and resbatch: self.__fromworker.put(resbatch) self.__toworker.task_done() ### Stuff for the parent process def __getattr__(self, name): res = getattr(self._obj, name) if callable(res) and self.autowrap: def wrapperfunc(*args): return self._enqueue(name, *args) res = wrapperfunc setattr(self, name, res) # NB: Values are only read once... 
return res def _enqueue(self, funcname, *args): future = FutureResult(self.__instancenumber, self.__futurecnt) self.__futurecnt += 1 self.__batch.append([future.id, funcname, args]) if len(self.__batch) >= self.batchsize: self._endbatch() return future def _enqueuenoreturn(self, funcname, *args): self.__batch.append([None, funcname, args]) if len(self.__batch) >= self.batchsize: self._endbatch() return None def _getresult(self, future): if self.__fromworker is None: raise RuntimeError, "Return values are not kept" if future.creator != self.__instancenumber: raise ValueError, "Cannot return results from other instances" # else find and return the result while True: if future.id in self.__results: return self.__results.pop(future.id) # else wait for results to become available self.__results.update(self.__fromworker.get()) def _endbatch(self): if self.__batch: self.__toworker.put(self.__batch) self.__batch = [] def _join(self): self._endbatch() self.__toworker.join() # SharedConnectionWrapper stuff class SharedConnectionWrapperClient(object): """Provide access to a shared ConnectionWrapper. Users should not create a SharedConnectionWrapperClient directly, but instead use shareconnectionwrapper to do this. Each process should get its own SharedConnectionWrapper by calling the copy()/new() method. 
""" def __init__(self, toserver, fromserver, freelines, connectionmodule, userfuncnames=()): self.nametranslator = lambda s: s self.__clientid = None self.__toserver = toserver self.__fromserver = fromserver self.__freelines = freelines self.__connectionmodule = connectionmodule self.__userfuncnames = userfuncnames if pygrametl._defaulttargetconnection is None: pygrametl._defaulttargetconnection = self def __getstate__(self): res = self.__dict__.copy() res['_SharedConnectionWrapperClient__clientid'] = None return res def __setstate__(self, state): self.__dict__.update(state) self.__createalluserfuncs() # A new self exists now def __del__(self): if self.__clientid is not None: self.__freelines.put(self.__clientid) def __connecttoSCWserver(self): self.__clientid = self.__freelines.get() def __enqueue(self, method, *args): if self.__clientid is None: self.__connecttoSCWserver() self.__toserver.put((self.__clientid, method, args)) def __getrows(self, amount): # TODO:Should exceptions be transferred to the client and received here? self.__enqueue('#get', amount) return self.__fromserver[self.__clientid].get() def __join(self): self.__toserver.join() def __createalluserfuncs(self): for funcname in self.__userfuncnames: setattr(self, funcname, self.__createuserfunc(funcname)) def __createuserfunc(self, funcname): def userfunction(*args): self.__enqueue('_userfunc_' + funcname, *args) # Wait for the userfunc to finish... res = self.__fromserver[self.__clientid].get() # OK after __enqueue assert res == 'USERFUNC' return userfunction def copy(self): """ Create a new copy of the SharedConnectionWrapper (same as new) """ return copy.copy(self) def new(self): """ Create a new copy of the SharedConnectionWrapper (same as copy) """ return self.copy() def execute(self, stmt, arguments=None, namemapping=None, translate=True): """Execute a statement. 
Arguments: - stmt: the statement to execute - arguments: a mapping with the arguments (default: None) - namemapping: a mapping of names such that if stmt uses %(arg)s and namemapping[arg]=arg2, the value arguments[arg2] is used instead of arguments[arg] - translate: decides if translation from 'pyformat' to the undlying connection's format should take place. Default: True """ if namemapping and arguments: arguments = pygrametl.copy(arguments, **namemapping) elif arguments: arguments = arguments.copy() self.__enqueue('execute', stmt, arguments, None, translate) def executemany(self, stmt, params, translate=True): """Execute a sequence of statements.""" self.__enqueue('executemany', stmt, params, translate) def rowfactory(self, names=None): """Return a generator object returning result rows (i.e. dicts).""" (srvnames, rows) = self.__getrows(0) if names is None: names = srvnames for r in rows: yield dict(zip(names, r)) def fetchone(self, names=None): """Return one result row (i.e. dict).""" (rownames, row) = self.__getrows(1) return dict(zip(names or rownames, row)) def fetchonetuple(self): """Return one result tuple.""" (rownames, row) = self.__getrows(1) return row def fetchmanytuples(self, cnt): """Return cnt result tuples.""" (rownames, rows) = self.__getrows(cnt) return rows def fetchalltuples(self): """Return all result tuples""" (rownames, rows) = self.__getrows(0) return rows def rowcount(self): """Not supported. 
Returns -1.""" return -1 def getunderlyingmodule(self): """Return a reference to the underlying connection's module.""" return self.__connectionmodule def commit(self): """Commit the transaction.""" pygrametl.endload() self.__enqueue('commit') self.__join() def close(self): """Close the connection to the database,""" self.__enqueue('close') def rollback(self): """Rollback the transaction.""" self.__enqueue('rollback') self.__join() def setasdefault(self): """Set this ConnectionWrapper as the default connection.""" pygrametl._defaulttargetconnection = self def cursor(self): """Return a cursor object. Optional method.""" raise NotImplementedError def resultnames(self): (rownames, nothing) = self.__getrows(None) return rownames ### class SharedConnectionWrapperServer(object): """Manage access to a shared ConnectionWrapper. Users should not create a SharedConnectionWrapperServer directly, but instead use shareconnectionwrapper to do this. """ def __init__(self, wrapped, toserver, toclients): self.__toserver = toserver self.__toclients = toclients self.__wrapped = wrapped self.__results = [(None, None) for q in toclients] #as (names, [tuples]) def __senddata(self, client, amount=0): # Returns (column names, rows) # amount: None: No rows are returned - instead an empty list is sent # 0: all rows in a list, # 1: a single row (NOT in a list), # other positive numbers: max. that number of rows in a list. (names, data) = self.__results[client] if amount is None: rows = [] elif amount == 1 and data: rows = data.pop(0) elif amount > 0 and data: rows = data[0:amount] del data[0:amount] else: rows = data[:] del data[:] self.__toclients[client].put((names, rows)) def worker(self): sys.excepthook = _getexcepthook() # TODO: Improved error handling such that an exception can be passed on # to the responsible client. It is, however, likely that we cannot # continue using the shared DB connection after the exception occured... 
while True: (client, method, args) = self.__toserver.get() if method == '#get': self.__senddata(client, *args) elif method.startswith('_userfunc_'): target = getattr(self, method) target(*args) self.__toclients[client].put('USERFUNC') else: # it must be a function from the wrapped ConnectionWrapper target = getattr(self.__wrapped, method) target(*args) res = self.__wrapped.fetchalltuples() if not type(res) == list: # In __senddata we pop/del from a list so a tuple won't work res = list(res) self.__results[client] = (self.__wrapped.resultnames(), res) self.__toserver.task_done() def shareconnectionwrapper(targetconnection, maxclients=10, userfuncs=()): """Share a ConnectionWrapper between several processes/threads. When Decoupled objects are used, they can try to update the DW at the same time. They can use several ConnectionWrappers to avoid race conditions, but this is not transactionally safe. Instead, they can use a "shared" ConnectionWrapper obtained through this function. When a ConnectionWrapper is shared, it is executing in a separate process (or thread, in case Jython is used) and ensuring that only one operation takes place at the time. This is hidden from the users of the shared ConnectionWrapper. They see an interface similar to the normal ConnectionWrapper. When this method is called, it returns a SharedConnectionWrapperClient which can be used as a normal ConnectionWrapper. Each process (i.e., each Decoupled object) should, however, get a unique SharedConnectionWrapperClient by calling copy() on the returned SharedConnectionWrapperClient. Note that a shared ConnectionWrapper needs to hold the complete result of each query in memory until it is fetched by the process that executed the query. Again, this is hidden from the users. It is also possible to add methods to a shared ConnectionWrapper when it is created. When this is done and the method is invoked, no other operation will modify the DW at the same time. 
If, for example, the functions foo and bar are added to a shared ConnectionWrapper (by passing the argument userfuncs=(foo, bar) to shareconnectionwrapper), the returned SharedConnectionWrapperClient will offer the methods foo and bar which when called will be running in the separate process for the shared ConnectionWrapper. This is particularly useful for user-defined bulk loaders as used by BulkFactTable: def bulkload(): # DBMS-specific code here. # No other DW operation should take place concurrently scw = shareconnectionwrapper(ConnectionWrapper(...), userfuncs=(bulkload,)) facttbl = BulkFact(..., bulkloader=scw.copy().bulkload) #Note the .copy(). Arguments: - targetconnection: a pygrametl ConnectionWrapper - maxclients: the maximum number of concurrent clients. Default: 10 - userfuncs: a sequence of functions to add to the shared ConnectionWrapper. Default: () """ toserver = multiprocessing.JoinableQueue(5000) toclients = [multiprocessing.Queue() for i in range(maxclients)] freelines = multiprocessing.Queue() for i in range(maxclients): freelines.put(i) serverCW = SharedConnectionWrapperServer(targetconnection, toserver, toclients) userfuncnames = [] for func in userfuncs: if not (callable(func) and hasattr(func, 'func_name') and \ not func.func_name == '<lambda>'): raise ValueError, "Elements in userfunc must be callable and named" if hasattr(SharedConnectionWrapperClient, func.func_name): raise ValueError, "Illegal function name: " + func.func_name setattr(serverCW, '_userfunc_' + func.func_name, func) userfuncnames.append(func.func_name) serverprocess = multiprocessing.Process(target=serverCW.worker) serverprocess.name = 'Process for shared connection wrapper' serverprocess.daemon = True serverprocess.start() module = targetconnection.getunderlyingmodule() clientCW = SharedConnectionWrapperClient(toserver, toclients, freelines, module, userfuncnames) return clientCW # Shared sequences def getsharedsequencefactory(startvalue, intervallen=5000): """ Creates a 
factory for parallel readers of a sequence. Returns a callable f. When f() is called, it returns a callable g. Whenever g(*args) is called, it returns a unique int from a sequence (if several g's are created, the order of the calls may lead to that the returned ints are not ordered, but they will be unique). The arguments to g are ignored, but accepted. Thus g can be used as idfinder for [Decoupled]Dimensions. The different g's can be used safely from different processes and threads. Arguments: - startvalue: The first value to return. If None, 0 is assumed. - intervallen: The amount of numbers that a single g from above can return before synchronization is needed to get a new amount. Default: 5000. """ if startvalue is None: startvalue = 0 # We use a Queue to ensure that intervals are only given to one deliverer values = multiprocessing.Queue(10) # A worker that fills the queue def valuegenerator(nextval): sys.excepthook = _getexcepthook() while True: values.put((nextval, nextval + intervallen)) nextval += intervallen p = multiprocessing.Process(target=valuegenerator, args=(startvalue,)) p.daemon = True p.start() # A generator that repeatedly gets an interval from the queue and returns # all numbers in that interval before it gets a new interval and goes on ... def valuedeliverer(): while True: interval = values.get() for i in range(*interval): yield i # A factory method for the object the end-consumer calls def factory(): generator = valuedeliverer() # get a unique generator # The method called (i.e., the g) by the end-consumer def getnextseqval(*ignored): return generator.next() return getnextseqval return factory
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import hashlib
import urllib2

from lxml import etree


class HPMSAConnectionError(Exception):
    # Raised when the device cannot be reached or returns unparsable data.
    pass


class HPMSAAuthenticationError(Exception):
    # Raised when login does not yield a session key.
    pass


class HPMSARequestError(Exception):
    # Raised when the device replies with a non-zero return code.
    pass


class HPMSAClient(object):
    """Lightweight XML-over-HTTP client for HP MSA storage arrays."""

    def __init__(self, host, login, password, protocol='http'):
        self._login = login
        self._password = password
        self._base_url = "%s://%s/api" % (protocol, host)
        self._session_key = None

    def _get_auth_token(self, xml):
        """Parse an XML authentication reply to extract the session key."""
        self._session_key = None
        obj = etree.XML(xml).find("OBJECT")
        for prop in obj.iter("PROPERTY"):
            if prop.get("name") == "response":
                self._session_key = prop.text
                break

    def login(self):
        """Authenticates the service on the device.

        Raises HPMSAConnectionError if the device cannot be reached and
        HPMSAAuthenticationError if no session key is returned.
        """
        # The MSA login endpoint expects an MD5 digest of "login_password".
        md5_hash = hashlib.md5("%s_%s" % (self._login, self._password))
        digest = md5_hash.hexdigest()
        url = self._base_url + "/login/" + digest
        try:
            xml = urllib2.urlopen(url).read()
        except urllib2.URLError:
            raise HPMSAConnectionError()
        self._get_auth_token(xml)
        if self._session_key is None:
            raise HPMSAAuthenticationError()

    def _assert_response_ok(self, tree):
        """Parses the XML returned by the device to check the return code.

        Raises a HPMSARequestError error if the return code is not 0.
        """
        for obj in tree.iter():
            if obj.get("basetype") != "status":
                continue
            ret_code = ret_str = None
            for prop in obj.iter("PROPERTY"):
                if prop.get("name") == "return-code":
                    ret_code = prop.text
                elif prop.get("name") == "response":
                    ret_str = prop.text
            if ret_code != "0":
                raise HPMSARequestError(ret_str)
            else:
                return
        raise HPMSARequestError("No status found")

    def _build_request_url(self, path, args=None, **kargs):
        # Build "<base>/<path>/key1/val1/.../arg1/arg2"; underscores in
        # keyword names become dashes to match the device's CLI syntax.
        url = self._base_url + path
        if kargs:
            url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
                                   for (k, v) in kargs.items()])
        if args:
            if not isinstance(args, list):
                args = [args]
            url += '/' + '/'.join(args)
        return url

    def _request(self, path, args=None, **kargs):
        """Performs an HTTP request on the device.

        Raises a HPMSARequestError if the device returned but the status is
        not 0. The device error message will be used in the exception
        message. If the status is OK, returns the XML data for further
        processing.
        """
        url = self._build_request_url(path, args, **kargs)
        headers = {'dataType': 'api', 'sessionKey': self._session_key}
        req = urllib2.Request(url, headers=headers)
        try:
            xml = urllib2.urlopen(req).read()
        except urllib2.URLError:
            raise HPMSAConnectionError()
        try:
            tree = etree.XML(xml)
        except etree.LxmlError:
            raise HPMSAConnectionError()
        self._assert_response_ok(tree)
        return tree

    def logout(self):
        """Close the session; returns True on success, False otherwise."""
        url = self._base_url + '/exit'
        try:
            urllib2.urlopen(url)
            return True
        # BUG FIX: urlopen raises URLError, never HPMSARequestError, so the
        # previous except clause was dead and failures propagated instead of
        # returning False.
        except urllib2.URLError:
            return False

    def create_volume(self, vdisk, name, size):
        # NOTE: size is in this format: [0-9]+GB
        self._request("/create/volume", name, vdisk=vdisk, size=size)
        return None

    def delete_volume(self, name):
        self._request("/delete/volumes", name)

    def extend_volume(self, name, added_size):
        self._request("/expand/volume", name, size=added_size)

    def create_snapshot(self, volume_name, snap_name):
        self._request("/create/snapshots", snap_name, volumes=volume_name)

    def delete_snapshot(self, snap_name):
        self._request("/delete/snapshot", ["cleanup", snap_name])

    def vdisk_exists(self, vdisk):
        try:
            self._request("/show/vdisks", vdisk)
            return True
        except HPMSARequestError:
            return False

    def vdisk_stats(self, vdisk):
        """Return free/total capacity (in GB) of the given vdisk."""
        stats = {'free_capacity_gb': 0,
                 'total_capacity_gb': 0}
        tree = self._request("/show/vdisks", vdisk)
        for obj in tree.iter():
            if obj.get("basetype") != "virtual-disks":
                continue
            for prop in obj.iter("PROPERTY"):
                # the sizes are given in number of blocks of 512 octets
                if prop.get("name") == "size-numeric":
                    stats['total_capacity_gb'] = \
                        int(prop.text) * 512 / (10 ** 9)
                elif prop.get("name") == "freespace-numeric":
                    stats['free_capacity_gb'] = \
                        int(prop.text) * 512 / (10 ** 9)
        return stats

    def _get_first_available_lun_for_host(self, host):
        # Collect the LUNs already mapped for the host and return the
        # smallest positive integer not in use.
        luns = []
        tree = self._request("/show/host-maps", host)
        for obj in tree.iter():
            if obj.get("basetype") != "host-view-mappings":
                continue
            for prop in obj.iter("PROPERTY"):
                if prop.get("name") == "lun":
                    luns.append(int(prop.text))
        lun = 1
        while True:
            if lun not in luns:
                return lun
            lun += 1

    def map_volume(self, volume_name, wwpns):
        # NOTE(gpocentek): we assume that luns will be the same for all hosts
        lun = self._get_first_available_lun_for_host(wwpns[0])
        hosts = ",".join(wwpns)
        self._request("/map/volume", volume_name, lun=str(lun), host=hosts,
                      access="rw")
        return lun

    def unmap_volume(self, volume_name, wwpns):
        hosts = ",".join(wwpns)
        self._request("/unmap/volume", volume_name, host=hosts)

    def get_active_target_ports(self):
        """Return the list of ports whose status is 'Up'."""
        ports = []
        tree = self._request("/show/ports")
        for obj in tree.iter():
            if obj.get("basetype") != "port":
                continue
            port = {}
            for prop in obj.iter("PROPERTY"):
                prop_name = prop.get("name")
                if prop_name in ["port-type", "target-id", "status"]:
                    port[prop_name] = prop.text
            if port['status'] != 'Up':
                continue
            ports.append(port)
        return ports

    def get_active_fc_target_ports(self):
        """Return the target ids of active Fibre Channel ports."""
        ports = []
        for port in self.get_active_target_ports():
            if port['port-type'] == "FC":
                ports.append(port['target-id'])
        return ports

    def copy_volume(self, source_name, target_name, vdisk):
        self._request("/volumecopy", target_name, dest_vdisk=vdisk,
                      source_volume=source_name, prompt='yes')
#(C) Copyright Syd Logan 2017-2020
#(C) Copyright Thousand Smiles Foundation 2017-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

'''
unit tests for surgery history application. Assumes django server is up
and running on the specified host and port
'''

import unittest
import getopt, sys
import json

from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
from tschartslib.patient.patient import CreatePatient, DeletePatient, GetPatient
from tschartslib.surgerytype.surgerytype import CreateSurgeryType, DeleteSurgeryType, GetSurgeryType

class CreateSurgeryHistory(ServiceAPI):
    # POST wrapper for the surgeryhistory endpoint: creates a new record.
    def __init__(self, host, port, token):
        super(CreateSurgeryHistory, self).__init__()

        self.setHttpMethod("POST")
        self.setHost(host)
        self.setPort(port)
        self.setToken(token)
        self.setURL("tscharts/v1/surgeryhistory/")

    def setSurgeryHistory(self, history):
        # Merge the given fields into the request payload.
        # NOTE: iteritems is Python-2-only; the suite targets Python 2.
        for k, v in history.iteritems():
            self._payload[k] = v
        self.setPayload(self._payload)

class GetSurgeryHistory(ServiceAPI):
    # GET wrapper: fetch by id, or search by patient and/or surgery type.
    def makeURL(self):
        # Rebuild the URL from whichever filters are currently set.
        hasQArgs = False
        if not self._id == None:
            base = "tscharts/v1/surgeryhistory/{}/".format(self._id)
        else:
            base = "tscharts/v1/surgeryhistory/"

        if not self._patientid == None:
            if not hasQArgs:
                base += "?"
            else:
                base += "&"
            base += "patient={}".format(self._patientid)
            hasQArgs = True

        if not self._surgeryid == None:
            if not hasQArgs:
                base += "?"
            else:
                base += "&"
            base += "surgery={}".format(self._surgeryid)
            hasQArgs = True

        self.setURL(base)

    def __init__(self, host, port, token):
        super(GetSurgeryHistory, self).__init__()

        self.setHttpMethod("GET")
        self.setHost(host)
        self.setPort(port)
        self.setToken(token)
        self._patientid = None
        self._surgeryid = None
        self._id = None
        self.makeURL()

    def setId(self, id):
        self._id = id;
        self.makeURL()

    def setPatient(self, patient):
        self._patientid = patient
        self.makeURL()

    def setSurgery(self, surgery):
        self._surgeryid = surgery
        self.makeURL()

class UpdateSurgeryHistory(ServiceAPI):
    # PUT wrapper: update an existing surgeryhistory record by id.
    def __init__(self, host, port, token, id):
        super(UpdateSurgeryHistory, self).__init__()

        self.setHttpMethod("PUT")
        self.setHost(host)
        self.setPort(port)
        self.setToken(token)
        self._payload = {}
        self.setPayload(self._payload)
        #patientid is fixed
        self.setURL("tscharts/v1/surgeryhistory/{}/".format(id))

    def setSurgeryHistory(self, history):
        #history might include: surgeryid, year, month, location, anesthesia problem(T/F), bleeding problem(T/F).
        for k, v in history.iteritems():
            self._payload[k] = v
        self.setPayload(self._payload)

class DeleteSurgeryHistory(ServiceAPI):
    # DELETE wrapper: remove a surgeryhistory record by id.
    def __init__(self, host, port, token, id):
        super(DeleteSurgeryHistory, self).__init__()

        self.setHttpMethod("DELETE")
        self.setHost(host)
        self.setPort(port)
        self.setToken(token)
        self.setURL("tscharts/v1/surgeryhistory/{}/".format(id))

class TestTSSurgeryHistory(unittest.TestCase):
    # End-to-end tests against a running django server.
    # NOTE(review): host/port/username/password/token appear to be module
    # globals set elsewhere (presumably from getopt at the bottom of the
    # file, outside this view) — confirm before reuse.

    def setUp(self):
        # Log in once per test; the token is shared via a module global.
        login = Login(host, port, username, password)
        ret = login.send(timeout=30)
        self.assertEqual(ret[0], 200)
        self.assertTrue("token" in ret[1])
        global token
        token = ret[1]["token"]

    def testCreateSurgeryHistory(self):
        # Create the patient and surgery type the history record refers to.
        data = {}
        data["paternal_last"] = "abcd1234"
        data["maternal_last"] = "yyyyyy"
        data["first"] = "zzzzzzz"
        data["middle"] = ""
        data["suffix"] = "Jr."
        data["prefix"] = ""
        data["dob"] = "04/01/1962"
        data["gender"] = "Female"
        data["street1"] = "1234 First Ave"
        data["street2"] = ""
        data["city"] = "Ensenada"
        data["colonia"] = ""
        data["state"] = u"Baja California"
        data["phone1"] = "1-111-111-1111"
        data["phone2"] = ""
        data["email"] = "patient@example.com"
        data["emergencyfullname"] = "Maria Sanchez"
        data["emergencyphone"] = "1-222-222-2222"
        data["emergencyemail"] = "maria.sanchez@example.com"

        x = CreatePatient(host, port, token, data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 200)
        patientid = int(ret[1]["id"])

        data = {}
        data["name"] = "Surgery1"
        x = CreateSurgeryType(host, port, token, data)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 200)
        surgeryid = int(ret[1]["id"])

        # Happy path: create a history record and read it back by id.
        x = CreateSurgeryHistory(host, port, token)
        data = {}
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 1999
        data["surgerymonth"] = 12
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x.setSurgeryHistory(data)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 200)
        id = int(ret[1]["id"])

        x = GetSurgeryHistory(host, port, token)
        x.setId(id)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 200)
        self.assertTrue("patient" in ret[1])
        patientId = int(ret[1]["patient"])
        self.assertTrue(patientId == patientid)
        data = ret[1]
        self.assertTrue("surgery" in data)
        self.assertTrue("surgeryyear" in data)
        self.assertTrue("surgerymonth" in data)
        self.assertTrue("surgerylocation" in data)
        self.assertTrue("anesthesia_problems" in data)
        self.assertTrue("bleeding_problems" in data)
        self.assertTrue(data["surgery"] == surgeryid)
        self.assertTrue(data["surgeryyear"] == 1999)
        self.assertTrue(data["surgerymonth"] == 12)
        self.assertTrue(data["surgerylocation"] == "Place1")
        self.assertTrue(data["anesthesia_problems"] == True)
        self.assertTrue(data["bleeding_problems"] == True)

        # Search by patient + surgery returns a list of matching records.
        x = GetSurgeryHistory(host, port, token)
        x.setPatient(patientid)
        x.setSurgery(surgeryid)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 200)
        self.assertTrue("surgery" in ret[1][0])
        self.assertTrue("patient" in ret[1][0])
        patientId = int(ret[1][0]["patient"])
        self.assertTrue(patientId == patientid)
        data = ret[1][0]
        self.assertTrue("surgery" in data)
        self.assertTrue("surgeryyear" in data)
        self.assertTrue("surgerymonth" in data)
        self.assertTrue("surgerylocation" in data)
        self.assertTrue("anesthesia_problems" in data)
        self.assertTrue("bleeding_problems" in data)
        self.assertTrue(data["surgery"] == surgeryid)
        self.assertTrue(data["surgeryyear"] == 1999)
        self.assertTrue(data["surgerymonth"] == 12)
        self.assertTrue(data["surgerylocation"] == "Place1")
        self.assertTrue(data["anesthesia_problems"] == True)
        self.assertTrue(data["bleeding_problems"] == True)

        # Delete the record; a subsequent lookup must 404.
        x = DeleteSurgeryHistory(host, port, token, id)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 200)

        x = GetSurgeryHistory(host, port, token)
        x.setId(id)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 404)

        #non-exist patient
        data = {}
        data["patient"] = 9999
        data["surgery"] = surgeryid
        data["surgeryyear"] = 1999
        data["surgerymonth"] = 12
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 404)

        #non-exist surgery
        data = {}
        data["patient"] = patientid
        data["surgery"] = 9999
        data["surgeryyear"] = 1999
        data["surgerymonth"] = 12
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 404)

        #invalid paramter name
        data = {}
        data["bc"] = 123
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 1999
        data["surgerymonth"] = 12
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 400)

        #no data
        x = CreateSurgeryHistory(host, port, token)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 400)

        #invalid data boolean argu
        data = {}
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 1999
        data["surgerymonth"] = 12
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = 123
        data["bleeding_problems"] = 1234
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 400)

        #invalid surgeryyear
        data = {}
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 1952
        data["surgerymonth"] = 12
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 400)

        #invalid surgeryyear
        data = {}
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 2050
        data["surgerymonth"] = 12
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 400)

        #invalid surgerymonth
        data = {}
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 2000
        data["surgerymonth"] = 15
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 400)

        #invalid surgerymonth
        data = {}
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 2000
        data["surgerymonth"] = 0
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 400)

        #invalid surgerylocation
        data = {}
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 2000
        data["surgerymonth"] = 10
        data["surgerylocation"] = ""
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x = CreateSurgeryHistory(host, port, token)
        x.setSurgeryHistory(data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 400)

        # Clean up the fixtures created at the top of the test.
        x = DeletePatient(host, port, token, patientid)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 200)

        x = DeleteSurgeryType(host, port, token, surgeryid)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 200)

    def testDeleteSurgeryHistory(self):
        # Create fixture patient and surgery type for the delete test.
        data = {}
        data["paternal_last"] = "abcd1234"
        data["maternal_last"] = "yyyyyy"
        data["first"] = "zzzzzzz"
        data["middle"] = ""
        data["suffix"] = "Jr."
        data["prefix"] = ""
        data["dob"] = "04/01/1962"
        data["gender"] = "Female"
        data["street1"] = "1234 First Ave"
        data["street2"] = ""
        data["city"] = "Ensenada"
        data["colonia"] = ""
        data["state"] = u"Baja California"
        data["phone1"] = "1-111-111-1111"
        data["phone2"] = ""
        data["email"] = "patient@example.com"
        data["emergencyfullname"] = "Maria Sanchez"
        data["emergencyphone"] = "1-222-222-2222"
        data["emergencyemail"] = "maria.sanchez@example.com"

        x = CreatePatient(host, port, token, data)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 200)
        patientid = int(ret[1]["id"])

        data = {}
        data["name"] = "Surgery1"
        x = CreateSurgeryType(host, port, token, data)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 200)
        surgeryid = int(ret[1]["id"])

        x = CreateSurgeryHistory(host, port, token)
        data = {}
        data["patient"] = patientid
        data["surgery"] = surgeryid
        data["surgeryyear"] = 1999
        data["surgerymonth"] = 12
        data["surgerylocation"] = "Place1"
        data["anesthesia_problems"] = True
        data["bleeding_problems"] = True
        x.setSurgeryHistory(data)
        ret = x.send(timeout = 30)
        self.assertEqual(ret[0], 200)
        id = int(ret[1]["id"])

        x = DeleteSurgeryHistory(host, port, token, id)
        ret = x.send(timeout=30)
self.assertEqual(ret[0], 200) x = GetSurgeryHistory(host, port, token) x.setId(id) ret = x.send(timeout=30) self.assertEqual(ret[0], 404) # not found x = DeleteSurgeryHistory(host, port, token, 9999) ret = x.send(timeout=30) self.assertEqual(ret[0], 404) x = DeleteSurgeryHistory(host, port, token, None) ret = x.send(timeout=30) self.assertEqual(ret[0], 404) x = DeleteSurgeryHistory(host, port, token, "") ret = x.send(timeout=30) self.assertEqual(ret[0], 400) x = DeleteSurgeryHistory(host, port, token, "Hello") ret = x.send(timeout=30) self.assertEqual(ret[0], 404) x = DeletePatient(host, port, token, patientid) ret = x.send(timeout=30) self.assertEqual(ret[0], 200) x = DeleteSurgeryType(host, port, token, surgeryid) ret = x.send(timeout=30) self.assertEqual(ret[0], 200) def testUpdateSurgeryHistory(self): data = {} data["paternal_last"] = "abcd1234" data["maternal_last"] = "yyyyyy" data["first"] = "zzzzzzz" data["middle"] = "" data["suffix"] = "Jr." data["prefix"] = "" data["dob"] = "04/01/1962" data["gender"] = "Female" data["street1"] = "1234 First Ave" data["street2"] = "" data["city"] = "Ensenada" data["colonia"] = "" data["state"] = u"Baja California" data["phone1"] = "1-111-111-1111" data["phone2"] = "" data["email"] = "patient@example.com" data["emergencyfullname"] = "Maria Sanchez" data["emergencyphone"] = "1-222-222-2222" data["emergencyemail"] = "maria.sanchez@example.com" x = CreatePatient(host, port, token, data) ret = x.send(timeout=30) self.assertEqual(ret[0], 200) patientid = int(ret[1]["id"]) data = {} data["name"] = "Surgery1" x = CreateSurgeryType(host, port, token, data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) surgeryid = int(ret[1]["id"]) x = CreateSurgeryHistory(host, port, token) data = {} data["patient"] = patientid data["surgery"] = surgeryid data["surgeryyear"] = 1999 data["surgerymonth"] = 12 data["surgerylocation"] = "Place1" data["anesthesia_problems"] = True data["bleeding_problems"] = True x.setSurgeryHistory(data) ret 
= x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) x = GetSurgeryHistory(host, port, token) x.setId(id) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) self.assertTrue("patient" in ret[1]) patientId = int(ret[1]["patient"]) self.assertTrue(patientId == patientid) self.assertTrue("surgery" in ret[1]) surgeryId = ret[1]["surgery"] self.assertTrue(surgeryid == surgeryId) data = {} data["surgeryyear"] = 2000 data["surgerymonth"] = 11 x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) x = GetSurgeryHistory(host, port, token) x.setId(id) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) self.assertTrue("patient" in ret[1]) patientId = int(ret[1]["patient"]) self.assertTrue(patientid == patientId) data = ret[1] self.assertTrue("surgery" in data) self.assertTrue("surgeryyear" in data) self.assertTrue("surgerymonth" in data) self.assertTrue("surgerylocation" in data) self.assertTrue("anesthesia_problems" in data) self.assertTrue("bleeding_problems" in data) self.assertTrue(data["surgery"] == surgeryid) self.assertTrue(data["surgeryyear"] == 2000) self.assertTrue(data["surgerymonth"] == 11) self.assertTrue(data["surgerylocation"] == "Place1") self.assertTrue(data["anesthesia_problems"] == True) self.assertTrue(data["bleeding_problems"] == True) data = {} data["surgerylocation"] = "Place2" data["anesthesia_problems"] = False data["bleeding_problems"] = False x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) x = GetSurgeryHistory(host, port, token) x.setId(id) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) self.assertTrue("patient" in ret[1]) patientId = int(ret[1]["patient"]) self.assertTrue(patientid == patientId) data = ret[1] self.assertTrue("surgery" in data) self.assertTrue("surgeryyear" in data) self.assertTrue("surgerymonth" in data) 
self.assertTrue("surgerylocation" in data) self.assertTrue("anesthesia_problems" in data) self.assertTrue("bleeding_problems" in data) self.assertTrue(data["surgery"] == surgeryid) self.assertTrue(data["surgeryyear"] == 2000) self.assertTrue(data["surgerymonth"] == 11) self.assertTrue(data["surgerylocation"] == "Place2") self.assertTrue(data["anesthesia_problems"] == False) self.assertTrue(data["bleeding_problems"] == False) data = {} data["anesthesia_problems"] = "hello" x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 400) data = {} data["surgery"] = None x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 400) data = {} data["surgeryyear"] = 1900 x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 400) data = {} data["surgeryyear"] = 2500 x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 400) data = {} data["surgerymonth"] = 24 x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 400) data = {} data["surgery"] = 9999 x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 400) data = {} data["abc"] = 1234 x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 400) data = {} #update nothing is fine. 
x = UpdateSurgeryHistory(host, port, token, id) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) x = DeleteSurgeryHistory(host, port, token, id) ret = x.send(timeout=30) self.assertEqual(ret[0], 200) x = DeletePatient(host, port, token, patientid) ret = x.send(timeout=30) self.assertEqual(ret[0], 200) x = DeleteSurgeryType(host, port, token, surgeryid) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) def testGetAllSurgeryHistories(self): data = {} data["name"] = "Surgery1" x = CreateSurgeryType(host, port, token, data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) surgeryid1 = int(ret[1]["id"]) data = {} data["name"] = "Surgery2" x = CreateSurgeryType(host, port, token, data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) surgeryid2 = int(ret[1]["id"]) data = {} data["name"] = "Surgery3" x = CreateSurgeryType(host, port, token, data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) surgeryid3 = int(ret[1]["id"]) data = {} data["paternal_last"] = "3abcd1234" data["maternal_last"] = "yyyyyy" data["first"] = "zzzzzzz" data["middle"] = "" data["suffix"] = "Jr." data["prefix"] = "" data["dob"] = "04/01/1962" data["gender"] = "Female" data["street1"] = "1234 First Ave" data["street2"] = "" data["city"] = "Ensenada" data["colonia"] = "" data["state"] = u"Baja California" data["phone1"] = "1-111-111-1111" data["phone2"] = "" data["email"] = "patient@example.com" data["emergencyfullname"] = "Maria Sanchez" data["emergencyphone"] = "1-222-222-2222" data["emergencyemail"] = "maria.sanchez@example.com" x = CreatePatient(host, port, token, data) ret = x.send(timeout=30) self.assertEqual(ret[0], 200) patientid1 = int(ret[1]["id"]) data = {} data["paternal_last"] = "1abcd1234" data["maternal_last"] = "yyyyyy" data["first"] = "zzzzzzz" data["middle"] = "" data["suffix"] = "Jr." 
data["prefix"] = "" data["dob"] = "04/01/1962" data["gender"] = "Female" data["street1"] = "1234 First Ave" data["street2"] = "" data["city"] = "Ensenada" data["colonia"] = "" data["state"] = u"Baja California" data["phone1"] = "1-111-111-1111" data["phone2"] = "" data["email"] = "patient@example.com" data["emergencyfullname"] = "Maria Sanchez" data["emergencyphone"] = "1-222-222-2222" data["emergencyemail"] = "maria.sanchez@example.com" x = CreatePatient(host, port, token, data) ret = x.send(timeout=30) self.assertEqual(ret[0], 200) patientid2 = int(ret[1]["id"]) data = {} data["paternal_last"] = "2abcd1234" data["maternal_last"] = "yyyyyy" data["first"] = "zzzzzzz" data["middle"] = "" data["suffix"] = "Jr." data["prefix"] = "" data["dob"] = "04/01/1962" data["gender"] = "Female" data["street1"] = "1234 First Ave" data["street2"] = "" data["city"] = "Ensenada" data["colonia"] = "" data["state"] = u"Baja California" data["phone1"] = "1-111-111-1111" data["phone2"] = "" data["email"] = "patient@example.com" data["emergencyfullname"] = "Maria Sanchez" data["emergencyphone"] = "1-222-222-2222" data["emergencyemail"] = "maria.sanchez@example.com" x = CreatePatient(host, port, token, data) ret = x.send(timeout=30) self.assertEqual(ret[0], 200) patientid3 = int(ret[1]["id"]) idlist = [] data = {} data["patient"] = patientid1 data["surgery"] = surgeryid1 data["surgeryyear"] = 1998 data["surgerymonth"] = 11 data["surgerylocation"] = "Place1" data["anesthesia_problems"] = True data["bleeding_problems"] = True x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) data = {} data["patient"] = patientid2 data["surgery"] = surgeryid1 data["surgeryyear"] = 1999 data["surgerymonth"] = 12 data["surgerylocation"] = "Place2" data["anesthesia_problems"] = True data["bleeding_problems"] = True x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = 
x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) data = {} data["patient"] = patientid3 data["surgery"] = surgeryid1 data["surgeryyear"] = 2003 data["surgerymonth"] = 9 data["surgerylocation"] = "Place3" data["anesthesia_problems"] = True data["bleeding_problems"] = True x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) data = {} data["patient"] = patientid1 data["surgery"] = surgeryid2 data["surgeryyear"] = 2005 data["surgerymonth"] = 10 data["surgerylocation"] = "Place4" data["anesthesia_problems"] = True data["bleeding_problems"] = True x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) data = {} data["patient"] = patientid2 data["surgery"] = surgeryid2 data["surgeryyear"] = 2005 data["surgerymonth"] = 9 data["surgerylocation"] = "Place5" data["anesthesia_problems"] = True data["bleeding_problems"] = True x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) data = {} data["patient"] = patientid3 data["surgery"] = surgeryid2 data["surgeryyear"] = 2005 data["surgerymonth"] = 10 data["surgerylocation"] = "Place6" data["anesthesia_problems"] = False data["bleeding_problems"] = False x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) data = {} data["patient"] = patientid1 data["surgery"] = surgeryid3 data["surgeryyear"] = 2005 data["surgerymonth"] = 10 data["surgerylocation"] = "Place7" data["anesthesia_problems"] = True data["bleeding_problems"] = True x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = x.send(timeout = 30) 
self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) data = {} data["patient"] = patientid2 data["surgery"] = surgeryid3 data["surgeryyear"] = 2005 data["surgerymonth"] = 9 data["surgerylocation"] = "Place8" data["anesthesia_problems"] = True data["bleeding_problems"] = True x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) data = {} data["patient"] = patientid3 data["surgery"] = surgeryid3 data["surgeryyear"] = 2005 data["surgerymonth"] = 10 data["surgerylocation"] = "Place9" data["anesthesia_problems"] = False data["bleeding_problems"] = False x = CreateSurgeryHistory(host, port, token) x.setSurgeryHistory(data) ret = x.send(timeout = 30) self.assertEqual(ret[0], 200) id = int(ret[1]["id"]) idlist.append(id) x = GetSurgeryHistory(host, port,token) x.setPatient(patientid1) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) rtcs = ret[1] self.assertTrue(len(rtcs) == 3) x = GetSurgeryHistory(host, port,token) x.setPatient(patientid2) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) rtcs = ret[1] self.assertTrue(len(rtcs) == 3) x = GetSurgeryHistory(host, port,token) x.setPatient(patientid3) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) rtcs = ret[1] self.assertTrue(len(rtcs) == 3) x = GetSurgeryHistory(host, port, token) x.setSurgery(surgeryid1) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) rtcs = ret[1] self.assertTrue(len(rtcs) == 3) x = GetSurgeryHistory(host, port, token) x.setSurgery(surgeryid2) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) rtcs = ret[1] self.assertTrue(len(rtcs) == 3) x = GetSurgeryHistory(host, port, token) x.setSurgery(surgeryid3) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) rtcs = ret[1] self.assertTrue(len(rtcs) == 3) for x in idlist: y = DeleteSurgeryHistory(host, port, token, x) ret = y.send(timeout = 30) self.assertEqual(ret[0], 200) for x in idlist: 
y = GetSurgeryHistory(host, port, token) y.setId(x) ret = y.send(timeout=30) self.assertEqual(ret[0], 404) x = DeletePatient(host, port, token, patientid1) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) x = DeletePatient(host, port, token, patientid2) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) x = DeletePatient(host, port, token, patientid3) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) x = DeleteSurgeryType(host, port, token, surgeryid1) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) x = DeleteSurgeryType(host, port, token, surgeryid2) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) x = DeleteSurgeryType(host, port, token, surgeryid3) ret = x.send(timeout = 30) self.assertEqual(ret[0],200) def usage(): print("surgeryhistory [-h host] [-p port] [-u username] [-w password]") def main(): try: opts, args = getopt.getopt(sys.argv[1:], "h:p:u:w:") except getopt.GetoptError as err: print(str(err)) usage() sys.exit(2) global host host = "127.0.0.1" global port port = 8000 global username username = None global password password = None for o, a in opts: if o == "-h": host = a elif o == "-p": port = int(a) elif o == "-u": username = a elif o == "-w": password = a else: assert False, "unhandled option" unittest.main(argv=[sys.argv[0]]) if __name__ == "__main__": main()
import json
import os


## TRANSCRIPT CLASS ##
# Holds all information provided by the database for audio synthesis,
# validation helpers to vet user-supplied fields before synthesis, and a
# method to push status updates back to the database.
class Transcript():

    ### CONSTANTS ###
    # Processing states; a transcript starts out pending.
    STATUS_PENDING = 1
    STATUS_COMPLETE = 2
    STATUS_ERROR = 3

    # The two available audio MIME types pre-conversion.
    WAV_FORM = "audio/wav"
    OGG_FORM = "audio/ogg;codecs=opus"

    # Error codes, also specified in database.py
    ERR_TRANSCRIPT_TEXT = -1  # invalid text to be synthesized
    ERR_FILENAME = -2         # invalid filename
    ERR_AUDIO_FORMAT = -3     # invalid audio format type
    ERR_FILEPATH = -4         # invalid filepath

    ## CONSTRUCTOR ##
    def __init__(self, dataList):
        """Build a transcript from a pyodbc result row.

        dataList layout (default row form from the pyodbc module):
        [0] id, [1] transcript text, [2] filename, [3] vox output path,
        [5] JSON blob holding "fileType" and "voiceID".
        """
        # Extract the JSON payload first; the rest of the row maps 1:1.
        jsonItem = json.loads(dataList[5])
        self.identity = dataList[0]
        self.voiceTranscript = dataList[1]
        self.filename = dataList[2]
        self.vox_filepath = dataList[3]
        self.wav_filepath = "wavfiles"
        self.fileType = jsonItem["fileType"]
        self.voiceID = jsonItem["voiceID"]
        self.status = self.STATUS_PENDING
        self.errorCode = 0  # null error on initialization

    ## GETTER FUNCTIONS ##
    def getIdentity(self):
        return self.identity

    def getStatus(self):
        return self.status

    def getError(self):
        return self.errorCode

    def getVoice(self):
        return self.voiceID

    def getTranscriptText(self):
        return self.voiceTranscript

    def getVoxFilePath(self):
        return self.vox_filepath

    def getWavFilePath(self):
        return self.wav_filepath

    def getFileName(self):
        return self.filename

    def getAccept(self):
        """Map fileType to the Accept MIME type for the Watson API.

        'vox' output is synthesized as wav first and converted later.
        Assumes checkFormat() has passed; an unrecognized type would
        raise UnboundLocalError here (pre-existing behavior, unchanged).
        """
        if self.fileType == 'wav':
            accept = self.WAV_FORM
        elif self.fileType == 'ogg':
            accept = self.OGG_FORM
        elif self.fileType == 'vox':
            accept = self.WAV_FORM
        return accept

    def getVoxBool(self):
        """Return True when the final output format is vox."""
        return self.fileType == 'vox'

    ## MEMBER FUNCTIONS ##
    # CHECK functions set status/errorCode and return False so the caller
    # can halt synthesis before a critical failure.

    def checkPhrase(self):
        """The phrase to synthesize must be non-empty."""
        if self.voiceTranscript == '':
            self.status = self.STATUS_ERROR
            self.errorCode = self.ERR_TRANSCRIPT_TEXT
            return False
        return True

    def checkFilename(self):
        """Filenames must not contain ':' or '.' (varies from OS to OS)."""
        for c in self.filename:
            if c == ':' or c == '.':
                self.status = self.STATUS_ERROR
                self.errorCode = self.ERR_FILENAME
                return False
        return True

    def checkFormat(self):
        """fileType must be one of the three supported formats."""
        if self.fileType not in ('ogg', 'wav', 'vox'):
            self.status = self.STATUS_ERROR
            self.errorCode = self.ERR_AUDIO_FORMAT
            return False
        return True

    def checkVoxFilePath(self):
        """The vox output directory must already exist.

        (Can be removed if the program should create its own directories.)
        """
        if not os.path.isdir(self.vox_filepath):
            self.status = self.STATUS_ERROR
            self.errorCode = self.ERR_FILEPATH
            return False
        return True

    def checkWavFilePath(self):
        """The wav output directory must already exist.

        BUG FIX: this previously tested vox_filepath, so an invalid wav
        directory was never caught.
        """
        if not os.path.isdir(self.wav_filepath):
            self.status = self.STATUS_ERROR
            self.errorCode = self.ERR_FILEPATH
            return False
        return True

    ## SETTER FUNCTIONS ##
    def setStatus(self, newStatus):
        self.status = newStatus

    def setError(self, newError):
        self.errorCode = newError

    def updateTranscriptData(self, DB_DRIVER, DB_HOST, DB_USER, DB_PASSWORD,
                             DB_NAME):
        """Push this transcript's status/errorCode to the database.

        Status semantics: pending=1 and complete=2 carry no error code;
        error=3 carries one of the ERR_* codes above.
        """
        # Imported here so the class stays usable (getters, validation)
        # when the ODBC driver is not installed.
        import pyodbc

        constr = "DRIVER=%s;SERVER=%s;UID=%s;PWD=%s;DATABASE=%s" % (
            DB_DRIVER, DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
        conn = pyodbc.connect(constr)
        crsr = conn.cursor()
        params = (self.identity, self.status, self.errorCode)
        # NOTE(review): interpolating values into the statement is safe
        # only while these fields stay integers; parameterized
        # crsr.execute("... ?, ?, ?", params) is preferred.
        exStr = "UpdateTextToSpeechStaging %s, %s, %s" % params
        crsr.execute(exStr)
        crsr.commit()
        conn.close()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base class for linear operators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import contextlib from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib.linalg.python.ops import linear_operator_util from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops __all__ = ["LinearOperator"] # TODO(langmore) Use matrix_solve_ls for singular or non-square matrices. class LinearOperator(object): """Base class defining a [batch of] linear operator[s]. Subclasses of `LinearOperator` provide a access to common methods on a (batch) matrix, without the need to materialize the matrix. This allows: * Matrix free computations * Operators that take advantage of special structure, while providing a consistent API to users. #### Subclassing To enable a public method, subclasses should implement the leading-underscore version of the method. The argument signature should be identical except for the omission of `name="..."`. For example, to enable `apply(x, adjoint=False, name="apply")` a subclass should implement `_apply(x, adjoint=False)`. 
#### Performance contract Subclasses should only implement the assert methods (e.g. `assert_non_singular`) if they can be done in less than `O(N^3)` time. Class docstrings should contain an explanation of computational complexity. Since this is a high-performance library, attention should be paid to detail, and explanations can include constants as well as Big-O notation. #### Shape compatibility `LinearOperator` sub classes should operate on a [batch] matrix with compatible shape. Class docstrings should define what is meant by compatible shape. Some sub-classes may not support batching. An example is: `x` is a batch matrix with compatible shape for `apply` if ``` operator.shape = [B1,...,Bb] + [M, N], b >= 0, x.shape = [B1,...,Bb] + [N, R] ``` `rhs` is a batch matrix with compatible shape for `solve` if ``` operator.shape = [B1,...,Bb] + [M, N], b >= 0, rhs.shape = [B1,...,Bb] + [M, R] ``` #### Example docstring for subclasses. This operator acts like a (batch) matrix `A` with shape `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `m x n` matrix. Again, this matrix `A` may not be materialized, but for purposes of identifying and working with compatible arguments the shape is relevant. Examples: ```python some_tensor = ... shape = ???? operator = MyLinOp(some_tensor) operator.shape() ==> [2, 4, 4] operator.log_abs_determinant() ==> Shape [2] Tensor x = ... Shape [2, 4, 5] Tensor operator.apply(x) ==> Shape [2, 4, 5] Tensor ``` #### Shape compatibility This operator acts on batch matrices with compatible shape. FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE #### Performance FILL THIS IN #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning * If `is_X == True`, callers should expect the operator to have the property `X`. 
This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ __metaclass__ = abc.ABCMeta def __init__(self, dtype, graph_parents=None, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None): r"""Initialize the `LinearOperator`. **This is a private method for subclass use.** **Subclasses should copy-paste this `__init__` documentation.** Args: dtype: The type of the this `LinearOperator`. Arguments to `apply` and `solve` will have to be this type. graph_parents: Python list of graph prerequisites of this `LinearOperator` Typically tensors that are passed during initialization. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `dtype` is real, this is equivalent to being symmetric. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix\ #Extension_for_non_symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: ValueError: If any member of graph_parents is `None` or not a `Tensor`. ValueError: If hints are set incorrectly. """ # Check and auto-set flags. 
if is_positive_definite: if is_non_singular is False: raise ValueError("A positive definite matrix is always non-singular.") is_non_singular = True if is_non_singular: if is_square is False: raise ValueError("A non-singular matrix is always square.") is_square = True if is_self_adjoint: if is_square is False: raise ValueError("A self-adjoint matrix is always square.") is_square = True self._is_square_set_or_implied_by_hints = is_square graph_parents = [] if graph_parents is None else graph_parents for i, t in enumerate(graph_parents): if t is None or not contrib_framework.is_tensor(t): raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t)) self._dtype = dtype self._graph_parents = graph_parents self._is_non_singular = is_non_singular self._is_self_adjoint = is_self_adjoint self._is_positive_definite = is_positive_definite self._name = name or type(self).__name__ # We will cache some tensors to avoid repeatedly adding shape # manipulation ops to the graph. # Naming convention: # self._cached_X_tensor is the cached version of self._X_tensor. 
self._cached_shape_tensor = None self._cached_batch_shape_tensor = None self._cached_domain_dimension_tensor = None self._cached_range_dimension_tensor = None self._cached_tensor_rank_tensor = None @contextlib.contextmanager def _name_scope(self, name=None, values=None): """Helper function to standardize op scope.""" with ops.name_scope(self.name): with ops.name_scope( name, values=((values or []) + self._graph_parents)) as scope: yield scope @property def dtype(self): """The `DType` of `Tensor`s handled by this `LinearOperator`.""" return self._dtype @property def name(self): """Name prepended to all ops created by this `LinearOperator`.""" return self._name @property def graph_parents(self): """List of graph dependencies of this `LinearOperator`.""" return self._graph_parents @property def is_non_singular(self): return self._is_non_singular @property def is_self_adjoint(self): return self._is_self_adjoint @property def is_positive_definite(self): return self._is_positive_definite @property def is_square(self): """Return `True/False` depending on if this operator is square.""" # Static checks done after __init__. Why? Because domain/range dimension # sometimes requires lots of work done in the derived class after init. auto_square_check = self.domain_dimension == self.range_dimension if self._is_square_set_or_implied_by_hints is False and auto_square_check: raise ValueError( "User set is_square hint to False, but the operator was square.") if self._is_square_set_or_implied_by_hints is None: return auto_square_check return self._is_square_set_or_implied_by_hints @abc.abstractmethod def _shape(self): # Write this in derived class to enable all static shape methods. raise NotImplementedError("_shape is not implemented.") @property def shape(self): """`TensorShape` of this `LinearOperator`. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `TensorShape([B1,...,Bb, M, N])`, equivalent to `A.get_shape()`. 
Returns: `TensorShape`, statically determined, may be undefined. """ return self._shape() @abc.abstractmethod def _shape_tensor(self): raise NotImplementedError("_shape_tensor is not implemented.") def shape_tensor(self, name="shape_tensor"): """Shape of this `LinearOperator`, determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`. Args: name: A name for this `Op. Returns: `int32` `Tensor` """ with self._name_scope(name): # Be clean by avoiding adding shape Ops to the graph too many times. if self._cached_shape_tensor is None: # Prefer to use statically defined shape if available. if self.shape.is_fully_defined(): self._cached_shape_tensor = linear_operator_util.shape_tensor( self.shape.as_list()) else: self._cached_shape_tensor = self._shape_tensor() return self._cached_shape_tensor @property def batch_shape(self): """`TensorShape` of batch dimensions of this `LinearOperator`. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `TensorShape([B1,...,Bb])`, equivalent to `A.get_shape()[:-2]` Returns: `TensorShape`, statically determined, may be undefined. """ # Derived classes get this "for free" once .shape is implemented. return self.shape[:-2] def batch_shape_tensor(self, name="batch_shape_tensor"): """Shape of batch dimensions of this operator, determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding `[B1,...,Bb]`. Args: name: A name for this `Op. Returns: `int32` `Tensor` """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): if self._cached_batch_shape_tensor is None: # Prefer to use statically defined shape if available. 
if self.batch_shape.is_fully_defined(): self._cached_batch_shape_tensor = linear_operator_util.shape_tensor( self.batch_shape.as_list(), name="batch_shape") else: self._cached_batch_shape_tensor = self.shape_tensor()[:-2] return self._cached_batch_shape_tensor @property def tensor_rank(self, name="tensor_rank"): """Rank (in the sense of tensors) of matrix corresponding to this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`. Args: name: A name for this `Op. Returns: Python integer, or None if the tensor rank is undefined. """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): return self.shape.ndims def tensor_rank_tensor(self, name="tensor_rank_tensor"): """Rank (in the sense of tensors) of matrix corresponding to this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`. Args: name: A name for this `Op. Returns: `int32` `Tensor`, determined at runtime. """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): if self._cached_tensor_rank_tensor is None: # Prefer to use statically defined shape if available. if self.tensor_rank is not None: self._cached_tensor_rank_tensor = ops.convert_to_tensor( self.tensor_rank) else: self._cached_tensor_rank_tensor = array_ops.size(self.shape_tensor()) return self._cached_tensor_rank_tensor @property def domain_dimension(self): """Dimension (in the sense of vector spaces) of the domain of this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `N`. Returns: `Dimension` object. """ # Derived classes get this "for free" once .shape is implemented. return self.shape[-1] def domain_dimension_tensor(self, name="domain_dimension_tensor"): """Dimension (in the sense of vector spaces) of the domain of this operator. Determined at runtime. 
If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `N`. Args: name: A name for this `Op`. Returns: `int32` `Tensor` """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): if self._cached_domain_dimension_tensor is None: # Prefer to use statically defined shape if available. if self.domain_dimension.value is not None: self._cached_domain_dimension_tensor = ops.convert_to_tensor( self.domain_dimension.value) else: self._cached_domain_dimension_tensor = self.shape_tensor()[-1] return self._cached_domain_dimension_tensor @property def range_dimension(self): """Dimension (in the sense of vector spaces) of the range of this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `M`. Returns: `Dimension` object. """ # Derived classes get this "for free" once .shape is implemented. return self.shape[-2] def range_dimension_tensor(self, name="range_dimension_tensor"): """Dimension (in the sense of vector spaces) of the range of this operator. Determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `M`. Args: name: A name for this `Op`. Returns: `int32` `Tensor` """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): if self._cached_range_dimension_tensor is None: # Prefer to use statically defined shape if available. 
if self.range_dimension.value is not None: self._cached_range_dimension_tensor = ops.convert_to_tensor( self.range_dimension.value) else: self._cached_range_dimension_tensor = self.shape_tensor()[-2] return self._cached_range_dimension_tensor def _assert_non_singular(self): raise NotImplementedError("assert_non_singular is not implemented.") def assert_non_singular(self, name="assert_non_singular"): """Returns an `Op` that asserts this operator is non singular.""" with self._name_scope(name): return self._assert_non_singular() def _assert_positive_definite(self): raise NotImplementedError("assert_positive_definite is not implemented.") def assert_positive_definite(self, name="assert_positive_definite"): """Returns an `Op` that asserts this operator is positive definite. Here, positive definite means that the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive definite. Args: name: A name to give this `Op`. Returns: An `Op` that asserts this operator is positive definite. """ with self._name_scope(name): return self._assert_positive_definite() def _assert_self_adjoint(self): raise NotImplementedError("assert_self_adjoint is not implemented.") def assert_self_adjoint(self, name="assert_self_adjoint"): """Returns an `Op` that asserts this operator is self-adjoint.""" with self._name_scope(name): return self._assert_self_adjoint() def _check_input_dtype(self, arg): """Check that arg.dtype == self.dtype.""" if arg.dtype != self.dtype: raise TypeError( "Expected argument to have dtype %s. Found: %s in tensor %s" % (self.dtype, arg.dtype, arg)) @abc.abstractmethod def _apply(self, x, adjoint=False, adjoint_arg=False): raise NotImplementedError("_apply is not implemented.") def apply(self, x, adjoint=False, adjoint_arg=False, name="apply"): """Transform `x` with left multiplication: `x --> Ax`. Args: x: `Tensor` with compatible shape and same `dtype` as `self`. 
See class docstring for definition of compatibility. adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is the hermitian transpose (transposition and complex conjugation). name: A name for this `Op. Returns: A `Tensor` with shape `[..., M, R]` and same `dtype` as `self`. """ with self._name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") self._check_input_dtype(x) self_dim = -2 if adjoint else -1 arg_dim = -1 if adjoint_arg else -2 self.shape[self_dim].assert_is_compatible_with(x.get_shape()[arg_dim]) return self._apply(x, adjoint=adjoint, adjoint_arg=adjoint_arg) def _determinant(self): if self._can_use_cholesky(): return math_ops.exp(self.log_abs_determinant()) return linalg_ops.matrix_determinant(self._matrix) def determinant(self, name="det"): """Determinant for every batch member. Args: name: A name for this `Op. Returns: `Tensor` with shape `self.batch_shape` and same `dtype` as `self`. Raises: NotImplementedError: If `self.is_square` is `False`. """ if self.is_square is False: raise NotImplementedError( "Determinant not implemented for an operator that is expected to " "not be square.") with self._name_scope(name): return self._determinant() def _log_abs_determinant(self): if self._can_use_cholesky(): diag = array_ops.matrix_diag_part(self._get_cached_chol()) return 2 * math_ops.reduce_sum(math_ops.log(diag), reduction_indices=[-1]) abs_det = math_ops.abs(self.determinant()) return math_ops.log(abs_det) def log_abs_determinant(self, name="log_abs_det"): """Log absolute value of determinant for every batch member. Args: name: A name for this `Op. Returns: `Tensor` with shape `self.batch_shape` and same `dtype` as `self`. Raises: NotImplementedError: If `self.is_square` is `False`. 
""" if self.is_square is False: raise NotImplementedError( "Determinant not implemented for an operator that is expected to " "not be square.") with self._name_scope(name): return self._log_abs_determinant() def _solve(self, rhs, adjoint=False, adjoint_arg=False): if self.is_square is False: raise NotImplementedError( "Solve is not yet implemented for non-square operators.") rhs = linear_operator_util.matrix_adjoint(rhs) if adjoint_arg else rhs if self._can_use_cholesky(): return linalg_ops.cholesky_solve(self._get_cached_chol(), rhs) return linalg_ops.matrix_solve( self._get_cached_dense_matrix(), rhs, adjoint=adjoint) def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"): """Solve `R` (batch) systems of equations with best effort: `A X = rhs`. The solution may not be exact, and in this case it will be close in some sense (see class docstring for details). Examples: ```python # Create an operator acting like a 10 x 2 x 2 matrix. operator = LinearOperator(...) operator.shape # = 10 x 2 x 2 # Solve one linear system (R = 1) for every member of the length 10 batch. RHS = ... # shape 10 x 2 x 1 X = operator.solve(RHS) # shape 10 x 2 x 1 # Solve five linear systems (R = 5) for every member of the length 10 batch. RHS = ... # shape 10 x 2 x 5 X = operator.solve(RHS) X[3, :, 2] # Solution to the linear system A[3, :, :] X = RHS[3, :, 2] ``` Args: rhs: `Tensor` with same `dtype` as this operator and compatible shape. See class docstring for definition of compatibility. adjoint: Python `bool`. If `True`, solve the system involving the adjoint of this `LinearOperator`: `A^H X = rhs`. adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H` is the hermitian transpose (transposition and complex conjugation). name: A name scope to use for ops added by this method. Returns: `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`. Raises: NotImplementedError: If `self.is_non_singular` or `is_square` is False. 
""" if self.is_non_singular is False: raise NotImplementedError( "Exact solve not implemented for an operator that is expected to " "be singular.") if self.is_square is False: raise NotImplementedError( "Exact solve not implemented for an operator that is expected to " "not be square.") with self._name_scope(name, values=[rhs]): rhs = ops.convert_to_tensor(rhs, name="rhs") self._check_input_dtype(rhs) self_dim = -1 if adjoint else -2 arg_dim = -1 if adjoint_arg else -2 self.shape[self_dim].assert_is_compatible_with(rhs.get_shape()[arg_dim]) return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) def _to_dense(self): """Generic and often inefficient implementation. Override often.""" if self.batch_shape.is_fully_defined(): batch_shape = self.batch_shape else: batch_shape = self.batch_shape_tensor() if self.domain_dimension.value is not None: n = self.domain_dimension.value else: n = self.domain_dimension_tensor() eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype) return self.apply(eye) def to_dense(self, name="to_dense"): """Return a dense (batch) matrix representing this operator.""" with self._name_scope(name): return self._to_dense() def _diag_part(self): """Generic and often inefficient implementation. Override often.""" return array_ops.matrix_diag_part(self.to_dense()) def diag_part(self, name="diag_part"): """Efficiently get the [batch] diagonal part of this operator. If this operator has shape `[B1,...,Bb, M, N]`, this returns a `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`. ``` my_operator = LinearOperatorDiag([1., 2.]) # Efficiently get the diagonal my_operator.diag_part() ==> [1., 2.] # Equivalent, but inefficient method tf.matrix_diag_part(my_operator.to_dense()) ==> [1., 2.] ``` Args: name: A name for this `Op`. Returns: diag_part: A `Tensor` of same `dtype` as self. 
""" with self._name_scope(name): return self._diag_part() def _add_to_tensor(self, x): # Override if a more efficient implementation is available. return self.to_dense() + x def add_to_tensor(self, x, name="add_to_tensor"): """Add matrix represented by this operator to `x`. Equivalent to `A + x`. Args: x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`. name: A name to give this `Op`. Returns: A `Tensor` with broadcast shape and same `dtype` as `self`. """ with self._name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") self._check_input_dtype(x) return self._add_to_tensor(x) def _can_use_cholesky(self): # TODO(langmore) Add complex types when tf.cholesky can use them. return (not self.dtype.is_complex and self.is_self_adjoint and self.is_positive_definite) def _get_cached_dense_matrix(self): if not hasattr(self, "_cached_dense_matrix"): self._cached_dense_matrix = self.to_dense() return self._cached_dense_matrix def _get_cached_chol(self): if not self._can_use_cholesky(): return None if not hasattr(self, "_cached_chol"): self._cached_chol = linalg_ops.cholesky(self._get_cached_dense_matrix()) return self._cached_chol
#!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
"""Integration tests for parallel-environment (PE) operations of QconfApi.

The tests share module-level state (API, PE_NAME) and are intended to run in
file order: earlier tests create the PE that later tests inspect, modify and
finally delete.
"""
import tempfile

from .utils import needs_uge
from .utils import generate_random_string
from .utils import create_config_file
from .utils import load_values
from uge.api.qconf_api import QconfApi
from uge.config.config_manager import ConfigManager
from uge.log.log_manager import LogManager
from uge.exceptions.object_not_found import ObjectNotFound
from uge.exceptions.object_already_exists import ObjectAlreadyExists

# Module-level fixtures: one shared API handle and a random PE name that the
# ordered tests below create, modify, and finally delete.
create_config_file()
API = QconfApi()
PE_NAME = '%s.q' % generate_random_string(6)
CONFIG_MANAGER = ConfigManager.get_instance()
LOG_MANAGER = LogManager.get_instance()
VALUES_DICT = load_values('test_values.json')
print(VALUES_DICT)


@needs_uge
def test_object_not_found():
    """get_pe() on a nonexistent PE must raise ObjectNotFound."""
    try:
        API.get_pe('__non_existent_pe__')
        assert False, 'ObjectNotFound was not raised'
    except ObjectNotFound:
        pass  # expected


def test_generate_pe():
    """generate_pe() returns an object carrying the requested name."""
    pe = API.generate_pe(PE_NAME)
    assert pe.data['pe_name'] == PE_NAME


def test_add_pe():
    """add_pe() grows the PE list by exactly one entry."""
    try:
        pel = API.list_pes()
    except ObjectNotFound:
        # No PEs defined yet.
        pel = []
    pe = API.add_pe(name=PE_NAME)
    assert pe.data['pe_name'] == PE_NAME
    pel2 = API.list_pes()
    assert len(pel2) == len(pel) + 1
    assert pel2.count(PE_NAME) == 1


def test_list_pes():
    """list_pes() returns a non-None listing once a PE exists."""
    pel = API.list_pes()
    assert pel is not None


def test_object_already_exists():
    """Re-adding an existing PE must raise ObjectAlreadyExists."""
    try:
        API.add_pe(name=PE_NAME)
        assert False, 'ObjectAlreadyExists was not raised'
    except ObjectAlreadyExists:
        pass  # expected


def test_get_pe():
    """get_pe() retrieves the PE created by test_add_pe()."""
    pe = API.get_pe(PE_NAME)
    assert pe.data['pe_name'] == PE_NAME


def test_generate_pe_from_json():
    """A PE round-trips through JSON with every field preserved."""
    pe = API.get_pe(PE_NAME)
    json = pe.to_json()
    pe2 = API.generate_object(json)
    assert pe2.__class__.__name__ == pe.__class__.__name__
    for key in list(pe.data.keys()):
        v = pe.data[key]
        v2 = pe2.data[key]
        if type(v) == list:
            assert len(v) == len(v2)
            for s in v:
                assert v2.count(s) == 1
        elif type(v) == dict:
            # Use a distinct inner loop variable; the original shadowed
            # the outer 'key' here.
            for k in list(v.keys()):
                assert str(v[k]) == str(v2[k])
        else:
            assert str(v) == str(v2)


def test_modify_pe():
    """modify_pe() applies a slot-count change that get_pe() reflects."""
    pe = API.get_pe(PE_NAME)
    slots = pe.data['slots']
    pe = API.modify_pe(name=PE_NAME, data={'slots': slots + 1})
    slots2 = pe.data['slots']
    assert slots2 == slots + 1


def test_get_acls():
    """Every PE returned by get_pes() must appear in list_pes().

    NOTE(review): the name says "acls" but the body tests PEs; looks like a
    copy-paste misnomer — kept unchanged so external test selection by name
    still works.
    """
    pel = API.list_pes()
    pes = API.get_pes()
    for pe in pes:
        print("#############################################")
        print(pe.to_uge())
        assert pe.data['pe_name'] in pel


def test_write_pes():
    """PEs written to a directory can be re-added and modified from it."""
    pe_names = VALUES_DICT['pe_names']
    # Create the scratch directory before entering the try block so the
    # finally clause never references an unbound name (original bug:
    # NameError in finally when mkdtemp() itself failed).
    tdir = tempfile.mkdtemp()
    try:
        print("*************************** " + tdir)
        pes = API.get_pes()
        for pe in pes:
            print("Before #############################################")
            print(pe.to_uge())
        new_pes = [API.generate_pe(name=name) for name in pe_names]
        API.mk_pes_dir(tdir)
        API.write_pes(new_pes, tdir)
        API.add_pes_from_dir(tdir)
        API.modify_pes_from_dir(tdir)
        pes = API.get_pes()
        for pe in pes:
            print("After #############################################")
            print(pe.to_uge())
        pes = API.list_pes()
        for name in pe_names:
            assert name in pes
            print("pe found: " + name)
    finally:
        API.delete_pes_from_dir(tdir)
        API.rm_pes_dir(tdir)


def test_add_pes():
    """add_pes()/modify_pes() register every generated PE with the cluster."""
    pe_names = VALUES_DICT['pe_names']
    # Build the list before entering the try block: the finally clause
    # deletes these PEs and must never see an unbound name (original bug:
    # NameError in finally when generation failed).
    new_pes = [API.generate_pe(name=name) for name in pe_names]
    try:
        # Print all PEs currently in the cluster.
        pes = API.get_pes()
        for pe in pes:
            print("Before #############################################")
            print(pe.to_uge())
        # Add the new PEs.
        API.add_pes(new_pes)
        API.modify_pes(new_pes)
        # Print all PEs currently in the cluster.
        pes = API.get_pes()
        for pe in pes:
            print("After #############################################")
            print(pe.to_uge())
        # Check that the PEs have been added.
        pes = API.list_pes()
        for name in pe_names:
            assert name in pes
            print("pe found: " + name)
    finally:
        API.delete_pes(new_pes)


def test_delete_pe():
    """delete_pe() shrinks the PE list by one and removes PE_NAME."""
    pel = API.list_pes()
    API.delete_pe(PE_NAME)
    try:
        pel2 = API.list_pes()
    except ObjectNotFound:
        # No PEs defined any more.
        pel2 = []
    assert len(pel2) == len(pel) - 1
    assert pel2.count(PE_NAME) == 0
""" MediaWiki translator, aimed at Wikipedia/WikiBooks type of web pages. Syntax defined by http://en.wikipedia.org/wiki/Help:Wiki_markup and http://en.wikipedia.org/wiki/Help:Displaying_a_formula. The prefix m in the name mwiki distinguishes this translator from gwiki (googlecode wiki). Not yet implemented: mwiki_ref_and_label (just using code from gwiki) Just using plan ASCII solutions for index_bib (requires some work to port to MediaWiki, but is straightforward - use rst as template) and exercise (probably ok with the plain solution). GitHub wiki pages understand MediaWiki, see https://github.com/github/gollum The page http://en.wikibooks.org/wiki/Wikibooks:Sandbox is fine for short-lived experiments. http://shoutwiki.com can host MediaWiki pages. http://jumpwiki.com/wiki/Main_Page can also host MediaWiki pages, but there are troubles with align envirs and math (ugly typesetting and some strange indents). Create a user account, choose *Create a Wiki* in the menu on the left, fill out the form, wait until you get a Main Page, click on edit, make references to a new page, say [[First demo|demo]], save, click on demo and fill out that page with the content of a mydoconcefile.wiki, sometimes it is necessary to create a new account, just do that and go back. """ import re, os, commands, sys from common import default_movie, plain_exercise, insert_code_and_tex from plaintext import plain_quiz from misc import _abort def align2equations(math_text): """ Transform an align environment to a set of equation environments. Used to handle multiple equations if align does not work well. Note: This version is outdated. common.align2equations is the newest attempt to implement align in terms of single equations. 
""" if not '{align' in math_text: return math_text = math_text.replace('&', '') math_text = math_text.replace('\\\\', r""" </math> :<math>""") pattern = r'\\(begin|end)\{align\*?\}\s*' math_text = re.sub(pattern, '', math_text) # :<math> and </math> surroundings appear when !bt and !et are translated return math_text def equation2nothing(math_text): pattern = r'\\(begin|end)\{equation\*?\}\s*' math_text = re.sub(pattern, '', math_text) math_text = math_text.replace(r'\[', '') math_text = math_text.replace(r'\]', '') return math_text def remove_labels(math_text): pattern = 'label\{(.+?)\}\s*' labels = re.findall(pattern, math_text) if labels: math_text = re.sub(pattern, '', math_text) return math_text, labels def mwiki_code(filestr, code_blocks, code_block_types, tex_blocks, format): # http://en.wikipedia.org/wiki/Help:Displaying_a_formula # MediaWiki math does not support labels in equations. # The enviros equation and \[ \] must be removed (not supported). for i in range(len(tex_blocks)): # Standard align works in Wikipedia and Wikibooks. # Standard align gives somewhat ugly output on wiiki.com services, # but a set of separate equations is not much better. # We therefore stick to align instead. 
#tex_blocks[i] = align2equations(tex_blocks[i]) tex_blocks[i] = equation2nothing(tex_blocks[i]) tex_blocks[i], labels = remove_labels(tex_blocks[i]) for label in labels: if label in filestr: print '*** warning: reference to label "%s" in an equation does not work in MediaWiki' % label filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, format) # Supported programming languages: # http://www.mediawiki.org/wiki/Extension:SyntaxHighlight_GeSHi#Supported_languages envir2lang = dict(cod='python', pycod='python', cycod='python', fcod='fortran', ccod='c', cppcod='cpp', mcod='matlab', plcod='perl', shcod='bash', pro='python', pypro='python', cypro='python', fpro='fortran', cpro='c', cpppro='cpp', mpro='matlab', plpro='perl', shpro='bash', rbpro='ruby', rbcod='ruby', javacod='java', javapro='java', htmlcod='html5', xmlcod='xml', htmlpro='html5', xmlpro='xml', html='html5', xml='xml', sys='bash', dat='text', csv='text', txt='text', pyoptpro='python', pyscpro='python', ipy='python', pyshell='python', ) for key in envir2lang: language = envir2lang[key] cpattern = re.compile(r'^!bc\s+%s\s*\n' % key, flags=re.MULTILINE) filestr = cpattern.sub('<syntaxhighlight lang="%s">\n' % \ envir2lang[key], filestr) c = re.compile(r'^!bc.*$\n', re.MULTILINE) filestr = c.sub('<syntaxhighlight lang="text">\n', filestr) filestr = re.sub(r'!ec\n', '</syntaxhighlight>\n', filestr) c = re.compile(r'^!bt\n', re.MULTILINE) filestr = c.sub(':<math>\n', filestr) filestr = re.sub(r'!et\n', '</math>\n', filestr) # Final fix of MediaWiki file # __TOC__ syntax is misinterpretated as paragraph heading, so we # use <<<TOC>>> instead and replace to right syntax here at the end. 
filestr = filestr.replace('<<<TOC>>>', '__TOC__') return filestr def mwiki_figure(m): filename = m.group('filename') link = filename if filename.startswith('http') else None if not link and not os.path.isfile(filename): raise IOError('no figure file %s' % filename) basename = os.path.basename(filename) stem, ext = os.path.splitext(basename) root, ext = os.path.splitext(filename) if link is None: if not ext in '.png .gif .jpg .jpeg'.split(): # try to convert image file to PNG, using # convert from ImageMagick: cmd = 'convert %s png:%s' % (filename, root+'.png') failure, output = commands.getstatusoutput(cmd) if failure: print '\n**** warning: could not run ', cmd print ' convert %s to PNG format manually' % filename _abort() filename = root + '.png' caption = m.group('caption').strip() if caption != '': caption = '|' + caption # add | for non-empty caption else: # Avoid filename as caption when caption is empty # see http://www.mediawiki.org/wiki/Help:Images caption = '|<span title=""></span>' # keep label if it's there: caption = re.sub(r'label\{(.+?)\}', '(\g<1>)', caption) size = '' opts = m.group('options').strip() if opts: info = dict([s.split('=') for s in opts.split()]) if 'width' in info and 'height' in info: size = '|%sx%spx' % (info['width'], info['height']) elif 'width' in info: size = '|%spx' % info['width'] elif 'height' in info: size = '|x%spx' % info['height'] if link: # We link to some image on the web filename = os.path.basename(filename) link = os.path.dirname(link) result = r""" [[File:%s|frame%s|link=%s|alt=%s%s]] """ % (filename, size, link, filename, caption) else: # We try to link to a file at wikimedia.org. found_wikimedia = False orig_filename = filename # Check if the file exists and find the appropriate wikimedia name. 
# http://en.wikipedia.org/w/api.php?action=query&titles=Image:filename&prop=imageinfo&format=xml # Skip directories - get the basename filename = os.path.basename(filename) import urllib prms = urllib.urlencode({ 'action': 'query', 'titles': 'Image:' + filename, 'prop': 'imageinfo', 'format': 'xml'}) url = 'http://en.wikipedia.org/w/api.php?' + prms try: print ' ...checking if %s is stored at en.wikipedia.org/w/api.php...' % filename f = urllib.urlopen(url) imageinfo = f.read() f.close() def get_data(name, text): pattern = '%s="(.*?)"' % name m = re.search(pattern, text) if m: match = m.group(1) if 'Image:' in match: return match.split('Image:')[1] if 'File:' in match: return match.split('File:')[1] else: return match else: return None data = ['from', 'to', 'title', 'missing', 'imagerepository', 'timestamp', 'user'] orig_filename = filename filename = get_data('title', imageinfo) user = get_data('user', imageinfo) timestamp = get_data('timestamp', imageinfo) if user: found_wikimedia = True print ' ...found %s at wikimedia' % filename result = r""" [[File:%s|frame%s|alt=%s%s]] <!-- user: %s, filename: %s, timestamp: %s --> """ % (filename, size, filename, caption, user, orig_filename, timestamp) except IOError: print ' ...no Internet connection...' 
if not found_wikimedia: print ' ...for wikipedia/wikibooks you must upload image file %s to\n common.wikimedia.org' % orig_filename # see http://commons.wikimedia.org/wiki/Commons:Upload # and http://commons.wikimedia.org/wiki/Special:UploadWizard print ' ...for now we use local file %s' % filename # This is fine if we use github wiki result = r""" [[File:%s|frame%s|alt=%s%s]] <!-- not yet uploaded to common.wikimedia.org --> """ % (filename, size, filename, caption) return result from common import table_analysis def mwiki_author(authors_and_institutions, auth2index, inst2index, index2inst, auth2email): authors = [] for author, i, email in authors_and_institutions: if email is None: email_text = '' else: name, adr = email.split('@') email_text = ' (%s at %s)' % (name, adr) authors.append('_%s_%s' % (author, email_text)) if len(authors) == 1: authors = authors[0] elif len(authors) == 2: authors = authors[0] + ' and ' + authors[1] elif len(authors) > 2: authors[-1] = 'and ' + authors[-1] authors = ', '.join(authors) else: # no authors: return '' text = '\n\nBy ' + authors + '\n\n' # we skip institutions in mwiki return text from gwiki import wiki_ref_and_label_common def mwiki_ref_and_label(section_label2title, format, filestr): return wiki_ref_and_label_common(section_label2title, format, filestr) def mwiki_admon(block, format, title='Warning', text_size='normal', admon_type='warning'): if title.lower().strip() == 'none': title = '' # Blocks without explicit title should have empty title if title == 'Block': title = '' if title and title[-1] not in ('.', ':', '!', '?'): # Make sure the title ends with puncuation title += '.' 
admon_type2mwiki = dict(notice='notice', warning='warning', # or critical or important hint='notice', quote='quote') if admon_type in admon_type2mwiki: admon_type = admon_type2mwiki[admon_type] # use mwiki admon else: admon_type = title # Just use the title text = "'''%s''' " % title + block if text_size == 'normal': text_size = '90%' elif text_size == 'large': text_size = '130%' elif text_size == 'small': text_size = '80%' if admon_type == 'quote': s = """ {{quote box | quote = %s | textstyle = font-size: %s; }} """ % (block, text_size) # quote has also | source = ... but other formats like # latex and html have no specific source tag, so it must # be typeset manually else: s = """ {{mbox | type = %s | textstyle = font-size: %s; | text = %s }} """ % (admon_type, text_size, text) return s # mbox: notice def define(FILENAME_EXTENSION, BLANKLINE, INLINE_TAGS_SUBST, CODE, LIST, ARGLIST, TABLE, EXERCISE, FIGURE_EXT, CROSS_REFS, INDEX_BIB, TOC, ENVIRS, QUIZ, INTRO, OUTRO, filestr): # all arguments are dicts and accept in-place modifications (extensions) FILENAME_EXTENSION['mwiki'] = '.mwiki' # output file extension BLANKLINE['mwiki'] = '\n' # replacement patterns for substitutions of inline tags INLINE_TAGS_SUBST['mwiki'] = { 'math': r'\g<begin><math>\g<subst></math>\g<end>', 'math2': r'\g<begin><math>\g<latexmath></math>\g<end>', 'emphasize': r"\g<begin>''\g<subst>''\g<end>", 'bold': r"\g<begin>'''\g<subst>'''\g<end>", 'verbatim': r'\g<begin><code>\g<subst></code>\g<end>', #'linkURL': r'\g<begin>[\g<url> \g<link>]\g<end>', 'linkURL2': r'[\g<url> \g<link>]', 'linkURL3': r'[\g<url> \g<link>]', 'linkURL2v': r'[\g<url> <code>\g<link></code>]', 'linkURL3v': r'[\g<url> <code>\g<link></code>]', 'plainURL': r'\g<url>', 'colortext': r'<font color="\g<color>">\g<text></font>', 'chapter': r"""== '''\g<subst>''' ==""", 'section': r'== \g<subst> ==', 'subsection': r'=== \g<subst> ===', 'subsubsection': r'==== \g<subst> ====\n', 'paragraph': r"''\g<subst>''\n", 'title': r'#TITLE 
(actually governed by the filename): \g<subst>\n', 'date': r'===== \g<subst> =====', 'author': mwiki_author, #r'===== \g<name>, \g<institution> =====', # 'figure': r'<\g<filename>>', 'figure': mwiki_figure, 'movie': default_movie, # will not work for HTML movie player 'comment': '<!-- %s -->', 'abstract': r'\n*\g<type>.* \g<text>\g<rest>', 'linebreak': r'\g<text><br />', 'non-breaking-space': '&nbsp;', 'ampersand2': r' \g<1>&\g<2>', } CODE['mwiki'] = mwiki_code from html import html_table TABLE['mwiki'] = html_table ENVIRS['mwiki'] = { 'warning': lambda block, format, title='Warning', text_size='normal': mwiki_admon(block, format, title, text_size, 'warning'), 'notice': lambda block, format, title='Notice', text_size='normal': mwiki_admon(block, format, title, text_size, 'notice'), 'question': lambda block, format, title='Question', text_size='normal': mwiki_admon(block, format, title, text_size, 'question'), 'hint': lambda block, format, title='Hint', text_size='normal': mwiki_admon(block, format, title, text_size, 'hint'), 'summary': lambda block, format, title='Summary', text_size='normal': mwiki_admon(block, format, title, text_size, 'summary'), 'block': lambda block, format, title='Block', text_size='normal': mwiki_admon(block, format, title, text_size, 'block'), 'box': lambda block, format, title='none', text_size='normal': mwiki_admon(block, format, title, text_size, 'box'), 'quote': lambda block, format, title='none', text_size='normal': mwiki_admon(block, format, title, text_size, 'quote'), } # native list: LIST['mwiki'] = { 'itemize': {'begin': '\n', 'item': '*', 'end': '\n\n'}, 'enumerate': {'begin': '\n', 'item': '#', 'end': '\n\n'}, 'description': {'begin': '\n', 'item': '* %s ', 'end': '\n\n'}, 'separator': '\n'} # Try this: LIST['mwiki'] = LIST['html'] # how to typeset description lists for function arguments, return # values, and module/class variables: ARGLIST['mwiki'] = { 'parameter': '*argument*', 'keyword': '*keyword argument*', 'return': 
'*return value(s)*', 'instance variable': '*instance variable*', 'class variable': '*class variable*', 'module variable': '*module variable*', } FIGURE_EXT['mwiki'] = { 'search': ('.png', '.gif', '.jpg', '.jpeg'), 'convert': ('.png', '.gif', '.jpg')} CROSS_REFS['mwiki'] = mwiki_ref_and_label from plaintext import plain_index_bib EXERCISE['mwiki'] = plain_exercise INDEX_BIB['mwiki'] = plain_index_bib TOC['mwiki'] = lambda s: '<<<TOC>>>' # __TOC__ will be wrongly translated to paragraph headline and needs a fix QUIZ['mwiki'] = plain_quiz # document start: INTRO['mwiki'] = ''
# This file is part of PanGaia and licensed under the CreativeCommons, Share-Alike, Attribution
# (C) Mark Janssen, dreamingforward@gmail.com
# For most up-to-date version, see the PangaiaProject

"""Bag types: a set-like container that counts the number of the same items held within it."""

import random  # used by pick()

_DEBUG = True  # when True, __str__() checks class invariants via _validate()


class IntegerBag(dict):
    """Implements a bag type that allows item counts to be negative.

    Maps item -> non-zero int count; items whose count becomes zero are
    removed from the mapping (see __setitem__).
    """

    __slots__ = []

    def __init__(self, init=None):
        """Initialize bag with optional contents.

        >>> b = Bag() #creates empty bag
        >>> b
        {}
        >>> print(IntegerBag({1: -1, 2: 0, 3: -9}))
        {1: -1, 3: -9}

        Can initialize with (key, count) list as in standard dict.
        However, duplicate keys will accumulate counts:

        >>> print(Bag([(1, 2), (2, 4), (1, 7)]))
        {1: 9, 2: 4}
        """
        # FIX: the default used to be a shared mutable {}; use a None
        # sentinel instead (behavior for all explicit arguments unchanged).
        if init is None:
            init = {}
        if not init or isinstance(init, self.__class__):
            # values known to be good (same class, or empty): fast dict creation
            dict.__init__(self, init)
        else:  # initializing with list or plain dict
            dict.__init__(self)
            if isinstance(init, dict):
                for key, count in init.items():
                    self[key] = count  # goes through __setitem__, which tests invariants
            else:
                # sequence may contain duplicates, so add to existing value, if any
                for key, count in init:
                    self[key] += count

    @classmethod
    def fromkeys(cls, iterable, count=1):
        """Class method which creates bag from iterable adding optional count for each item.

        >>> b = Bag({'b': 2, 'c': 1, 'a': 3})
        >>> b2 = Bag.fromkeys(['a', 'b', 'c', 'b', 'a', 'a'])
        >>> b3 = Bag.fromkeys("abacab")
        >>> assert b == b2 == b3
        >>> word_count = Bag.fromkeys("how much wood could a wood chuck chuck".split())
        >>> print(word_count)
        {'a': 1, 'chuck': 2, 'could': 1, 'how': 1, 'much': 1, 'wood': 2}

        An optional count can be specified.  Count added each time item is encountered.

        >>> print(Bag.fromkeys("abacab", 5))
        {'a': 15, 'b': 10, 'c': 5}
        """
        b = cls()
        for key in iterable:
            # perhaps slower than necessary but simplifies derived classes
            # that override __setitem__()
            b[key] += count
        return b

    def update(self, items, count=1):
        """Adds contents to bag from other mapping type or iterable.

        >>> ib = IntegerBag.fromkeys('abc')
        >>> ib.update({'a': 2, 'b': 1, 'c': 0})
        >>> print(ib)
        {'a': 3, 'b': 2, 'c': 1}

        Negative updates are allowable.

        >>> ib.update({'a': -2, 'b': -2, 'c': -2, 'd': 2})
        >>> print(ib)
        {'a': 1, 'c': -1, 'd': 2}

        Can call with iterable.  Amount added can be specified by count
        parameter:

        >>> ib.update(['a','b'], 2)
        >>> print(ib)
        {'a': 3, 'b': 2, 'c': -1, 'd': 2}

        Values that can't be converted to ints are skipped and will raise
        TypeError.

        >>> ib.update({0: 'test1', 'a': 'test2', 'd': 3, 'f': '1.0', 'c': 2.0})
        Traceback (most recent call last):
        TypeError: unsupported operand type(s) for +=: 'int' and 'str'
        >>> print(ib)
        {'a': 3, 'b': 2, 'c': 1, 'd': 5}

        Updating Bag with values that would cause the count to go negative
        sets count to 0, removing item.

        >>> b = Bag({'a': 1, 'c': 2, 'd': 5})
        >>> b.update({'a': -4, 'b': -1, 'c': -2, 'd': -2})
        >>> print(b)
        {'d': 3}

        NOTE: Exceptions are only reported on the last bad element encountered.
        """
        # FIX: the original set `err = Trie` (NameError typo) and then did
        # `raise TypeError(error)` after the loop -- but Python 3 deletes the
        # `except ... as error` name when the except block exits, so the
        # re-raise itself crashed with NameError.  Keep a reference instead.
        last_error = None
        if isinstance(items, dict):
            for key, count in items.items():  # note: shadows the count parameter
                try:
                    self[key] += count  # may be slower than necessary
                except TypeError as error:
                    last_error = error  # best effort: remember and keep going
        else:  # sequence
            for key in items:
                try:
                    self[key] += count
                except TypeError as error:
                    last_error = error
        if last_error is not None:
            # only the last bad element is reported, as documented above
            raise TypeError(last_error)

    def pick(self, count=1, remove=True):  # XXX perhaps better to default to False?
        """Returns a bag with 'count' random items from bag (defaults to 1),
        removing the items unless told otherwise.

        >>> b = IntegerBag({'a': 3, 'b': -2, 'c': 1})
        >>> sub = b.pick(4)
        >>> sub.size, b.size
        (4, 2)
        """
        l = list(self.itereach())  # expand into individual (item, +/-1) units
        picked = IntegerBag(random.sample(l, min(abs(count), len(l))))
        if count < 0:
            picked *= (-1)  # this probably not useful except for Network class
        if remove:
            self -= picked
        return picked

    def pop(self, item):
        """Remove all of item from bag, returning its count, if any.

        >>> b = IntegerBag.fromkeys("abacab")
        >>> b.pop('b')
        2
        >>> b.pop('z')
        0
        >>> print(b)
        {'a': 3, 'c': 1}
        """
        return super(IntegerBag, self).pop(item, 0)

    def discard(self, item):
        """Removes all of the specified item if it exists, otherwise ignored.

        >>> b = Bag.fromkeys("abacab")
        >>> b.discard('b')
        >>> b.discard('d')  #non-existent items ignored
        >>> print(b)
        {'a': 3, 'c': 1}
        """
        try:
            del self[item]  # note: this does not call __getitem__
        except KeyError:
            pass

    def setdefault(self, item, count=1):
        # NOTE(review): when count filters to 0 this returns 0 even if the
        # item already has a non-zero count -- possibly surprising; confirm
        # callers rely on it before changing.
        count = self._filter(count)
        return count and dict.setdefault(self, item, count)

    def itereach(self):  # XXX consider rename akin to Python3 rules
        """Will iterate through all items in bag individually.

        >>> b = Bag.fromkeys("abacab")
        >>> l = list(b.itereach()); l.sort()
        >>> l
        [('a', 1), ('a', 1), ('a', 1), ('b', 1), ('b', 1), ('c', 1)]
        >>> b = IntegerBag(b)
        >>> b['b'] = -2
        >>> l = list(b.itereach()); l.sort()
        >>> l
        [('a', 1), ('a', 1), ('a', 1), ('b', -1), ('b', -1), ('c', 1)]

        Note: iteration on bag itself just iterates through unique keys:

        >>> l = list(b) ; l.sort()
        >>> l
        ['a', 'b', 'c']
        """
        for key, count in self.items():
            for i in range(abs(count)):
                # (key, +/-1) pairs account for negative counts
                yield (key, 1 if count >= 0 else -1)

    def __iadd__(self, other):
        """Add items in bag.

        >>> b = Bag()
        >>> b += [1, 2, 1, 0]
        >>> print(b)
        {0: 1, 1: 2, 2: 1}
        >>> b.clear()
        >>> b += "abca"
        >>> print(b)
        {'a': 2, 'b': 1, 'c': 1}
        """
        self.update(other, 1)  # XXX may fail mid-update...
        return self

    def __add__(self, other):
        """Add one bag to another, returns type of first bag.

        >>> b = IntegerBag({1: 2, 2: -2}) + Bag({1: 5, 2: 1, 3: 7})
        >>> b, "IntegerBag" in str(type(b))
        ({1: 7, 2: -1, 3: 7}, True)
        """
        # XXX better way to create copy?? (in case self.__class__ has more
        # complicated constructor...)
        return self.__class__(self).__iadd__(other)

    def __isub__(self, other):
        """Subtract items from bag.

        >>> b = Bag.fromkeys("abacab")
        >>> b -= "cccccab"
        >>> print(b)
        {'a': 2, 'b': 1}
        """
        if isinstance(other, dict):
            other = IntegerBag(other) * (-1)  # negate, then add
        self.update(other, -1)
        return self

    def __sub__(self, other):
        """Subtract items from bag.

        >>> IntegerBag({1: 2, 2: -2}) - {1: 5, 2: -2, 3: 7}
        {1: -3, 3: -7}
        """
        return self.__class__(self).__isub__(other)

    def __imul__(self, factor):
        """Multiply bag contents by factor.

        >>> b = Bag.fromkeys("abacab")
        >>> b *= 4
        >>> print(b)
        {'a': 12, 'b': 8, 'c': 4}

        Negative factors can be used with IntegerBag.

        >>> ib = IntegerBag(b)
        >>> ib *= -1
        >>> print(ib)
        {'a': -12, 'b': -8, 'c': -4}

        Trying that on a Bag will return empty bag (akin to list behavior).

        >>> b *= -1
        >>> b
        {}

        Zero factors will return empty bag.

        >>> b += "abacab"
        >>> b *= 0
        >>> b
        {}
        """
        if self._filter(factor):
            for item, count in self.items():
                # bypass test logic in __setitem__ (only values change, so
                # mutating while iterating is safe)
                dict.__setitem__(self, item, count * factor)
        else:  # factor==0, or negative on Bag
            # call dict.clear to protect subclasses which might override
            # clear() to do other things besides clear dict values
            dict.clear(self)
        return self

    def __mul__(self, factor):
        """Returns new bag of same type multiplied by factor.

        >>> d = {1: 2, 2: 4, 3: -9}
        >>> IntegerBag(d) * -1
        {1: -2, 2: -4, 3: 9}
        >>> Bag(d) * -1
        {}
        """
        return self.__class__(self).__imul__(factor)

    def _size(self):
        """Returns sum of absolute value of item counts in bag.

        >>> b = IntegerBag.fromkeys("abacab")
        >>> b['a'] = -4
        >>> b.size
        7
        """
        return sum(map(abs, self.values()))

    size = property(_size, None, None, "Sum of absolute count values in the bag")

    def __getitem__(self, item):
        """Returns total count for given item, or zero if item not in bag.

        >>> b = Bag.fromkeys("abacab")
        >>> b['a']
        3
        >>> b['d']
        0
        """
        return self.get(item, 0)

    count = __getitem__  # alias

    def __setitem__(self, item, count):
        """Sets the count for the given item in bag, removing if zero.

        >>> b = Bag()
        >>> b[1] = 3
        >>> b[3] = 1.6  #floats get coerced to ints
        >>> b[4] = "2"  #as do int strings
        >>> print(b)
        {1: 3, 3: 1, 4: 2}

        If count is zero, all 'matching items' are deleted from bag.

        >>> b[2] = 0
        >>> print(b)
        {1: 3, 3: 1, 4: 2}

        Counts for IntegerBag are allowed to be negative.

        >>> ib = IntegerBag(b)
        >>> ib[4] = -2
        >>> ib[5] -= 2
        >>> ib[1] -= 4
        >>> ib[3] -= 1
        >>> print(ib)
        {1: -1, 4: -2, 5: -2}

        Trying to set negative values on Bag reverts to zero.

        >>> b[4] = -2
        >>> b[4]
        0

        If count is non-integer, an exception is raised.

        >>> b[1] = "oops"  #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ValueError: invalid literal for int(): oops
        """
        count = self._filter(count)
        if count:
            # XXX should this call super instead of dict (for multiple inheritance)?
            dict.__setitem__(self, item, count)
        else:  # setting to 0 so discard key
            self.discard(item)

    def __str__(self):
        """Convert self to string with items in sorted order.

        >>> str(IntegerBag())
        '{}'
        >>> str(IntegerBag({'b': -2, 'a': 3, 'c': 1, 1: 0}))
        "{'a': 3, 'b': -2, 'c': 1}"
        >>> str(Bag.fromkeys("abacab"))
        "{'a': 3, 'b': 2, 'c': 1}"
        """
        if _DEBUG:
            self._validate()
        if not self:
            return '{}'  # nothing to sort
        keys = sorted(self)
        return '{%s}' % ', '.join(["%r: %r" % (k, self[k]) for k in keys])

    @staticmethod
    def _filter(value):
        """Coerces value to int and returns it, or raises ValueError/TypeError."""
        return int(value)

    def _validate(self):
        """Check class invariants.

        >>> b = IntegerBag.fromkeys("abc")
        >>> dict.__setitem__(b, 'a', "oops")
        >>> b._validate()  #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ValueError: invalid literal for int(): oops
        >>> b = Bag()
        >>> dict.__setitem__(b, 'a', 0)  #zero values are normally deleted
        >>> b
        {'a': 0}
        >>> b._validate()
        Traceback (most recent call last):
        AssertionError: zero value encountered
        >>> b = Bag()
        >>> dict.__setitem__(b, 'a', -1)  #negative values not allowed
        >>> b._validate()
        Traceback (most recent call last):
        AssertionError: unfiltered value
        """
        for count in self.values():
            assert count == self._filter(count), "unfiltered value"
            assert count, "zero value encountered"


class Bag(IntegerBag):
    """Standard bag class.  Allows only non-negative bag counts."""

    __slots__ = []

    def _size(self):
        """Returns total number of items in bag.

        >>> b = Bag.fromkeys("abacab")
        >>> b.size
        6
        """
        return sum(self.values())

    size = property(_size, None, None, "Sum of all bag values.")

    @staticmethod
    def _filter(value):
        """Coerces value to int, clamping negatives to 0."""
        return max(int(value), 0)


def _test():
    """Miscellaneous tests:

    Equality test.  Can compare against dictionary or bag types.

    >>> Bag.fromkeys("abacab") == {'a': 3, 'b': 2, 'c': 1}
    True
    >>> b, l = Bag.fromkeys([1, 2, 1, 3, 1]), [1, 1, 1, 3, 2]
    >>> b == l
    False
    >>> b == Bag.fromkeys(l) == IntegerBag.fromkeys(l)
    True

    Tests for non-zero:

    >>> b = Bag()
    >>> bool(b)
    False
    >>> b += [0]
    >>> bool(b)
    True
    """
    import doctest
    return doctest.testmod()


if __name__ == "__main__":
    # FIX: the original line was corrupted with stray prose after `_test(),`,
    # which was a syntax error.
    _test()
import json
import logging
from typing import List, Optional

from flask import abort, Blueprint, make_response, render_template, request, url_for
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from werkzeug.wrappers import Response

from backend.common.consts.event_type import EventType, SEASON_EVENT_TYPES
from backend.common.futures import TypedFuture
from backend.common.helpers.district_helper import DistrictHelper
from backend.common.helpers.event_helper import EventHelper
from backend.common.helpers.event_insights_helper import EventInsightsHelper
from backend.common.helpers.match_helper import MatchHelper
from backend.common.helpers.matchstats_helper import MatchstatsHelper
from backend.common.helpers.prediction_helper import PredictionHelper
from backend.common.manipulators.district_manipulator import DistrictManipulator
from backend.common.manipulators.event_details_manipulator import (
    EventDetailsManipulator,
)
from backend.common.models.district import District
from backend.common.models.district_ranking import DistrictRanking
from backend.common.models.event import Event
from backend.common.models.event_details import EventDetails
from backend.common.models.keys import DistrictKey, EventKey, Year
from backend.common.models.team import Team
from backend.common.queries.district_query import DistrictsInYearQuery
from backend.common.queries.event_query import DistrictEventsQuery, EventListQuery
from backend.common.queries.team_query import DistrictTeamsQuery

# Cron/taskqueue-only blueprint: every /tasks/math/enqueue/* handler fans out
# work to the matching /tasks/math/do/* handler via App Engine taskqueue.
blueprint = Blueprint("math", __name__)


@blueprint.route("/tasks/math/enqueue/district_points_calc/<int:year>")
def enqueue_event_district_points_calc(year: Year) -> Response:
    """
    Enqueues calculation of district points for all season events for a given year
    """
    # keys_only fetch: only the event keys are needed to build task URLs
    event_keys: List[ndb.Key] = Event.query(
        Event.year == year, Event.event_type_enum.IN(SEASON_EVENT_TYPES)
    ).fetch(None, keys_only=True)
    for event_key in event_keys:
        taskqueue.add(
            url=url_for(
                "math.event_district_points_calc", event_key=event_key.string_id()
            ),
            method="GET",
            target="py3-tasks-io",
            queue_name="default",
        )

    if (
        "X-Appengine-Taskname" not in request.headers
    ):  # Only write out if not in taskqueue
        return make_response(
            "Enqueued for: {}".format([event_key.id() for event_key in event_keys])
        )
    return make_response("")


@blueprint.route("/tasks/math/do/district_points_calc/<event_key>")
def event_district_points_calc(event_key: EventKey) -> Response:
    """
    Calculates district points for an event
    """
    event = Event.get_by_id(event_key)
    if event is None:
        abort(404)
    # Offseason events are rejected unless explicitly overridden via the
    # ?allow-offseason query parameter.
    if event.event_type_enum not in SEASON_EVENT_TYPES and not request.args.get(
        "allow-offseason", None
    ):
        return make_response(
            f"Can't calculate district points for a non-season event {event.key_name}!",
            400,
        )

    district_points = DistrictHelper.calculate_event_points(event)
    event_details = EventDetails(id=event_key, district_points=district_points)
    EventDetailsManipulator.createOrUpdate(event_details)

    # Enqueue task to update rankings
    if event.district_key:
        taskqueue.add(
            url=url_for(
                "math.district_rankings_calc",
                district_key=event.district_key.string_id(),
            ),
            method="GET",
            target="py3-tasks-io",
            queue_name="default",
        )

    if (
        "X-Appengine-Taskname" not in request.headers
    ):  # Only write out if not in taskqueue
        return make_response(json.dumps(district_points, sort_keys=True, indent=2))
    return make_response("")


@blueprint.route("/tasks/math/enqueue/district_rankings_calc/<int:year>")
def enqueue_district_rankings_calc(year: Year) -> Response:
    """
    Enqueues calculation of rankings for all districts for a given year
    """
    districts = DistrictsInYearQuery(int(year)).fetch()
    district_keys = [district.key.id() for district in districts]
    for district_key in district_keys:
        # two tasks per district: local ranking calc plus an FRC API refresh
        taskqueue.add(
            url=url_for("math.district_rankings_calc", district_key=district_key),
            method="GET",
            target="py3-tasks-io",
            queue_name="default",
        )
        taskqueue.add(
            url=url_for("frc_api.district_rankings", district_key=district_key),
            method="GET",
            target="py3-tasks-io",
            queue_name="default",
        )

    if (
        "X-Appengine-Taskname" not in request.headers
    ):  # Only write out if not in taskqueue
        return make_response(f"Enqueued for: {district_keys}")
    return make_response("")


@blueprint.route("/tasks/math/do/district_rankings_calc/<district_key>")
def district_rankings_calc(district_key: DistrictKey) -> Response:
    """
    Calculates district rankings for a district year
    """
    district = District.get_by_id(district_key)
    if not district:
        return make_response(f"District {district_key} not found", 404)

    # fire both queries concurrently; events are resolved first below
    events_future: TypedFuture[List[Event]] = DistrictEventsQuery(
        district_key
    ).fetch_async()
    teams_future: TypedFuture[List[Team]] = DistrictTeamsQuery(
        district_key
    ).fetch_async()

    events = events_future.get_result()
    for event in events:
        event.prep_details()
    events = EventHelper.sorted_events(events)
    team_totals = DistrictHelper.calculate_rankings(events, teams_future, district.year)

    # NOTE(review): ranks are assigned in team_totals iteration order, so this
    # assumes calculate_rankings returns teams already sorted by points —
    # confirm against DistrictHelper.
    rankings: List[DistrictRanking] = []
    current_rank = 1
    for key, points in team_totals.items():
        point_detail = DistrictRanking(
            rank=current_rank,
            team_key=key,
            event_points=[],
            rookie_bonus=points.get("rookie_bonus", 0),
            point_total=points["point_total"],
        )
        for event, event_points in points["event_points"]:
            event_points["event_key"] = event.key_name
            event_points["district_cmp"] = (
                event.event_type_enum == EventType.DISTRICT_CMP
                or event.event_type_enum == EventType.DISTRICT_CMP_DIVISION
            )
            point_detail["event_points"].append(event_points)

        rankings.append(point_detail)
        current_rank += 1

    # only persist when something was computed; empty results leave the
    # stored district untouched
    if rankings:
        district.rankings = rankings
        DistrictManipulator.createOrUpdate(district)

    if (
        "X-Appengine-Taskname" not in request.headers
    ):  # Only write out if not in taskqueue
        return make_response(f"Finished calculating rankings for: {district_key}")
    return make_response("")


@blueprint.route("/tasks/math/enqueue/event_matchstats/now", defaults={"year": None})
@blueprint.route("/tasks/math/enqueue/event_matchstats/<int:year>")
def enqueue_event_matchstats(year: Optional[Year]) -> str:
    """
    Enqueues Matchstats calculation
    """
    # /now enqueues only currently-active events; /<year> enqueues the full year
    if year is None:
        events = EventHelper.events_within_a_day()
    else:
        events: List[Event] = EventListQuery(year=year).fetch()

    events = EventHelper.sorted_events(events)
    for event in events:
        taskqueue.add(
            url="/tasks/math/do/event_matchstats/" + event.key_name,
            method="GET",
            target="py3-tasks-io",
            queue_name="run-in-order",  # Because predictions depend on past events
        )

    template_values = {
        "event_count": len(events),
        "year": year,
    }

    return render_template("math/event_matchstats_enqueue.html", **template_values)


@blueprint.route("/tasks/math/do/event_matchstats/<event_key>")
def event_matchstats_calc(event_key: EventKey) -> Response:
    """
    Calculates match stats (OPR/DPR/CCWM) for an event
    Calculates predictions for an event
    Calculates insights for an event
    """
    event = Event.get_by_id(event_key)
    if not event:
        abort(404)

    matchstats_dict = MatchstatsHelper.calculate_matchstats(event.matches, event.year)
    # all-empty component dicts means the calculation produced nothing usable
    if not any([v != {} for v in matchstats_dict.values()]):
        logging.warning("Matchstat calculation for {} failed!".format(event_key))
        matchstats_dict = None

    predictions_dict = None
    # predictions only run for years with trained models, season events, or
    # events that explicitly opt in
    if (
        event.year in {2016, 2017, 2018, 2019, 2020, 2022}
        and event.event_type_enum in SEASON_EVENT_TYPES
    ) or event.enable_predictions:
        sorted_matches = MatchHelper.play_order_sorted_matches(event.matches)
        (
            match_predictions,
            match_prediction_stats,
            stat_mean_vars,
        ) = PredictionHelper.get_match_predictions(sorted_matches)
        (
            ranking_predictions,
            ranking_prediction_stats,
        ) = PredictionHelper.get_ranking_predictions(sorted_matches, match_predictions)

        predictions_dict = {
            "match_predictions": match_predictions,
            "match_prediction_stats": match_prediction_stats,
            "stat_mean_vars": stat_mean_vars,
            "ranking_predictions": ranking_predictions,
            "ranking_prediction_stats": ranking_prediction_stats,
        }

    event_insights = EventInsightsHelper.calculate_event_insights(
        event.matches, event.year
    )

    event_details = EventDetails(
        id=event_key,
        matchstats=matchstats_dict,
        predictions=predictions_dict,
        insights=event_insights,
    )
    EventDetailsManipulator.createOrUpdate(event_details)

    template_values = {
        "matchstats_dict": matchstats_dict,
    }

    if (
        "X-Appengine-Taskname" not in request.headers
    ):  # Only write out if not in taskqueue
        return make_response(
            render_template("math/event_matchstats_do.html", **template_values)
        )
    return make_response("")
#! /usr/bin/env python2 """ 2016-10-21: Modified version of balbuzard application for AL, original code found here: https://github.com/decalage2/balbuzard """ """ balbuzard - v0.20 2014-06-29 Philippe Lagadec Balbuzard is a tool to quickly extract patterns from suspicious files for malware analysis (IP addresses, domain names, known file headers and strings, etc). For more info and updates: http://www.decalage.info/balbuzard """ # LICENSE: # # balbuzard is copyright (c) 2007-2014, Philippe Lagadec (http://www.decalage.info) # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
__version__ = '0.20' #------------------------------------------------------------------------------ # CHANGELOG: # 2007-07-11 v0.01 PL: - 1st version # 2007-07-30 v0.02 PL: - added list of patterns # 2007-07-31 v0.03 PL: - added patterns # - added hexadecimal dump # 2007-08-09 v0.04 PL: - improved some regexs, added Petite detection # 2008-06-06 v0.05 PL: - escape non-printable characters with '\xNN' when # displaying matches # - optional custom pattern list in reScan_custom.py # - optional call to magic.py to guess filetype # 2011-05-06 v0.06 PL: - added bruteforce functions # 2013-02-24 v0.07 PL: - renamed rescan to balbuzard # - changed license from CeCILL v2 to BSD # - added patterns for URL, e-mail, Flash # - new Pattern class to add patterns # - pattern can now be a regex or a string, with weigth # - moved bruteforce functions to balbucrack # 2013-03-18 v0.08 PL: - a few more/improved patterns # - optionparser with option -s for short display # 2013-03-21 v0.09 PL: - open file from password-protected zip (inspired from # Didier Steven's pdfid, thanks Didier! 
:-) # - improved plugin system # 2013-03-26 v0.10 PL: - improved Pattern and Pattern_re classes # 2013-07-31 v0.11 PL: - added support for Yara plugins # 2013-08-28 v0.12 PL: - plugins can now be in subfolders # - improved OLE2 pattern # 2013-12-03 v0.13 PL: - moved patterns to separate file patterns.py # - fixed issue when balbuzard launched from another dir # - added CSV output # 2013-12-04 v0.14 PL: - can now scan several files from command line args # - now short display is default, -v for hex view # 2013-12-09 v0.15 PL: - Pattern_re: added filter function to ignore false # positives # 2014-01-14 v0.16 PL: - added riglob, ziglob # - new option -r to find files recursively in subdirs # - new option -f to find files within zips with wildcards # 2014-01-23 v0.17 PL: - Pattern: added partial support for filter function # 2014-02-24 v0.18 PL: - fixed bug with main_dir when balbuzard is imported # 2014-03-21 v0.19 PL: - fixed bug when Yara-python is not installed # 2014-06-29 v0.20 PL: - simplified bbcrack transforms, added Yara signatures #------------------------------------------------------------------------------ # TODO: # + add yara plugins support to Balbuzard.count and scan_profiling # + merge Balbuzard.scan_hexdump and short # + option to choose which plugins to load: all (default), none, python or yara # only # + option to use the Yara-python engine for searching (translating balbuzard # patterns to yara at runtime) # - Yara plugins: keep track of the filename containing each set of Yara rules # - option to support Unicode strings? (need to check 2 alignments and 2 byte # orders, or simply insert \x00 between all chars, e.g. 'T\x00E\x00S\x00T') # + improve patterns to avoid some false positives: maybe use pefile or magic.py ? # - HTML report with color highlighting # - GUI ? # - optional use of other magic libs (TrIDscan, pymagic, python-magic, etc: see PyPI) # - provide samples # - RTF hex object decoder? 
# - option to decode stream before searching: unicode, hex, base64, etc
# - options for XML outputs
# - export to OpenIOC?
# ? zip file: open all files instead of only the 1st one, or add an option to
#   specify the filename(s) to open within the zip, with wildcards?

# ISSUES:
# - BUG: it seems that re ignores null bytes in patterns, despite what the doc says?
# - BUG: the URL pattern is not fully correct, need to find a better one
# - BUG: the e-mail pattern catches a lot of false positives.

# NOTE(review): this is legacy Python 2 source (print statements, xrange,
# string.lower, time.clock) and will not run under Python 3 as-is.

#--- IMPORTS ------------------------------------------------------------------

import sys, re, os, os.path, optparse, glob, zipfile, time, string, fnmatch, imp
#import csv

# try to import yara-python:
# try:
#     import yara
#     YARA = True
# except:
#     YARA = False

#--- CLASSES ------------------------------------------------------------------

class Pattern (object):
    """
    a Pattern object is a string or a list of strings to be searched in data.
    Attributes:
        - name: str, description of the pattern for display
        - pat: str or list/tuple of strings to be searched
        - nocase: bool, if True, search is case-insensitive
        - single: bool, if True search will stop at the first occurence
        - weight: int, weight used by balbucrack
        - filt: function to filter out false positives, should be a function
          with arguments (value, index, pattern), returning True when acceptable
          or False when it is a false positive.
    """

    def __init__(self, name, pat=None, nocase=False, single=False, weight=1, filt=None):
        self.name = name
        # self.pat should always be a list of strings:
        if isinstance(pat, str):
            self.pat = [pat]
        else:
            # else we assume it's a sequence:
            self.pat = pat
        self.nocase = nocase
        if nocase:
            # transform pat to lowercase
            # (py2 idiom: map returns a list, string.lower lowercases each pattern)
            self.pat_lower = map(string.lower, self.pat)
        self.single = single
        self.weight = weight
        # for profiling:
        self.total_time = 0
        self.filter = filt

    def find_all (self, data, data_lower=None):
        """
        find all occurences of pattern in data.
        data_lower should be set to data.lower(), if there are case-insensitive
        patterns (it's better to do it only once)
        return a list of tuples (index, string)
        """
        found = []
        # search in the lowercased copy for nocase patterns, but always slice
        # the match out of the ORIGINAL data so the reported case is real
        if self.nocase:
            d = data_lower
            pat = self.pat_lower
        else:
            d = data
            pat = self.pat
        for s in pat:
            l = len(s)  # NOTE(review): unused local, kept as-is
            for i in str_find_all(d, s):
                # the matched string is not always s, case can differ:
                match = data[i:i+len(s)]
                valid = True
                if self.filter is not None:
                    valid = self.filter(value=match, index=i, pattern=self)
                if valid: found.append((i, match))
                # debug message:
                else: print 'Filtered out %s: %s' % (self.name, repr(match))
        return found

    def count (self, data, data_lower=None):
        """
        count all occurences of pattern in data. Except for those with
        single=True, only the first occurence of any string is counted.
        data_lower should be set to data.lower(), if there are case-insensitive
        patterns (it's better to do it only once)
        return an integer
        """
        #TODO: add support for filter? (will be much slower...)
        count = 0
        if self.nocase:
            d = data_lower
            pat = self.pat_lower
        else:
            d = data
            pat = self.pat
        if not self.single:
            # full count across all alternative strings
            for s in pat:
                count += d.count(s)
            return count
        else:
            # single mode: 1 as soon as any alternative is present
            for s in pat:
                if s in d:
                    return 1
            return 0


class Pattern_re (Pattern):
    """
    a Pattern_re object is a regular expression to be searched in data.
    Attributes:
        - name: str, description of the pattern for display
        - pat: str, regular expression to be searched
        - trigger: str or list/tuple of strings to be searched before pat
        - nocase: bool, if True, search is case-insensitive
        - single: bool, if True search will stop at the first occurence
        - weight: int, weight used by balbucrack
        - filt: function to filter out false positives, should be a function
          with arguments (value, index, pattern), returning True when acceptable
          or False when it is a false positive.
    """

    def __init__(self, name, pat=None, trigger=None, nocase=False, single=False, weight=1, filt=None):
        # first call the Pattern constructor:
        Pattern.__init__(self, name, pat, nocase, single, weight)
        # compile regex
        flags = 0
        if nocase:
            flags = re.IGNORECASE
        self.pat = re.compile(pat, flags)
        self.trigger = trigger
        if trigger is not None:
            # create second pattern for trigger, for single search:
            # (cheap plain-string pre-check before running the regex)
            self.trigger_pat = Pattern(name, pat=trigger, nocase=nocase, single=True)
        self.filter = filt
        #print 'pattern %s: filter=%s' % (self.name, self.filter)

    def find_all (self, data, data_lower=None):
        """
        find all occurences of pattern in data.
        data_lower should be set to data.lower(), if there are case-insensitive
        patterns (it's better to do it only once)
        return a list of tuples (index, string)
        """
        found = []
        if self.trigger is not None:
            # when trigger is specified, search trigger first and stop if not
            # found:
            if self.trigger_pat.count(data, data_lower) == 0:
                return found
        for m in self.pat.finditer(data):
            valid = True
            if self.filter is not None:
                valid = self.filter(value=m.group(), index=m.start(), pattern=self)
            if valid: found.append((m.start(), m.group()))
            # debug message:
            #else: print 'Filtered out %s: %s' % (self.name, repr(m.group()))
        return found

    def count (self, data, data_lower=None):
        """
        count all occurences of pattern in data.
        data_lower should be set to data.lower(), if there are case-insensitive
        patterns (it's better to do it only once)
        return an integer
        """
        if self.trigger is not None:
            # when trigger is specified, search trigger first and stop if not
            # found:
            if self.trigger_pat.count(data, data_lower) == 0:
                return 0
        # when no filter is defined, quickest way to count:
        if self.filter is None:
            return len(self.pat.findall(data))
        # otherwise, need to call filter for each match:
        c = 0
        for m in self.pat.finditer(data):
            valid = self.filter(value=m.group(), index=m.start(), pattern=self)
            if valid:
                c += 1
        return c


#------------------------------------------------------------------------------
class Balbuzard (object):
    """
    class to scan a string of data, searching for a set of patterns (strings
    and regular expressions)
    """

    def __init__(self, patterns=None, yara_rules=None):
        self.patterns = patterns
        if patterns == None:
            self.patterns = []
        # self.yara_rules = yara_rules

##    def add_pattern(self, name, regex=None, string=None, weight=1):
##        self.patterns.append(Pattern(name, regex, string, weight))

    def list_patterns(self):
        """
        Adding function for FrankenStrings to get regex patterns when needed
        """
        return self.patterns

    def scan (self, data):
        """
        Scans data for all patterns. This is an iterator: for each pattern
        found, yields the Pattern object and a list of matches as tuples
        (index in data, matched string).
        """
        # prep lowercase version of data for case-insensitive patterns
        data_lower = data.lower()
        for pattern in self.patterns:
            matches = pattern.find_all(data, data_lower)
            if len(matches)>0:
                yield pattern, matches
        # if YARA and self.yara_rules is not None:
        #     for rules in self.yara_rules:
        #         yara_matches = rules.match(data=data)
        #         for match in yara_matches:
        #             # create a fake pattern object, with a single match:
        #             pattern = Pattern(match.rule)
        #             matches = []
        #             for s in match.strings:
        #                 offset, id, d = s
        #                 matches.append((offset, d))
        #             yield pattern, matches

    def scan_profiling (self, data):
        """
        Scans data for all patterns. This is an iterator: for each pattern
        found, yields the Pattern object and a list of matches as tuples
        (index in data, matched string).
        Version with profiling, to check which patterns take time.
        """
        # NOTE(review): time.clock() was removed in Python 3.8 — another sign
        # this module targets Python 2.
        start = time.clock()
        # prep lowercase version of data for case-insensitive patterns
        data_lower = data.lower()
        for pattern in self.patterns:
            start_pattern = time.clock()
            matches = pattern.find_all(data, data_lower)
            # per-pattern timing is accumulated on the Pattern object itself
            pattern.time = time.clock()-start_pattern
            pattern.total_time += pattern.time
            if len(matches)>0:
                yield pattern, matches
        self.time = time.clock()-start

    def count (self, data):
        """
        Scans data for all patterns. This is an iterator: for each pattern
        found, yields the Pattern object and the count as int.
        """
        # prep lowercase version of data for case-insensitive patterns
        data_lower = data.lower()
        for pattern in self.patterns:
            count = pattern.count(data, data_lower)
            if count:
                yield pattern, count

    def scan_display (self, data, filename, hexdump=False, csv_writer=None):
        """
        Scans data for all patterns, displaying an hexadecimal dump for each
        match on the console (if hexdump=True), or one line for each match
        (if hexdump=False).
        """
        for pattern, matches in self.scan(data):
            if hexdump:
                print "-"*79
                print "%s:" % pattern.name
            for index, match in matches:
                # limit matched string display to 50 chars:
                m = repr(match)
                if len(m)> 50: m = m[:24]+'...'+m[-23:]
                if hexdump:
                    print "at %08X: %s" % (index, m)
                    # 5 lines of hexadecimal dump around the pattern: 2 lines = 32 bytes
                    # (start/end rounded down to 16-byte row boundaries)
                    start = max(index-32, 0) & 0xFFFFFFF0
                    index_end = index + len(match)
                    end = min(index_end+32+15, len(data)) & 0xFFFFFFF0
                    length = end-start
                    #print start, end, length
                    print hexdump3(data[start:end], length=16, startindex=start)
                    print ""
                else:
                    print "at %08X: %s - %s" % (index, pattern.name, m)
                if csv_writer is not None:
                    #['Filename', 'Index', 'Pattern name', 'Found string', 'Length']
                    csv_writer.writerow([filename, '0x%08X' % index, pattern.name, m, len(match)])
        # blank line between each file:
        print ''
##            if item == "EXE MZ headers" and MAGIC:
##                # Check if it's really a EXE header
##                print "Magic: %s\n" % magic.whatis(data[m.start():])


#--- GLOBALS ------------------------------------------------------------------

patterns = []

#--- FUNCTIONS ----------------------------------------------------------------

##def add_pattern(name, regex=None, string=None, weight=1):
##    patterns.append(Pattern(name, regex, string, weight))

# HEXDUMP from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/142812
# translation table mapping non-printable bytes to '.' for hexdump output
FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])

##def hexdump(src, length=8):
##    N=0; result=''
##    while src:
##        s,src = src[:length],src[length:]
##        hexa = ' '.join(["%02X"%ord(x) for x in s])
##        s = s.translate(FILTER)
##        result += "%04X   %-*s   %s\n" % (N, length*3, hexa, s)
##        N+=length
##    return result
##
##def hexdump2(src, length=8):
##    result=[]
##    for i in xrange(0, len(src), length):
##        s = src[i:i+length]
##        hexa = ' '.join(["%02X"%ord(x) for x in s])
##        printable = s.translate(FILTER)
##        result.append("%04X   %-*s   %s\n" % (i, length*3, hexa, printable))
##    return ''.join(result)

# my improved hexdump, to add a start index:
def hexdump3(src, length=8, startindex=0):
    """
    Returns a hexadecimal dump of a binary string.
    length: number of bytes per row.
    startindex: index of 1st byte.
    """
    result=[]
    for i in xrange(0, len(src), length):
        s = src[i:i+length]
        hexa = ' '.join(["%02X"%ord(x) for x in s])
        printable = s.translate(FILTER)
        result.append("%04X   %-*s   %s\n" % (i+startindex, length*3, hexa, printable))
    return ''.join(result)


def str_find_all(a_str, sub):
    # generator yielding every (possibly overlapping-free) start index of sub
    # in a_str; advances by len(sub) after each hit
    start = 0
    while True:
        start = a_str.find(sub, start)
        if start == -1: return
        yield start
        start += len(sub)


# recursive glob function to find plugin files in any subfolder:
# inspired by http://stackoverflow.com/questions/14798220/how-can-i-search-sub-folders-using-glob-glob-module-in-python
def rglob (path, pattern='*.*'):
    """
    Recursive glob: similar to glob.glob, but finds files recursively in
    all subfolders of path.
    path: root directory where to search files
    pattern: pattern for filenames, using wildcards, e.g. *.txt
    """
    #TODO: more compatible API with glob: use single param, split path from pattern
    return [os.path.join(dirpath, f)
        for dirpath, dirnames, files in os.walk(path)
        for f in fnmatch.filter(files, pattern)]


def riglob (pathname):
    """
    Recursive iglob: similar to glob.iglob, but finds files recursively in
    all subfolders of path.
pathname: root directory where to search files followed by pattern for filenames, using wildcards, e.g. *.txt """ path, filespec = os.path.split(pathname) for dirpath, dirnames, files in os.walk(path): for f in fnmatch.filter(files, filespec): yield os.path.join(dirpath, f) def ziglob (zipfileobj, pathname): """ iglob in a zip: similar to glob.iglob, but finds files within a zip archive. - zipfileobj: zipfile.ZipFile object - pathname: root directory where to search files followed by pattern for filenames, using wildcards, e.g. *.txt """ files = zipfileobj.namelist() for f in files: print f for f in fnmatch.filter(files, pathname): yield f def iter_files(files, recursive=False, zip_password=None, zip_fname='*'): """ Open each file provided as argument: - files is a list of arguments - if zip_password is None, each file is opened and read as-is. Wilcards are supported. - if not, then each file is opened as a zip archive with the provided password - then files matching zip_fname are opened from the zip archive Iterator: yields (filename, data) for each file """ # choose recursive or non-recursive iglob: if recursive: iglob = riglob else: iglob = glob.iglob for filespec in files: for filename in iglob(filespec): if zip_password is not None: # Each file is a zip archive: print 'Opening zip archive %s with provided password' % filename z = zipfile.ZipFile(filename, 'r') print 'Looking for file(s) matching "%s"' % zip_fname for filename in ziglob(z, zip_fname): print 'Opening file in zip archive:', filename data = z.read(filename, zip_password) yield filename, data else: # normal file print 'Opening file', filename data = open(filename, 'rb').read() yield filename, data def relpath(path, start='.'): """ convert a path to a relative path, using os.path.relpath on Python 2.6+ On Python 2.5 or older, the path is not changed, but no exception is raised. 
(this function is just for backward compatibility) """ # with python 2.6+, make it a relative path: try: return os.path.relpath(path, start) except: return path #=== INITALIZATION ============================================================ # get main directory where this script is located: main_dir = os.path.dirname(__file__) #print 'main dir:', main_dir #plugins_dir = os.path.join(main_dir, 'plugins') #print 'plugins dir:', plugins_dir # load patterns patfile = os.path.join(main_dir, 'patterns.py') # save __doc__, else it seems to be overwritten: d = __doc__ #print 'patfile:', patfile execfile(patfile) __doc__ = d del d #=== MAIN ===================================================================== if __name__ == '__main__': usage = 'usage: %prog [options] <filename> [filename2 ...]' parser = optparse.OptionParser(usage=usage) ## parser.add_option('-o', '--outfile', dest='outfile', ## help='output file') parser.add_option('-c', '--csv', dest='csv', help='export results to a CSV file') parser.add_option("-v", action="store_true", dest="verbose", help='verbose display, with hex view.') parser.add_option("-r", action="store_true", dest="recursive", help='find files recursively in subdirectories.') parser.add_option("-z", "--zip", dest='zip_password', type='str', default=None, help='if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)') parser.add_option("-f", "--zipfname", dest='zip_fname', type='str', default='*', help='if the file is a zip archive, file(s) to be opened within the zip. Wildcards * and ? are supported. 
(default:*)') (options, args) = parser.parse_args() # Print help if no argurments are passed if len(args) == 0: print __doc__ parser.print_help() sys.exit() # load plugins #for f in rglob(plugins_dir, 'bbz*.py'): # glob.iglob('plugins/bbz*.py'): # print 'Loading plugin from', relpath(f, plugins_dir) # execfile(f) # load yara plugins # if YARA: # yara_rules = [] # for f in rglob(plugins_dir, '*.yara'): #glob.iglob('plugins/*.yara'): # or bbz*.yara? # print 'Loading yara plugin from', relpath(f, plugins_dir) # yara_rules.append(yara.compile(f)) # else: # yara_rules = None # open CSV file # if options.csv: # print 'Writing output to CSV file: %s' % options.csv # csvfile = open(options.csv, 'wb') # csv_writer = csv.writer(csvfile) # csv_writer.writerow(['Filename', 'Index', 'Pattern name', # 'Found string', 'Length']) # else: # csv_writer = None # # # close CSV file # if options.csv: # csvfile.close() # This was coded while listening to The National "Boxer".
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

from thrift.Thrift import TException, TType, TFrozenDict
from thrift.transport.TTransport import TTransportException
from ..compat import binary_to_str, str_to_binary

import six
import sys

from itertools import islice
from six.moves import zip


class TProtocolException(TException):
    """Custom Protocol Exception class"""

    UNKNOWN = 0
    INVALID_DATA = 1
    NEGATIVE_SIZE = 2
    SIZE_LIMIT = 3
    BAD_VERSION = 4
    NOT_IMPLEMENTED = 5
    DEPTH_LIMIT = 6

    def __init__(self, type=UNKNOWN, message=None):
        TException.__init__(self, message)
        self.type = type


class TProtocolBase(object):
    """Base class for Thrift protocol driver.

    Concrete protocols (binary, compact, JSON, ...) override the write*/read*
    primitives; the container/struct helpers below are shared.
    """

    def __init__(self, trans):
        # trans: the underlying TTransport to read from / write to.
        self.trans = trans

    @staticmethod
    def _check_length(limit, length):
        """Validate a decoded length: non-negative and within limit."""
        if length < 0:
            raise TTransportException(TTransportException.NEGATIVE_SIZE,
                                      'Negative length: %d' % length)
        if limit is not None and length > limit:
            raise TTransportException(TTransportException.SIZE_LIMIT,
                                      'Length exceeded max allowed: %d' % limit)

    # --- write primitives: no-op stubs, overridden by concrete protocols ---

    def writeMessageBegin(self, name, ttype, seqid):
        pass

    def writeMessageEnd(self):
        pass

    def writeStructBegin(self, name):
        pass

    def writeStructEnd(self):
        pass

    def writeFieldBegin(self, name, ttype, fid):
        pass

    def writeFieldEnd(self):
        pass

    def writeFieldStop(self):
        pass

    def writeMapBegin(self, ktype, vtype, size):
        pass

    def writeMapEnd(self):
        pass

    def writeListBegin(self, etype, size):
        pass

    def writeListEnd(self):
        pass

    def writeSetBegin(self, etype, size):
        pass

    def writeSetEnd(self):
        pass

    def writeBool(self, bool_val):
        pass

    def writeByte(self, byte):
        pass

    def writeI16(self, i16):
        pass

    def writeI32(self, i32):
        pass

    def writeI64(self, i64):
        pass

    def writeDouble(self, dub):
        pass

    def writeString(self, str_val):
        # strings are transported as binary; encoding handled by compat shim
        self.writeBinary(str_to_binary(str_val))

    def writeBinary(self, str_val):
        pass

    def writeUtf8(self, str_val):
        self.writeString(str_val.encode('utf8'))

    # --- read primitives: no-op stubs, overridden by concrete protocols ---

    def readMessageBegin(self):
        pass

    def readMessageEnd(self):
        pass

    def readStructBegin(self):
        pass

    def readStructEnd(self):
        pass

    def readFieldBegin(self):
        pass

    def readFieldEnd(self):
        pass

    def readMapBegin(self):
        pass

    def readMapEnd(self):
        pass

    def readListBegin(self):
        pass

    def readListEnd(self):
        pass

    def readSetBegin(self):
        pass

    def readSetEnd(self):
        pass

    def readBool(self):
        pass

    def readByte(self):
        pass

    def readI16(self):
        pass

    def readI32(self):
        pass

    def readI64(self):
        pass

    def readDouble(self):
        pass

    def readString(self):
        return binary_to_str(self.readBinary())

    def readBinary(self):
        pass

    def readUtf8(self):
        return self.readString().decode('utf8')

    def skip(self, ttype):
        """Read and discard one value of the given wire type."""
        if ttype == TType.STOP:
            return
        elif ttype == TType.BOOL:
            self.readBool()
        elif ttype == TType.BYTE:
            self.readByte()
        elif ttype == TType.I16:
            self.readI16()
        elif ttype == TType.I32:
            self.readI32()
        elif ttype == TType.I64:
            self.readI64()
        elif ttype == TType.DOUBLE:
            self.readDouble()
        elif ttype == TType.STRING:
            self.readString()
        elif ttype == TType.STRUCT:
            name = self.readStructBegin()
            while True:
                (name, ttype, id) = self.readFieldBegin()
                if ttype == TType.STOP:
                    break
                self.skip(ttype)
                self.readFieldEnd()
            self.readStructEnd()
        elif ttype == TType.MAP:
            (ktype, vtype, size) = self.readMapBegin()
            for i in range(size):
                self.skip(ktype)
                self.skip(vtype)
            self.readMapEnd()
        elif ttype == TType.SET:
            (etype, size) = self.readSetBegin()
            for i in range(size):
                self.skip(etype)
            self.readSetEnd()
        elif ttype == TType.LIST:
            (etype, size) = self.readListBegin()
            for i in range(size):
                self.skip(etype)
            self.readListEnd()

    # tuple of: ( 'reader method' name, is_container bool, 'writer_method' name )
    _TTYPE_HANDLERS = (
        (None, None, False),  # 0 TType.STOP
        (None, None, False),  # 1 TType.VOID # TODO: handle void?
        ('readBool', 'writeBool', False),  # 2 TType.BOOL
        ('readByte', 'writeByte', False),  # 3 TType.BYTE and I08
        ('readDouble', 'writeDouble', False),  # 4 TType.DOUBLE
        (None, None, False),  # 5 undefined
        ('readI16', 'writeI16', False),  # 6 TType.I16
        (None, None, False),  # 7 undefined
        ('readI32', 'writeI32', False),  # 8 TType.I32
        (None, None, False),  # 9 undefined
        ('readI64', 'writeI64', False),  # 10 TType.I64
        ('readString', 'writeString', False),  # 11 TType.STRING and UTF7
        ('readContainerStruct', 'writeContainerStruct', True),  # 12 *.STRUCT
        ('readContainerMap', 'writeContainerMap', True),  # 13 TType.MAP
        ('readContainerSet', 'writeContainerSet', True),  # 14 TType.SET
        ('readContainerList', 'writeContainerList', True),  # 15 TType.LIST
        (None, None, False),  # 16 TType.UTF8 # TODO: handle utf8 types?
        (None, None, False)  # 17 TType.UTF16 # TODO: handle utf16 types?
    )

    def _ttype_handlers(self, ttype, spec):
        """Resolve (reader, writer, is_container) for a ttype/spec pair."""
        if spec == 'BINARY':
            if ttype != TType.STRING:
                raise TProtocolException(type=TProtocolException.INVALID_DATA,
                                         message='Invalid binary field type %d' % ttype)
            return ('readBinary', 'writeBinary', False)
        if sys.version_info[0] == 2 and spec == 'UTF8':
            if ttype != TType.STRING:
                raise TProtocolException(type=TProtocolException.INVALID_DATA,
                                         message='Invalid string field type %d' % ttype)
            return ('readUtf8', 'writeUtf8', False)
        return self._TTYPE_HANDLERS[ttype] if ttype < len(self._TTYPE_HANDLERS) else (None, None, False)

    def _read_by_ttype(self, ttype, spec, espec):
        """Infinite generator yielding successive values read as ttype."""
        reader_name, _, is_container = self._ttype_handlers(ttype, spec)
        if reader_name is None:
            raise TProtocolException(type=TProtocolException.INVALID_DATA,
                                     message='Invalid type %d' % (ttype))
        reader_func = getattr(self, reader_name)
        read = (lambda: reader_func(espec)) if is_container else reader_func
        while True:
            yield read()

    def readFieldByTType(self, ttype, spec):
        # BUGFIX: use the builtin next() instead of the Python-2-only
        # generator .next() method; the rest of this module is already
        # six-based py2/py3 compatible (matches upstream thrift).
        return next(self._read_by_ttype(ttype, spec, spec))

    def readContainerList(self, spec):
        ttype, tspec, is_immutable = spec
        (list_type, list_len) = self.readListBegin()
        # TODO: compare types we just decoded with thrift_spec
        elems = islice(self._read_by_ttype(ttype, spec, tspec), list_len)
        results = (tuple if is_immutable else list)(elems)
        self.readListEnd()
        return results

    def readContainerSet(self, spec):
        ttype, tspec, is_immutable = spec
        (set_type, set_len) = self.readSetBegin()
        # TODO: compare types we just decoded with thrift_spec
        elems = islice(self._read_by_ttype(ttype, spec, tspec), set_len)
        results = (frozenset if is_immutable else set)(elems)
        self.readSetEnd()
        return results

    def readContainerStruct(self, spec):
        (obj_class, obj_spec) = spec
        obj = obj_class()
        obj.read(self)
        return obj

    def readContainerMap(self, spec):
        ktype, kspec, vtype, vspec, is_immutable = spec
        (map_ktype, map_vtype, map_len) = self.readMapBegin()
        # TODO: compare types we just decoded with thrift_spec and
        # abort/skip if types disagree
        keys = self._read_by_ttype(ktype, spec, kspec)
        vals = self._read_by_ttype(vtype, spec, vspec)
        keyvals = islice(zip(keys, vals), map_len)
        results = (TFrozenDict if is_immutable else dict)(keyvals)
        self.readMapEnd()
        return results

    def readStruct(self, obj, thrift_spec, is_immutable=False):
        """Read a struct into obj (or, if is_immutable, build obj(**fields))."""
        if is_immutable:
            fields = {}
        self.readStructBegin()
        while True:
            (fname, ftype, fid) = self.readFieldBegin()
            if ftype == TType.STOP:
                break
            try:
                field = thrift_spec[fid]
            except IndexError:
                # unknown field id: skip its value on the wire
                self.skip(ftype)
            else:
                if field is not None and ftype == field[1]:
                    fname = field[2]
                    fspec = field[3]
                    val = self.readFieldByTType(ftype, fspec)
                    if is_immutable:
                        fields[fname] = val
                    else:
                        setattr(obj, fname, val)
                else:
                    # type mismatch or spec hole: skip the value
                    self.skip(ftype)
            self.readFieldEnd()
        self.readStructEnd()
        if is_immutable:
            return obj(**fields)

    def writeContainerStruct(self, val, spec):
        val.write(self)

    def writeContainerList(self, val, spec):
        ttype, tspec, _ = spec
        self.writeListBegin(ttype, len(val))
        for _ in self._write_by_ttype(ttype, val, spec, tspec):
            pass
        self.writeListEnd()

    def writeContainerSet(self, val, spec):
        ttype, tspec, _ = spec
        self.writeSetBegin(ttype, len(val))
        for _ in self._write_by_ttype(ttype, val, spec, tspec):
            pass
        self.writeSetEnd()

    def writeContainerMap(self, val, spec):
        ktype, kspec, vtype, vspec, _ = spec
        self.writeMapBegin(ktype, vtype, len(val))
        for _ in zip(self._write_by_ttype(ktype, six.iterkeys(val), spec, kspec),
                     self._write_by_ttype(vtype, six.itervalues(val), spec, vspec)):
            pass
        self.writeMapEnd()

    def writeStruct(self, obj, thrift_spec):
        self.writeStructBegin(obj.__class__.__name__)
        for field in thrift_spec:
            if field is None:
                continue
            fname = field[2]
            val = getattr(obj, fname)
            if val is None:
                # skip writing out unset fields
                continue
            fid = field[0]
            ftype = field[1]
            fspec = field[3]
            self.writeFieldBegin(fname, ftype, fid)
            self.writeFieldByTType(ftype, val, fspec)
            self.writeFieldEnd()
        self.writeFieldStop()
        self.writeStructEnd()

    def _write_by_ttype(self, ttype, vals, spec, espec):
        """Generator writing each value in vals with the ttype's writer."""
        _, writer_name, is_container = self._ttype_handlers(ttype, spec)
        writer_func = getattr(self, writer_name)
        write = (lambda v: writer_func(v, espec)) if is_container else writer_func
        for v in vals:
            yield write(v)

    def writeFieldByTType(self, ttype, val, spec):
        # BUGFIX: builtin next() instead of the Python-2-only .next().
        next(self._write_by_ttype(ttype, [val], spec, spec))


def checkIntegerLimits(i, bits):
    """Raise TProtocolException if i does not fit in a signed int of *bits*."""
    if bits == 8 and (i < -128 or i > 127):
        raise TProtocolException(TProtocolException.INVALID_DATA,
                                 "i8 requires -128 <= number <= 127")
    elif bits == 16 and (i < -32768 or i > 32767):
        raise TProtocolException(TProtocolException.INVALID_DATA,
                                 "i16 requires -32768 <= number <= 32767")
    elif bits == 32 and (i < -2147483648 or i > 2147483647):
        raise TProtocolException(TProtocolException.INVALID_DATA,
                                 "i32 requires -2147483648 <= number <= 2147483647")
    elif bits == 64 and (i < -9223372036854775808 or i > 9223372036854775807):
        raise TProtocolException(TProtocolException.INVALID_DATA,
                                 "i64 requires -9223372036854775808 <= number <= 9223372036854775807")


class TProtocolFactory(object):
    def getProtocol(self, trans):
        pass
import datetime

from werkzeug.exceptions import BadRequest

from rdr_service.clock import FakeClock
from rdr_service.dao.code_dao import CodeBookDao, CodeDao, CodeHistoryDao
from rdr_service.model.code import Code, CodeBook, CodeHistory, CodeType
from tests.helpers.unittest_base import BaseTestCase, PDRGeneratorTestMixin

# Fixed timestamps used with FakeClock so "created" fields are deterministic.
TIME = datetime.datetime(2016, 1, 1, 10, 0)
TIME_2 = datetime.datetime(2016, 1, 2, 10, 0)
TIME_3 = datetime.datetime(2016, 1, 3, 10, 0)
TIME_4 = datetime.datetime(2016, 1, 4, 10, 0)


class CodeDaoTest(BaseTestCase, PDRGeneratorTestMixin):
    """Tests for CodeBookDao / CodeDao / CodeHistoryDao.

    NOTE: many expected values depend on autoincrement ids, so the exact
    insertion order within each test is significant.
    """

    def setUp(self):
        super().setUp()
        self.code_book_dao = CodeBookDao()
        self.code_dao = CodeDao()
        self.code_history_dao = CodeHistoryDao()

    def test_get_before_insert(self):
        # No rows exist yet, so every get() should return None.
        self.assertIsNone(self.code_book_dao.get(1))
        self.assertIsNone(self.code_dao.get(1))
        self.assertIsNone(self.code_history_dao.get(1))

    def test_insert_without_codebook_or_parent(self):
        # Inserting a code also creates a matching code-history row.
        code = Code(system="a", value="b", display="c", topic="d", codeType=CodeType.MODULE, mapped=True)
        with FakeClock(TIME):
            self.code_dao.insert(code)

        expected_code = Code(
            codeId=1, system="a", value="b", display="c", topic="d", codeType=CodeType.MODULE, mapped=True,
            created=TIME,
        )
        self.assertEqual(expected_code.asdict(), self.code_dao.get(1).asdict())

        expected_code_history = CodeHistory(
            codeHistoryId=1,
            codeId=1,
            system="a",
            value="b",
            display="c",
            topic="d",
            codeType=CodeType.MODULE,
            mapped=True,
            created=TIME,
        )
        self.assertEqual(expected_code_history.asdict(), self.code_history_dao.get(1).asdict())

    def test_insert_with_codebook_and_parent(self):
        # First codebook for a system gets latest=True.
        code_book_1 = CodeBook(name="pmi", version="v1", system="a")
        with FakeClock(TIME):
            self.code_book_dao.insert(code_book_1)

        expected_code_book = CodeBook(codeBookId=1, latest=True, created=TIME, name="pmi", version="v1", system="a")
        self.assertEqual(expected_code_book.asdict(), self.code_book_dao.get(1).asdict())

        code_1 = Code(
            codeBookId=1, system="a", value="b", display="c", topic="d", codeType=CodeType.MODULE, mapped=True
        )
        with FakeClock(TIME_2):
            self.code_dao.insert(code_1)

        expected_code = Code(
            codeBookId=1,
            codeId=1,
            system="a",
            value="b",
            display="c",
            topic="d",
            codeType=CodeType.MODULE,
            mapped=True,
            created=TIME_2,
        )
        self.assertEqual(expected_code.asdict(), self.code_dao.get(1).asdict())

        expected_code_history = CodeHistory(
            codeBookId=1,
            codeHistoryId=1,
            codeId=1,
            system="a",
            value="b",
            display="c",
            topic="d",
            codeType=CodeType.MODULE,
            mapped=True,
            created=TIME_2,
        )
        self.assertEqual(expected_code_history.asdict(), self.code_history_dao.get(1).asdict())

        # Second code is a child (parentId=1) of the first.
        code_2 = Code(
            codeBookId=1,
            system="x",
            value="y",
            display="z",
            topic="q",
            codeType=CodeType.QUESTION,
            mapped=False,
            parentId=1,
        )
        with FakeClock(TIME_3):
            self.code_dao.insert(code_2)

        expected_code_2 = Code(
            codeBookId=1,
            codeId=2,
            system="x",
            value="y",
            display="z",
            topic="q",
            codeType=CodeType.QUESTION,
            mapped=False,
            created=TIME_3,
            parentId=1,
        )
        self.assertEqual(expected_code_2.asdict(), self.code_dao.get(2).asdict())

        # Test code resource generators:
        bq_code_data = self.make_bq_code(expected_code.codeId)
        self.assertEqual(bq_code_data.get('code_id'), expected_code.codeId)
        self.assertEqual(bq_code_data.get('system'), expected_code.system)
        self.assertEqual(bq_code_data.get('value'), expected_code.value)
        self.assertEqual(bq_code_data.get('display'), expected_code.display)
        self.assertEqual(bq_code_data.get('topic'), expected_code.topic)
        self.assertEqual(bq_code_data.get('parent_id'), expected_code.parentId)
        self.assertEqual(bq_code_data.get('code_type'), expected_code.codeType.name)
        self.assertEqual(bq_code_data.get('code_type_id'), expected_code.codeType.number)

        code_resource_data = self.make_code_resource(expected_code_2.codeId)
        self.assertEqual(code_resource_data.get('code_id'), expected_code_2.codeId)
        self.assertEqual(code_resource_data.get('system'), expected_code_2.system)
        self.assertEqual(code_resource_data.get('value'), expected_code_2.value)
        self.assertEqual(code_resource_data.get('topic'), expected_code_2.topic)
        self.assertEqual(code_resource_data.get('parent_id'), expected_code_2.parentId)
        # TODO: Confirm methodolody for comparing Enum-derived fields since test code data uses messages.Enum class?
        self.assertEqual(code_resource_data.get('code_type'), expected_code_2.codeType.name)
        self.assertEqual(code_resource_data.get('code_type_id'), expected_code_2.codeType.number)

    def test_insert_second_codebook_same_system(self):
        # A newer codebook for the same system flips the old one to latest=False.
        code_book_1 = CodeBook(name="pmi", version="v1", system="a")
        with FakeClock(TIME):
            self.code_book_dao.insert(code_book_1)

        code_book_2 = CodeBook(name="pmi", version="v2", system="a")
        with FakeClock(TIME_2):
            self.code_book_dao.insert(code_book_2)

        expected_code_book = CodeBook(codeBookId=1, latest=False, created=TIME, name="pmi", version="v1", system="a")
        self.assertEqual(expected_code_book.asdict(), self.code_book_dao.get(1).asdict())

        expected_code_book_2 = CodeBook(
            codeBookId=2, latest=True, created=TIME_2, name="pmi", version="v2", system="a"
        )
        self.assertEqual(expected_code_book_2.asdict(), self.code_book_dao.get(2).asdict())

    def test_insert_second_codebook_different_system(self):
        # Codebooks for different systems stay latest=True independently.
        code_book_1 = CodeBook(name="pmi", version="v1", system="a")
        with FakeClock(TIME):
            self.code_book_dao.insert(code_book_1)

        code_book_2 = CodeBook(name="pmi", version="v2", system="b")
        with FakeClock(TIME_2):
            self.code_book_dao.insert(code_book_2)

        expected_code_book = CodeBook(codeBookId=1, latest=True, created=TIME, name="pmi", version="v1", system="a")
        self.assertEqual(expected_code_book.asdict(), self.code_book_dao.get(1).asdict())

        expected_code_book_2 = CodeBook(
            codeBookId=2, latest=True, created=TIME_2, name="pmi", version="v2", system="b"
        )
        self.assertEqual(expected_code_book_2.asdict(), self.code_book_dao.get(2).asdict())

    def test_insert_second_codebook_same_system_same_version(self):
        # Duplicate (system, version) is rejected.
        code_book_1 = CodeBook(name="pmi", version="v1", system="a")
        self.code_book_dao.insert(code_book_1)

        code_book_2 = CodeBook(name="pmi", version="v1", system="a")
        with self.assertRaises(BadRequest):
            self.code_book_dao.insert(code_book_2)

    def test_update_codes_no_codebook_id(self):
        # Updating a code without a codebook id is rejected.
        code_book_1 = CodeBook(name="pmi", version="v1", system="c")
        with FakeClock(TIME):
            self.code_book_dao.insert(code_book_1)

        code_1 = Code(
            codeBookId=1, system="a", value="b", display="c", topic="d", codeType=CodeType.MODULE, mapped=True
        )
        with FakeClock(TIME_2):
            self.code_dao.insert(code_1)

        new_code_1 = Code(
            codeId=1, system="x", value="b", display="c", topic="d", codeType=CodeType.MODULE, mapped=True
        )
        with self.assertRaises(BadRequest):
            self.code_dao.update(new_code_1)

    def test_update_codes_same_codebook_id(self):
        # Updating a code while reusing its current codebook id is rejected;
        # updates must come with a new codebook.
        code_book_1 = CodeBook(name="pmi", version="v1", system="c")
        with FakeClock(TIME):
            self.code_book_dao.insert(code_book_1)

        code_1 = Code(
            codeBookId=1, system="a", value="b", display="c", topic="d", codeType=CodeType.MODULE, mapped=True
        )
        with FakeClock(TIME_2):
            self.code_dao.insert(code_1)

        new_code_1 = Code(
            codeBookId=1,
            codeId=1,
            system="x",
            value="b",
            display="c",
            topic="d",
            codeType=CodeType.MODULE,
            mapped=True,
        )
        with self.assertRaises(BadRequest):
            self.code_dao.update(new_code_1)

    def test_update_codes_new_codebook_id(self):
        # Updating with a new codebook id succeeds and appends a history row.
        code_book_1 = CodeBook(name="pmi", version="v1", system="a")
        with FakeClock(TIME):
            self.code_book_dao.insert(code_book_1)

        code_1 = Code(
            codeBookId=1, system="a", value="b", display="c", topic="d", codeType=CodeType.MODULE, mapped=True
        )
        with FakeClock(TIME_2):
            self.code_dao.insert(code_1)

        code_book_2 = CodeBook(name="pmi", version="v2", system="a")
        with FakeClock(TIME_3):
            self.code_book_dao.insert(code_book_2)

        new_code_1 = Code(
            codeBookId=2,
            codeId=1,
            system="x",
            value="b",
            display="c",
            topic="d",
            codeType=CodeType.MODULE,
            mapped=True,
        )
        with FakeClock(TIME_4):
            self.code_dao.update(new_code_1)

        expected_code = Code(
            codeBookId=2,
            codeId=1,
            system="x",
            value="b",
            display="c",
            topic="d",
            codeType=CodeType.MODULE,
            mapped=True,
            created=TIME_2,
        )
        self.assertEqual(expected_code.asdict(), self.code_dao.get(1).asdict())

        expected_code_history = CodeHistory(
            codeBookId=1,
            codeHistoryId=1,
            codeId=1,
            system="a",
            value="b",
            display="c",
            topic="d",
            codeType=CodeType.MODULE,
            mapped=True,
            created=TIME_2,
        )
        self.assertEqual(expected_code_history.asdict(), self.code_history_dao.get(1).asdict())

        expected_code_history_2 = CodeHistory(
            codeHistoryId=2,
            codeBookId=2,
            codeId=1,
            system="x",
            value="b",
            display="c",
            topic="d",
            codeType=CodeType.MODULE,
            mapped=True,
            created=TIME_2,
        )
        self.assertEqual(expected_code_history_2.asdict(), self.code_history_dao.get(2).asdict())

    def test_import_codebook(self):
        # Build a nested concept tree: modules > topics > questions > answers.
        answer_1 = _make_concept("t1", "Answer", "c1", "d1")
        answer_2 = _make_concept("t2", "Answer", "c2", "d2")
        answer_3 = _make_concept("t2", "Answer", "c3", "d3")
        question_1 = _make_concept("t1", "Question", "q1", "d4", [answer_1])
        question_2 = _make_concept("t2", "Question", "q2", "d5", [answer_2, answer_3])
        topic_1 = _make_concept("t1", "Topic", "t1", "d6", [question_1])
        module_1 = _make_concept("mt1", "Module Name", "m1", "d7", [topic_1])
        module_2 = _make_concept("mt2", "Module Name", "m2", "d8", [question_2])
        system = "http://blah/foo"
        codebook = {"name": "pmi", "version": "v1", "url": system, "concept": [module_1, module_2]}
        with FakeClock(TIME):
            self.code_book_dao.import_codebook(codebook)

        expectedCodeBook = CodeBook(codeBookId=1, latest=True, created=TIME, name="pmi", version="v1", system=system)
        self.assertEqual(expectedCodeBook.asdict(), self.code_book_dao.get(1).asdict())

        # Codes are assigned ids depth-first: module 1, its topic, question, answer.
        expectedModule1 = Code(
            codeBookId=1,
            codeId=1,
            system=system,
            value="m1",
            shortValue="m1",
            display="d7",
            topic="mt1",
            codeType=CodeType.MODULE,
            mapped=True,
            created=TIME,
        )
        self.assertEqual(expectedModule1.asdict(), self.code_dao.get(1).asdict())

        expectedModuleHistory1 = CodeHistory(
            codeHistoryId=1,
            codeBookId=1,
            codeId=1,
            system=system,
            value="m1",
            shortValue="m1",
            display="d7",
            topic="mt1",
            codeType=CodeType.MODULE,
            mapped=True,
            created=TIME,
        )
        self.assertEqual(expectedModuleHistory1.asdict(), self.code_history_dao.get(1).asdict())

        expectedTopic1 = Code(
            codeBookId=1,
            codeId=2,
            system=system,
            value="t1",
            shortValue="t1",
            display="d6",
            topic="t1",
            codeType=CodeType.TOPIC,
            mapped=True,
            created=TIME,
            parentId=1,
        )
        self.assertEqual(expectedTopic1.asdict(), self.code_dao.get(2).asdict())

        expectedQuestion1 = Code(
            codeBookId=1,
            codeId=3,
            system=system,
            value="q1",
            shortValue="q1",
            display="d4",
            topic="t1",
            codeType=CodeType.QUESTION,
            mapped=True,
            created=TIME,
            parentId=2,
        )
        self.assertEqual(expectedQuestion1.asdict(), self.code_dao.get(3).asdict())

        expectedAnswer1 = Code(
            codeBookId=1,
            codeId=4,
            system=system,
            value="c1",
            shortValue="c1",
            display="d1",
            topic="t1",
            codeType=CodeType.ANSWER,
            mapped=True,
            created=TIME,
            parentId=3,
        )
        self.assertEqual(expectedAnswer1.asdict(), self.code_dao.get(4).asdict())

    def test_code_map(self):
        """Make sure the correct code ids are loaded for the code map"""
        # Create some initial codes
        codes = []
        for index in range(4):
            code = self.data_generator.create_database_code(value=f'test_a_{index}')
            codes.append(code)

        # Initialize the CodeDao and it's cache
        code_dao = CodeDao()
        code_dao._get_cache()

        # Create another code, one that won't be in the cache
        uncached_code = self.data_generator.create_database_code(value='uncached_b')
        codes.append(uncached_code)

        # Get the CodeDao's internal id code map
        metadata_map = {(code.system, code.value): 1 for code in codes}
        # TODO: get_internal_id_code_map only uses system and value pairs now,
        #  so it can be refactored to only accept those
        id_map = code_dao.get_internal_id_code_map(metadata_map)

        # Make sure all the code ids are correct
        # NOTE(review): id_map.get is called with two positional args
        # (system, value) — presumably id_map is a custom mapping type whose
        # get() takes both; confirm against get_internal_id_code_map, since a
        # plain dict.get would treat the second arg as a default.
        for code in codes:
            mapped_id = id_map.get(code.system, code.value)
            self.assertEqual(code.codeId, mapped_id, 'Mismatch found when mapping code data to ids')

    def test_code_mapping_is_not_case_sensitive(self):
        code_value = 'test_a_1'
        code = self.data_generator.create_database_code(value=code_value.lower())

        # Initialize the CodeDao and it's cache
        code_dao = CodeDao()
        code_dao._get_cache()

        # Get the CodeDao's internal id code map
        metadata_map = {
            (code.system, code_value.upper()): 1
        }
        id_map = code_dao.get_internal_id_code_map(metadata_map)

        # Make sure case doesn't matter when looking up the code
        mapped_id = id_map.get(code.system, code.value.upper())
        self.assertEqual(code.codeId, mapped_id, 'Mismatch found when mapping code data to ids')
        mapped_id = id_map.get(code.system, code.value.lower())
        self.assertEqual(code.codeId, mapped_id, 'Mismatch found when mapping code data to ids')

        # TODO: the way that the CodeDao's caching works means that if a different case is used
        #  by a payload then when building the id map, the code will not be found in the cache,
        #  but will be loaded from the database. So case differences will always cause a miss
        #  until the caching mechanism can be refactored.


def _make_concept(concept_topic, concept_type, code, display, child_concepts=None):
    """Build a codebook concept dict; child_concepts nests under "concept"."""
    concept = {
        "property": [
            {"code": "concept-topic", "valueCode": concept_topic},
            {"code": "concept-type", "valueCode": concept_type},
        ],
        "code": code,
        "display": display,
    }
    if child_concepts:
        concept["concept"] = child_concepts
    return concept
# coding=utf-8
import os
import re
import sys
import hashlib
import logging
from importlib import import_module
import string
from os.path import isfile
from urllib.parse import urlsplit, parse_qsl, urlencode, parse_qs
import cgi

from tornado.httputil import url_concat

PY35 = sys.version_info >= (3, 5)
PY36 = sys.version_info >= (3, 6)


def load_object(path):
    """Import and return the object named by the dotted *path*.

    If *path* is not a string it is assumed to already be the object
    and is returned unchanged.
    """
    if isinstance(path, str):
        dot = path.rindex(".")
        module, name = path[:dot], path[dot + 1:]
        mod = import_module(module)
        return getattr(mod, name)
    return path


def configure_logger(name, config):
    """Create (or reconfigure) the logger *name* from a config mapping.

    Recognized keys: ``log_level``, ``log_format``, ``log_dateformat``
    and optional ``log_file`` (falls back to a stream handler when
    absent).  Existing handlers are replaced so repeated calls do not
    duplicate output.
    """
    log_level = config.get('log_level').upper()
    log_format = config.get('log_format')
    log_dateformat = config.get('log_dateformat')
    logger = logging.getLogger(name)
    logger.setLevel(log_level)
    log_file = config.get('log_file')
    if log_file:
        handler = logging.FileHandler(log_file)
    else:
        handler = logging.StreamHandler()
    handler.setLevel(log_level)
    formatter = logging.Formatter(log_format, log_dateformat)
    handler.setFormatter(formatter)
    logger.handlers.clear()
    logger.addHandler(handler)
    return logger


def configure_tornado_logger(handlers):
    """Attach *handlers* to tornado's logger unless it already has some."""
    log = logging.getLogger('tornado')
    if log.handlers:
        return
    log.handlers = handlers
    log.setLevel('WARNING')


def request_fingerprint(request):
    """Return a stable SHA-1 hex digest identifying *request*.

    The digest covers the HTTP method, the URL (scheme, host, path,
    port, and the query string with its parameters sorted so parameter
    order does not matter) and the request body.

    NOTE(review): the canonical string places the port after the path
    ("scheme://host/path:port?query") rather than in the authority.
    The result is still deterministic; changing the layout now would
    invalidate previously computed fingerprints, so it is kept as-is.
    """
    sha1 = hashlib.sha1()
    sha1.update(to_bytes(request.method))
    res = urlsplit(request.url)
    queries = parse_qsl(res.query)
    queries.sort()
    final_query = urlencode(queries)
    sha1.update(to_bytes('{}://{}{}:{}?{}'.format(
        res.scheme,
        '' if res.hostname is None else res.hostname,
        res.path,
        80 if res.port is None else res.port,
        final_query)))
    sha1.update(request.body or b'')
    return sha1.hexdigest()


def to_bytes(data, encoding=None):
    """Coerce *data* to ``bytes`` (``str`` is encoded, default UTF-8)."""
    if isinstance(data, bytes):
        return data
    if isinstance(data, str):
        return data.encode(encoding or "utf-8")
    raise TypeError("Need bytes or str, got {}".format(type(data).__name__))


def render_template_file(path, **kwargs):
    """Render a ``*.tmpl`` file in place using ``string.Template``.

    The rendered output is written next to the template without the
    ``.tmpl`` suffix and the template file itself is removed.  Paths
    without the suffix are silently ignored.
    """
    if path.endswith(".tmpl"):
        with open(path, "rb") as f:
            raw = f.read().decode("utf-8")
        content = string.Template(raw).substitute(**kwargs)
        render_path = path[:-len(".tmpl")]
        with open(render_path, "wb") as f:
            f.write(content.encode("utf-8"))
        os.remove(path)


_camelcase_invalid_chars = re.compile(r'[^a-zA-Z\d]')


def string_camelcase(s):
    """Title-case *s* and strip every non-alphanumeric character."""
    return _camelcase_invalid_chars.sub('', s.title())


async def iterable_to_list(gen):
    """Collect a sync or async iterable into a list; ``None`` yields []."""
    res = []
    if gen is not None:
        if hasattr(gen, '__aiter__'):
            async for r in gen:
                res.append(r)
        else:
            for r in gen:
                res.append(r)
    return res


def cmp(a, b):
    """Python 2 style three-way comparison: -1, 0 or 1."""
    return (a > b) - (a < b)


def daemonize():
    """Detach the process from the controlling terminal (double fork).

    Redirects stdin/stdout/stderr to ``os.devnull``.
    """
    if os.fork():
        os._exit(0)
    os.setsid()
    if os.fork():
        os._exit(0)
    os.umask(0o22)
    os.closerange(0, 3)
    fd_null = os.open(os.devnull, os.O_RDWR)
    if fd_null != 0:
        os.dup2(fd_null, 0)
    os.dup2(fd_null, 1)
    os.dup2(fd_null, 2)


def load_config(fname):
    """Execute the Python config file *fname* and return its namespace.

    :raises ValueError: when *fname* is ``None`` or not a regular file.
    """
    if fname is None or not isfile(fname):
        raise ValueError('{} is not a file'.format(fname))
    # Use a context manager so the file handle is closed deterministically
    # (the original left it for the GC to reclaim).
    with open(fname, 'rb') as f:
        code = compile(f.read(), fname, 'exec')
    cfg = {
        "__builtins__": __builtins__,
        "__name__": "__config__",
        "__file__": fname,
        "__doc__": None,
        "__package__": None
    }
    exec(code, cfg, cfg)
    return cfg


def iter_settings(config):
    """Yield ``(key, value)`` pairs from *config*, skipping private keys."""
    for key, value in config.items():
        if not key.startswith('_'):
            yield key, value


def get_encoding_from_content_type(content_type):
    """Extract the charset parameter from a Content-Type header, if any."""
    # NOTE(review): ``cgi`` is deprecated since Python 3.11 (removed in
    # 3.13); migrating to email.message or a manual parser would change
    # edge-case behavior, so it is only flagged here.
    if content_type:
        content_type, params = cgi.parse_header(content_type)
        if "charset" in params:
            return params["charset"]


_charset_flag = re.compile(r"""<meta.*?charset=["']*(.+?)["'>]""", flags=re.I)
_pragma_flag = re.compile(r"""<meta.*?content=["']*;?charset=(.+?)["'>]""",
                          flags=re.I)
_xml_flag = re.compile(r"""^<\?xml.*?encoding=["']*(.+?)["'>]""")


def get_encoding_from_content(content):
    """Sniff the declared encoding from an HTML/XML document body.

    Checks, in order: a ``<meta charset=...>`` tag, a pragma
    ``<meta content="...charset=...">`` tag, and an XML declaration.
    Returns ``None`` when nothing matches.
    """
    if isinstance(content, bytes):
        content = content.decode("ascii", errors="ignore")
    elif not isinstance(content, str):
        raise ValueError("content should be bytes or str")
    s = _charset_flag.search(content)
    if s:
        return s.group(1).strip()
    s = _pragma_flag.search(content)
    if s:
        return s.group(1).strip()
    s = _xml_flag.search(content)
    if s:
        return s.group(1).strip()


def make_url(url, params=None):
    """Append *params* (dict or sequence of pairs) to *url* as a query.

    Dict values that are lists/tuples are expanded into repeated
    parameters.
    """
    args = []
    if isinstance(params, dict):
        for k, v in params.items():
            if isinstance(v, (tuple, list)):
                for i in v:
                    args.append((k, i))
            else:
                args.append((k, v))
    elif isinstance(params, (tuple, list)):
        for k, v in params:
            args.append((k, v))
    return url_concat(url, args)


def get_params_in_url(url):
    """Return the query parameters of *url* as a dict of value lists."""
    return parse_qs(urlsplit(url).query)


def with_not_none_params(**kwargs):
    """Return the given keyword arguments with ``None`` values dropped."""
    params = {}
    for k, v in kwargs.items():
        if v is not None:
            params[k] = v
    return params


def isiterable(obj):
    """True when *obj* supports synchronous or asynchronous iteration."""
    return hasattr(obj, "__iter__") or hasattr(obj, "__aiter__")
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import hashlib

from oslo_log import log as logging

from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack.compute.views import flavors as views_flavors
from nova.api.openstack.compute.views import images as views_images
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import utils

LOG = logging.getLogger(__name__)


class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = "servers"

    # Statuses for which the response exposes a "progress" field.
    _progress_statuses = (
        "ACTIVE",
        "BUILD",
        "REBUILD",
        "RESIZE",
        "VERIFY_RESIZE",
        "MIGRATING",
    )

    # Statuses for which a fault record (if one exists) is attached.
    _fault_statuses = (
        "ERROR", "DELETED"
    )

    # These are the lazy-loadable instance attributes required for showing
    # details about an instance. Add to this list as new things need to be
    # shown.
    _show_expected_attrs = ['flavor', 'info_cache', 'metadata']

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()
        self._address_builder = views_addresses.ViewBuilder()
        self._image_builder = views_images.ViewBuilder()
        self._flavor_builder = views_flavors.ViewBuilder()

    def create(self, request, instance):
        """View that should be returned when an instance is created."""
        return {
            "server": {
                "id": instance["uuid"],
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
                # NOTE(sdague): historically this was the
                # os-disk-config extension, but now that extensions
                # are gone, we merge these attributes here.
                "OS-DCF:diskConfig": (
                    'AUTO' if instance.get('auto_disk_config') else 'MANUAL'),
            },
        }

    def basic(self, request, instance):
        """Generic, non-detailed view of an instance."""
        return {
            "server": {
                "id": instance["uuid"],
                "name": instance["display_name"],
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
            },
        }

    def get_show_expected_attrs(self, expected_attrs=None):
        """Returns a list of lazy-loadable expected attributes used by show

        This should be used when getting the instances from the database so
        that the necessary attributes are pre-loaded before needing to build
        the show response where lazy-loading can fail if an instance was
        deleted.

        :param list expected_attrs: The list of expected attributes that will
            be requested in addition to what this view builder requires. This
            method will merge the two lists and return what should be
            ultimately used when getting an instance from the database.

        :returns: merged and sorted list of expected attributes
        """
        if expected_attrs is None:
            expected_attrs = []
        # NOTE(mriedem): We sort the list so we can have predictable test
        # results.
        return sorted(list(set(self._show_expected_attrs + expected_attrs)))

    def show(self, request, instance, extend_address=True):
        """Detailed view of a single instance."""
        ip_v4 = instance.get('access_ip_v4')
        ip_v6 = instance.get('access_ip_v6')
        server = {
            "server": {
                "id": instance["uuid"],
                "name": instance["display_name"],
                "status": self._get_vm_status(instance),
                "tenant_id": instance.get("project_id") or "",
                "user_id": instance.get("user_id") or "",
                "metadata": self._get_metadata(instance),
                "hostId": self._get_host_id(instance) or "",
                "image": self._get_image(request, instance),
                "flavor": self._get_flavor(request, instance),
                "created": utils.isotime(instance["created_at"]),
                "updated": utils.isotime(instance["updated_at"]),
                "addresses": self._get_addresses(request, instance,
                                                 extend_address),
                "accessIPv4": str(ip_v4) if ip_v4 is not None else '',
                "accessIPv6": str(ip_v6) if ip_v6 is not None else '',
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
                # NOTE(sdague): historically this was the
                # os-disk-config extension, but now that extensions
                # are gone, we merge these attributes here.
                "OS-DCF:diskConfig": (
                    'AUTO' if instance.get('auto_disk_config') else 'MANUAL'),
            },
        }
        # Faults are only shown for error-ish statuses; progress only for
        # transitional/active ones.
        if server["server"]["status"] in self._fault_statuses:
            _inst_fault = self._get_fault(request, instance)
            if _inst_fault:
                server['server']['fault'] = _inst_fault

        if server["server"]["status"] in self._progress_statuses:
            server["server"]["progress"] = instance.get("progress", 0)

        # The following fields are gated on the API request's microversion.
        if api_version_request.is_supported(request, min_version="2.9"):
            server["server"]["locked"] = (True if instance["locked_by"]
                                          else False)

        if api_version_request.is_supported(request, min_version="2.19"):
            server["server"]["description"] = instance.get(
                "display_description")

        if api_version_request.is_supported(request, min_version="2.26"):
            server["server"]["tags"] = [t.tag for t in instance.tags]

        return server

    def index(self, request, instances):
        """Show a list of servers without many details."""
        coll_name = self._collection_name
        return self._list_view(self.basic, request, instances, coll_name)

    def detail(self, request, instances):
        """Detailed view of a list of instance."""
        coll_name = self._collection_name + '/detail'
        return self._list_view(self.show, request, instances, coll_name)

    def _list_view(self, func, request, servers, coll_name):
        """Provide a view for a list of servers.

        :param func: Function used to format the server data
        :param request: API request
        :param servers: List of servers in dictionary format
        :param coll_name: Name of collection, used to generate the next link
                          for a pagination query
        :returns: Server data in dictionary format
        """
        server_list = [func(request, server)["server"] for server in servers]
        servers_links = self._get_collection_links(request,
                                                   servers,
                                                   coll_name)
        servers_dict = dict(servers=server_list)

        if servers_links:
            servers_dict["servers_links"] = servers_links

        return servers_dict

    @staticmethod
    def _get_metadata(instance):
        # FIXME(danms): Transitional support for objects
        metadata = instance.get('metadata')
        if isinstance(instance, obj_base.NovaObject):
            return metadata or {}
        else:
            return utils.instance_meta(instance)

    @staticmethod
    def _get_vm_status(instance):
        # If the instance is deleted the vm and task states don't really
        # matter
        if instance.get("deleted"):
            return "DELETED"
        return common.status_from_state(instance.get("vm_state"),
                                        instance.get("task_state"))

    @staticmethod
    def _get_host_id(instance):
        # Obfuscate the real host name behind a per-project SHA-224 hash.
        # Falls through to an implicit None (rendered as "" by show()) when
        # the instance has not been scheduled to a host yet.
        host = instance.get("host")
        project = str(instance.get("project_id"))
        if host:
            data = (project + host).encode('utf-8')
            sha_hash = hashlib.sha224(data)
            return sha_hash.hexdigest()

    def _get_addresses(self, request, instance, extend_address=False):
        context = request.environ["nova.context"]
        networks = common.get_networks_for_instance(context, instance)
        return self._address_builder.index(networks,
                                           extend_address)["addresses"]

    def _get_image(self, request, instance):
        # Returns "" (not a dict) for boot-from-volume instances that have
        # no image reference.
        image_ref = instance["image_ref"]
        if image_ref:
            image_id = str(common.get_id_from_href(image_ref))
            bookmark = self._image_builder._get_bookmark_link(request,
                                                              image_id,
                                                              "images")
            return {
                "id": image_id,
                "links": [{
                    "rel": "bookmark",
                    "href": bookmark,
                }],
            }
        else:
            return ""

    def _get_flavor(self, request, instance):
        instance_type = instance.get_flavor()
        if not instance_type:
            LOG.warning("Instance has had its instance_type removed "
                        "from the DB", instance=instance)
            return {}
        flavor_id = instance_type["flavorid"]
        flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
                                                                  flavor_id,
                                                                  "flavors")
        return {
            "id": str(flavor_id),
            "links": [{
                "rel": "bookmark",
                "href": flavor_bookmark,
            }],
        }

    def _load_fault(self, request, instance):
        # Faults live in the cell database, so look up the instance's cell
        # mapping and target that cell before lazy-loading instance.fault.
        try:
            mapping = objects.InstanceMapping.get_by_instance_uuid(
                request.environ['nova.context'], instance.uuid)
            if mapping.cell_mapping is not None:
                with nova_context.target_cell(instance._context,
                                              mapping.cell_mapping):
                    return instance.fault
        except exception.InstanceMappingNotFound:
            pass

        # NOTE(danms): No instance mapping at all, or a mapping with no cell,
        # which means a legacy environment or instance.
        return instance.fault

    def _get_fault(self, request, instance):
        # Prefer an already-loaded fault; otherwise lazy-load it from the
        # instance's cell. Returns None when there is no fault to report.
        if 'fault' in instance:
            fault = instance.fault
        else:
            fault = self._load_fault(request, instance)

        if not fault:
            return None

        fault_dict = {
            "code": fault["code"],
            "created": utils.isotime(fault["created_at"]),
            "message": fault["message"],
        }

        # Fault details may contain sensitive tracebacks: expose them only
        # to admins, except for non-500 faults which are safe for anyone.
        if fault.get('details', None):
            is_admin = False
            context = request.environ["nova.context"]
            if context:
                is_admin = getattr(context, 'is_admin', False)

            if is_admin or fault['code'] != 500:
                fault_dict['details'] = fault["details"]

        return fault_dict
# Configuration file for ipython. #------------------------------------------------------------------------------ # InteractiveShellApp(Configurable) configuration #------------------------------------------------------------------------------ ## A Mixin for applications that start InteractiveShell instances. # # Provides configurables for loading extensions and executing files as part of # configuring a Shell environment. # # The following methods should be called by the :meth:`initialize` method of the # subclass: # # - :meth:`init_path` # - :meth:`init_shell` (to be implemented by the subclass) # - :meth:`init_gui_pylab` # - :meth:`init_extensions` # - :meth:`init_code` ## Execute the given command string. # Default: '' # c.InteractiveShellApp.code_to_run = '' ## Run the file referenced by the PYTHONSTARTUP environment variable at IPython # startup. # Default: True # c.InteractiveShellApp.exec_PYTHONSTARTUP = True ## List of files to run at IPython startup. # Default: [] # c.InteractiveShellApp.exec_files = [] ## lines of code to run at IPython startup. # Default: [] # c.InteractiveShellApp.exec_lines = [] ## A list of dotted module names of IPython extensions to load. # Default: [] # c.InteractiveShellApp.extensions = [] ## DEPRECATED. Dotted module name of a single extra IPython extension to load. # # Only one extension can be added this way. # # Only used with traitlets < 5.0, plural extra_extensions list is used in # traitlets 5. # Default: '' # c.InteractiveShellApp.extra_extension = '' ## Dotted module name(s) of one or more IPython extensions to load. # # For specifying extra extensions to load on the command-line. # # .. versionadded:: 7.10 # Default: [] # c.InteractiveShellApp.extra_extensions = [] ## A file to be run # Default: '' # c.InteractiveShellApp.file_to_run = '' ## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk', # 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', # 'qt4'). 
# Choices: any of ['asyncio', 'glut', 'gtk', 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4'] (case-insensitive) or None # Default: None # c.InteractiveShellApp.gui = None ## Should variables loaded at startup (by startup files, exec_lines, etc.) be # hidden from tools like %who? # Default: True # c.InteractiveShellApp.hide_initial_ns = True ## If True, IPython will not add the current working directory to sys.path. When # False, the current working directory is added to sys.path, allowing imports of # modules defined in the current directory. # Default: False # c.InteractiveShellApp.ignore_cwd = False ## Configure matplotlib for interactive use with the default matplotlib backend. # Choices: any of ['auto', 'agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'] (case-insensitive) or None # Default: None # c.InteractiveShellApp.matplotlib = None ## Run the module as a script. # Default: '' # c.InteractiveShellApp.module_to_run = '' ## Pre-load matplotlib and numpy for interactive use, selecting a particular # matplotlib backend and loop integration. # Choices: any of ['auto', 'agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'] (case-insensitive) or None # Default: None # c.InteractiveShellApp.pylab = None ## If true, IPython will populate the user namespace with numpy, pylab, etc. and # an ``import *`` is done from numpy and pylab, when using pylab mode. # # When False, pylab mode should not import any names into the user namespace. # Default: True # c.InteractiveShellApp.pylab_import_all = True ## Reraise exceptions encountered loading IPython extensions? 
# Default: False # c.InteractiveShellApp.reraise_ipython_extension_failures = False #------------------------------------------------------------------------------ # Application(SingletonConfigurable) configuration #------------------------------------------------------------------------------ ## This is an application. ## The date format used by logging formatters for %(asctime)s # Default: '%Y-%m-%d %H:%M:%S' # c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S' ## The Logging format template # Default: '[%(name)s]%(highlevel)s %(message)s' # c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s' ## Set the log level by value or name. # Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'] # Default: 30 # c.Application.log_level = 30 ## Instead of starting the Application, dump configuration to stdout # Default: False # c.Application.show_config = False ## Instead of starting the Application, dump configuration to stdout (as JSON) # Default: False # c.Application.show_config_json = False #------------------------------------------------------------------------------ # BaseIPythonApplication(Application) configuration #------------------------------------------------------------------------------ ## IPython: an enhanced interactive Python shell. ## Whether to create profile dir if it doesn't exist # Default: False # c.BaseIPythonApplication.auto_create = False ## Whether to install the default config files into the profile dir. If a new # profile is being created, and IPython contains config files for that profile, # then they will be staged into the new directory. Otherwise, default config # files will be automatically generated. # Default: False # c.BaseIPythonApplication.copy_config_files = False ## Path to an extra config file to load. # # If specified, load this config file in addition to any other IPython config. # Default: '' # c.BaseIPythonApplication.extra_config_file = '' ## The name of the IPython directory. 
This directory is used for logging # configuration (through profiles), history storage, etc. The default is usually # $HOME/.ipython. This option can also be specified through the environment # variable IPYTHONDIR. # Default: '' # c.BaseIPythonApplication.ipython_dir = '' ## The date format used by logging formatters for %(asctime)s # See also: Application.log_datefmt # c.BaseIPythonApplication.log_datefmt = '%Y-%m-%d %H:%M:%S' ## The Logging format template # See also: Application.log_format # c.BaseIPythonApplication.log_format = '[%(name)s]%(highlevel)s %(message)s' ## Set the log level by value or name. # See also: Application.log_level # c.BaseIPythonApplication.log_level = 30 ## Whether to overwrite existing config files when copying # Default: False # c.BaseIPythonApplication.overwrite = False ## The IPython profile to use. # Default: 'default' # c.BaseIPythonApplication.profile = 'default' ## Instead of starting the Application, dump configuration to stdout # See also: Application.show_config # c.BaseIPythonApplication.show_config = False ## Instead of starting the Application, dump configuration to stdout (as JSON) # See also: Application.show_config_json # c.BaseIPythonApplication.show_config_json = False ## Create a massive crash report when IPython encounters what may be an internal # error. The default is to append a short message to the usual traceback # Default: False # c.BaseIPythonApplication.verbose_crash = False #------------------------------------------------------------------------------ # TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp) configuration #------------------------------------------------------------------------------ ## Execute the given command string. # See also: InteractiveShellApp.code_to_run # c.TerminalIPythonApp.code_to_run = '' ## Whether to install the default config files into the profile dir. 
# See also: BaseIPythonApplication.copy_config_files # c.TerminalIPythonApp.copy_config_files = False ## Whether to display a banner upon starting IPython. # Default: True # c.TerminalIPythonApp.display_banner = True ## Run the file referenced by the PYTHONSTARTUP environment # See also: InteractiveShellApp.exec_PYTHONSTARTUP # c.TerminalIPythonApp.exec_PYTHONSTARTUP = True ## List of files to run at IPython startup. # See also: InteractiveShellApp.exec_files # c.TerminalIPythonApp.exec_files = [] ## lines of code to run at IPython startup. # See also: InteractiveShellApp.exec_lines # c.TerminalIPythonApp.exec_lines = [] ## A list of dotted module names of IPython extensions to load. # See also: InteractiveShellApp.extensions # c.TerminalIPythonApp.extensions = [] ## Path to an extra config file to load. # See also: BaseIPythonApplication.extra_config_file # c.TerminalIPythonApp.extra_config_file = '' ## # See also: InteractiveShellApp.extra_extension # c.TerminalIPythonApp.extra_extension = '' ## # See also: InteractiveShellApp.extra_extensions # c.TerminalIPythonApp.extra_extensions = [] ## A file to be run # See also: InteractiveShellApp.file_to_run # c.TerminalIPythonApp.file_to_run = '' ## If a command or file is given via the command-line, e.g. 'ipython foo.py', # start an interactive shell after executing the file or command. # Default: False # c.TerminalIPythonApp.force_interact = False ## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk', # 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', # 'qt4'). # See also: InteractiveShellApp.gui # c.TerminalIPythonApp.gui = None ## Should variables loaded at startup (by startup files, exec_lines, etc.) # See also: InteractiveShellApp.hide_initial_ns # c.TerminalIPythonApp.hide_initial_ns = True ## If True, IPython will not add the current working directory to sys.path. 
# See also: InteractiveShellApp.ignore_cwd # c.TerminalIPythonApp.ignore_cwd = False ## Class to use to instantiate the TerminalInteractiveShell object. Useful for # custom Frontends # Default: 'IPython.terminal.interactiveshell.TerminalInteractiveShell' # c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell' ## # See also: BaseIPythonApplication.ipython_dir # c.TerminalIPythonApp.ipython_dir = '' ## The date format used by logging formatters for %(asctime)s # See also: Application.log_datefmt # c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S' ## The Logging format template # See also: Application.log_format # c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s' ## Set the log level by value or name. # See also: Application.log_level # c.TerminalIPythonApp.log_level = 30 ## Configure matplotlib for interactive use with # See also: InteractiveShellApp.matplotlib # c.TerminalIPythonApp.matplotlib = None ## Run the module as a script. # See also: InteractiveShellApp.module_to_run # c.TerminalIPythonApp.module_to_run = '' ## Whether to overwrite existing config files when copying # See also: BaseIPythonApplication.overwrite # c.TerminalIPythonApp.overwrite = False ## The IPython profile to use. # See also: BaseIPythonApplication.profile # c.TerminalIPythonApp.profile = 'default' ## Pre-load matplotlib and numpy for interactive use, # See also: InteractiveShellApp.pylab # c.TerminalIPythonApp.pylab = None ## If true, IPython will populate the user namespace with numpy, pylab, etc. # See also: InteractiveShellApp.pylab_import_all # c.TerminalIPythonApp.pylab_import_all = True ## Start IPython quickly by skipping the loading of config files. # Default: False # c.TerminalIPythonApp.quick = False ## Reraise exceptions encountered loading IPython extensions? 
# See also: InteractiveShellApp.reraise_ipython_extension_failures # c.TerminalIPythonApp.reraise_ipython_extension_failures = False ## Instead of starting the Application, dump configuration to stdout # See also: Application.show_config # c.TerminalIPythonApp.show_config = False ## Instead of starting the Application, dump configuration to stdout (as JSON) # See also: Application.show_config_json # c.TerminalIPythonApp.show_config_json = False ## Create a massive crash report when IPython encounters what may be an # See also: BaseIPythonApplication.verbose_crash # c.TerminalIPythonApp.verbose_crash = False #------------------------------------------------------------------------------ # InteractiveShell(SingletonConfigurable) configuration #------------------------------------------------------------------------------ ## An enhanced, interactive shell for Python. ## 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying which # nodes should be run interactively (displaying output from expressions). # Choices: any of ['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'] # Default: 'last_expr' # c.InteractiveShell.ast_node_interactivity = 'last_expr' ## A list of ast.NodeTransformer subclass instances, which will be applied to # user input before code is run. # Default: [] # c.InteractiveShell.ast_transformers = [] ## Automatically run await statement in the top level repl. # Default: True # c.InteractiveShell.autoawait = True ## Make IPython automatically call any callable object even if you didn't type # explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically. # The value can be '0' to disable the feature, '1' for 'smart' autocall, where # it is not applied if there are no more arguments on the line, and '2' for # 'full' autocall, where all callable objects are automatically called (even if # no arguments are present). 
# Choices: any of [0, 1, 2] # Default: 0 # c.InteractiveShell.autocall = 0 ## Autoindent IPython code entered interactively. # Default: True # c.InteractiveShell.autoindent = True ## Enable magic commands to be called without the leading %. # Default: True # c.InteractiveShell.automagic = True ## The part of the banner to be printed before the profile # Default: "Python 3.9.5 (default, May 24 2021, 12:50:35) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.25.0 -- An enhanced Interactive Python. Type '?' for help.\n" # c.InteractiveShell.banner1 = "Python 3.9.5 (default, May 24 2021, 12:50:35) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.25.0 -- An enhanced Interactive Python. Type '?' for help.\n" ## The part of the banner to be printed after the profile # Default: '' # c.InteractiveShell.banner2 = '' ## Set the size of the output cache. The default is 1000, you can change it # permanently in your config file. Setting it to 0 completely disables the # caching system, and the minimum value accepted is 3 (if you provide a value # less than 3, it is reset to 0 and a warning is issued). This limit is defined # because otherwise you'll spend more time re-flushing a too small cache than # working # Default: 1000 # c.InteractiveShell.cache_size = 1000 ## Use colors for displaying information about objects. Because this information # is passed through a pager (like 'less'), and some pagers get confused with # color codes, this capability can be turned off. # Default: True # c.InteractiveShell.color_info = True ## Set the color scheme (NoColor, Neutral, Linux, or LightBG). # Choices: any of ['Neutral', 'NoColor', 'LightBG', 'Linux'] (case-insensitive) # Default: 'Neutral' # c.InteractiveShell.colors = 'Neutral' # Default: False # c.InteractiveShell.debug = False ## Don't call post-execute functions that have failed in the past. 
# Default: False # c.InteractiveShell.disable_failing_post_execute = False ## If True, anything that would be passed to the pager will be displayed as # regular output instead. # Default: False # c.InteractiveShell.display_page = False ## (Provisional API) enables html representation in mime bundles sent to pagers. # Default: False # c.InteractiveShell.enable_html_pager = False ## Total length of command history # Default: 10000 # c.InteractiveShell.history_length = 10000 ## The number of saved history entries to be loaded into the history buffer at # startup. # Default: 1000 # c.InteractiveShell.history_load_length = 1000 # Default: '' # c.InteractiveShell.ipython_dir = '' ## Start logging to the given file in append mode. Use `logfile` to specify a log # file to **overwrite** logs to. # Default: '' # c.InteractiveShell.logappend = '' ## The name of the logfile to use. # Default: '' # c.InteractiveShell.logfile = '' ## Start logging to the default log file in overwrite mode. Use `logappend` to # specify a log file to **append** logs to. # Default: False # c.InteractiveShell.logstart = False ## Select the loop runner that will be used to execute top-level asynchronous # code # Default: 'IPython.core.interactiveshell._asyncio_runner' # c.InteractiveShell.loop_runner = 'IPython.core.interactiveshell._asyncio_runner' # Choices: any of [0, 1, 2] # Default: 0 # c.InteractiveShell.object_info_string_level = 0 ## Automatically call the pdb debugger after every exception. # Default: False # c.InteractiveShell.pdb = False ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. # Default: 'In [\\#]: ' # c.InteractiveShell.prompt_in1 = 'In [\\#]: ' ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. 
# Default: ' .\\D.: ' # c.InteractiveShell.prompt_in2 = ' .\\D.: ' ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. # Default: 'Out[\\#]: ' # c.InteractiveShell.prompt_out = 'Out[\\#]: ' ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. # Default: True # c.InteractiveShell.prompts_pad_left = True # Default: False # c.InteractiveShell.quiet = False # Default: '\n' # c.InteractiveShell.separate_in = '\n' # Default: '' # c.InteractiveShell.separate_out = '' # Default: '' # c.InteractiveShell.separate_out2 = '' ## Show rewritten input, e.g. for autocall. # Default: True # c.InteractiveShell.show_rewritten_input = True ## Enables rich html representation of docstrings. (This requires the docrepr # module). # Default: False # c.InteractiveShell.sphinxify_docstring = False # Default: True # c.InteractiveShell.wildcards_case_sensitive = True ## Switch modes for the IPython exception handlers. # Choices: any of ['Context', 'Plain', 'Verbose', 'Minimal'] (case-insensitive) # Default: 'Context' # c.InteractiveShell.xmode = 'Context' #------------------------------------------------------------------------------ # TerminalInteractiveShell(InteractiveShell) configuration #------------------------------------------------------------------------------ ## # See also: InteractiveShell.ast_node_interactivity # c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr' ## # See also: InteractiveShell.ast_transformers # c.TerminalInteractiveShell.ast_transformers = [] ## # See also: InteractiveShell.autoawait # c.TerminalInteractiveShell.autoawait = True ## # See also: InteractiveShell.autocall # c.TerminalInteractiveShell.autocall = 0 ## Autoformatter to reformat Terminal code. 
Can be `'black'` or `None` # Default: None # c.TerminalInteractiveShell.autoformatter = None ## # See also: InteractiveShell.autoindent # c.TerminalInteractiveShell.autoindent = True ## # See also: InteractiveShell.automagic # c.TerminalInteractiveShell.automagic = True ## The part of the banner to be printed before the profile # See also: InteractiveShell.banner1 # c.TerminalInteractiveShell.banner1 = "Python 3.9.5 (default, May 24 2021, 12:50:35) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.25.0 -- An enhanced Interactive Python. Type '?' for help.\n" ## The part of the banner to be printed after the profile # See also: InteractiveShell.banner2 # c.TerminalInteractiveShell.banner2 = '' ## # See also: InteractiveShell.cache_size # c.TerminalInteractiveShell.cache_size = 1000 ## # See also: InteractiveShell.color_info # c.TerminalInteractiveShell.color_info = True ## Set the color scheme (NoColor, Neutral, Linux, or LightBG). # See also: InteractiveShell.colors # c.TerminalInteractiveShell.colors = 'Neutral' ## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix, # Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a # direct exit without any confirmation. # Default: True # c.TerminalInteractiveShell.confirm_exit = True # See also: InteractiveShell.debug # c.TerminalInteractiveShell.debug = False ## Don't call post-execute functions that have failed in the past. # See also: InteractiveShell.disable_failing_post_execute # c.TerminalInteractiveShell.disable_failing_post_execute = False ## Options for displaying tab completions, 'column', 'multicolumn', and # 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit` # documentation for more information. 
# Choices: any of ['column', 'multicolumn', 'readlinelike'] # Default: 'multicolumn' # c.TerminalInteractiveShell.display_completions = 'multicolumn' ## If True, anything that would be passed to the pager # See also: InteractiveShell.display_page # c.TerminalInteractiveShell.display_page = False ## Shortcut style to use at the prompt. 'vi' or 'emacs'. # Default: 'emacs' c.TerminalInteractiveShell.editing_mode = 'vi' ## Set the editor used by IPython (default to $EDITOR/vi/notepad). # Default: 'vim' # c.TerminalInteractiveShell.editor = 'vim' ## Allows to enable/disable the prompt toolkit history search # Default: True # c.TerminalInteractiveShell.enable_history_search = True ## # See also: InteractiveShell.enable_html_pager # c.TerminalInteractiveShell.enable_html_pager = False ## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is # in addition to the F2 binding, which is always enabled. # Default: False # c.TerminalInteractiveShell.extra_open_editor_shortcuts = False ## Provide an alternative handler to be called when the user presses Return. This # is an advanced option intended for debugging, which may be changed or removed # in later releases. # Default: None # c.TerminalInteractiveShell.handle_return = None ## Highlight matching brackets. # Default: True # c.TerminalInteractiveShell.highlight_matching_brackets = True ## The name or class of a Pygments style to use for syntax highlighting. To see # available styles, run `pygmentize -L styles`. 
# Default: traitlets.Undefined # c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined ## Override highlighting format for specific tokens # Default: {} # c.TerminalInteractiveShell.highlighting_style_overrides = {} ## Total length of command history # See also: InteractiveShell.history_length # c.TerminalInteractiveShell.history_length = 10000 ## # See also: InteractiveShell.history_load_length # c.TerminalInteractiveShell.history_load_length = 1000 # See also: InteractiveShell.ipython_dir # c.TerminalInteractiveShell.ipython_dir = '' ## # See also: InteractiveShell.logappend # c.TerminalInteractiveShell.logappend = '' ## # See also: InteractiveShell.logfile # c.TerminalInteractiveShell.logfile = '' ## # See also: InteractiveShell.logstart # c.TerminalInteractiveShell.logstart = False ## Select the loop runner that will be used to execute top-level asynchronous # code # See also: InteractiveShell.loop_runner # c.TerminalInteractiveShell.loop_runner = 'IPython.core.interactiveshell._asyncio_runner' # Default: {} # c.TerminalInteractiveShell.mime_renderers = {} ## Enable mouse support in the prompt (Note: prevents selecting text with the # mouse) # Default: False # c.TerminalInteractiveShell.mouse_support = False # See also: InteractiveShell.object_info_string_level # c.TerminalInteractiveShell.object_info_string_level = 0 ## # See also: InteractiveShell.pdb # c.TerminalInteractiveShell.pdb = False ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. # See also: InteractiveShell.prompt_in1 # c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: ' ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. # See also: InteractiveShell.prompt_in2 # c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: ' ## Display the current vi mode (when using vi editing mode). 
# Default: True # c.TerminalInteractiveShell.prompt_includes_vi_mode = True ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. # See also: InteractiveShell.prompt_out # c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: ' ## Class used to generate Prompt token for prompt_toolkit # Default: 'IPython.terminal.prompts.Prompts' # c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts' ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. # See also: InteractiveShell.prompts_pad_left # c.TerminalInteractiveShell.prompts_pad_left = True # See also: InteractiveShell.quiet # c.TerminalInteractiveShell.quiet = False # See also: InteractiveShell.separate_in # c.TerminalInteractiveShell.separate_in = '\n' # See also: InteractiveShell.separate_out # c.TerminalInteractiveShell.separate_out = '' # See also: InteractiveShell.separate_out2 # c.TerminalInteractiveShell.separate_out2 = '' ## Show rewritten input, e.g. for autocall. # See also: InteractiveShell.show_rewritten_input # c.TerminalInteractiveShell.show_rewritten_input = True ## Use `raw_input` for the REPL, without completion and prompt colors. # # Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. # Known usage are: IPython own testing machinery, and emacs inferior-shell # integration through elpy. # # This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment # variable is set, or the current terminal is not a tty. # Default: False # c.TerminalInteractiveShell.simple_prompt = False ## Number of line at the bottom of the screen to reserve for the tab completion # menu, search history, ...etc, the height of these menus will at most this # value. Increase it is you prefer long and skinny menus, decrease for short and # wide. 
# Default: 6 # c.TerminalInteractiveShell.space_for_menu = 6 ## # See also: InteractiveShell.sphinxify_docstring # c.TerminalInteractiveShell.sphinxify_docstring = False ## Automatically set the terminal title # Default: True # c.TerminalInteractiveShell.term_title = True ## Customize the terminal title format. This is a python format string. # Available substitutions are: {cwd}. # Default: 'IPython: {cwd}' # c.TerminalInteractiveShell.term_title_format = 'IPython: {cwd}' ## Use 24bit colors instead of 256 colors in prompt highlighting. If your # terminal supports true color, the following command should print 'TRUECOLOR' # in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n" # Default: False # c.TerminalInteractiveShell.true_color = False # See also: InteractiveShell.wildcards_case_sensitive # c.TerminalInteractiveShell.wildcards_case_sensitive = True ## Switch modes for the IPython exception handlers. # See also: InteractiveShell.xmode # c.TerminalInteractiveShell.xmode = 'Context' #------------------------------------------------------------------------------ # HistoryAccessor(HistoryAccessorBase) configuration #------------------------------------------------------------------------------ ## Access the history database without adding to it. # # This is intended for use by standalone history tools. IPython shells use # HistoryManager, below, which is a subclass of this. ## Options for configuring the SQLite connection # # These options are passed as keyword args to sqlite3.connect when establishing # database connections. # Default: {} # c.HistoryAccessor.connection_options = {} ## enable the SQLite history # # set enabled=False to disable the SQLite history, in which case there will be # no stored history, no SQLite connection, and no background saving thread. # This may be necessary in some threaded environments where IPython is embedded. # Default: True # c.HistoryAccessor.enabled = True ## Path to file to use for SQLite history database. 
# # By default, IPython will put the history database in the IPython profile # directory. If you would rather share one history among profiles, you can set # this value in each, so that they are consistent. # # Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts. # If you see IPython hanging, try setting this to something on a local disk, # e.g:: # # ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite # # you can also use the specific value `:memory:` (including the colon at both # end but not the back ticks), to avoid creating an history file. # Default: '' # c.HistoryAccessor.hist_file = '' #------------------------------------------------------------------------------ # HistoryManager(HistoryAccessor) configuration #------------------------------------------------------------------------------ ## A class to organize all history-related functionality in one place. ## Options for configuring the SQLite connection # See also: HistoryAccessor.connection_options # c.HistoryManager.connection_options = {} ## Write to database every x commands (higher values save disk access & power). # Values of 1 or less effectively disable caching. # Default: 0 # c.HistoryManager.db_cache_size = 0 ## Should the history database include output? (default: no) # Default: False # c.HistoryManager.db_log_output = False ## enable the SQLite history # See also: HistoryAccessor.enabled # c.HistoryManager.enabled = True ## Path to file to use for SQLite history database. # See also: HistoryAccessor.hist_file # c.HistoryManager.hist_file = '' #------------------------------------------------------------------------------ # ProfileDir(LoggingConfigurable) configuration #------------------------------------------------------------------------------ ## An object to manage the profile directory and its resources. # # The profile directory is used by all IPython applications, to manage # configuration, logging and security. 
# # This object knows how to find, create and manage these directories. This # should be used by any code that wants to handle profiles. ## Set the profile location directly. This overrides the logic used by the # `profile` option. # Default: '' # c.ProfileDir.location = '' #------------------------------------------------------------------------------ # BaseFormatter(Configurable) configuration #------------------------------------------------------------------------------ ## A base formatter class that is configurable. # # This formatter should usually be used as the base class of all formatters. It # is a traited :class:`Configurable` class and includes an extensible API for # users to determine how their objects are formatted. The following logic is # used to find a function to format an given object. # # 1. The object is introspected to see if it has a method with the name # :attr:`print_method`. If is does, that object is passed to that method # for formatting. # 2. If no print method is found, three internal dictionaries are consulted # to find print method: :attr:`singleton_printers`, :attr:`type_printers` # and :attr:`deferred_printers`. # # Users should use these dictionaries to register functions that will be used to # compute the format data for their objects (if those objects don't have the # special print methods). The easiest way of using these dictionaries is through # the :meth:`for_type` and :meth:`for_type_by_name` methods. # # If no function/callable is found to compute the format data, ``None`` is # returned and this format type is not used. 
# Default: {} # c.BaseFormatter.deferred_printers = {} # Default: True # c.BaseFormatter.enabled = True # Default: {} # c.BaseFormatter.singleton_printers = {} # Default: {} # c.BaseFormatter.type_printers = {} #------------------------------------------------------------------------------ # PlainTextFormatter(BaseFormatter) configuration #------------------------------------------------------------------------------ ## The default pretty-printer. # # This uses :mod:`IPython.lib.pretty` to compute the format data of the object. # If the object cannot be pretty printed, :func:`repr` is used. See the # documentation of :mod:`IPython.lib.pretty` for details on how to write pretty # printers. Here is a simple example:: # # def dtype_pprinter(obj, p, cycle): # if cycle: # return p.text('dtype(...)') # if hasattr(obj, 'fields'): # if obj.fields is None: # p.text(repr(obj)) # else: # p.begin_group(7, 'dtype([') # for i, field in enumerate(obj.descr): # if i > 0: # p.text(',') # p.breakable() # p.pretty(field) # p.end_group(7, '])') # See also: BaseFormatter.deferred_printers # c.PlainTextFormatter.deferred_printers = {} # Default: '' # c.PlainTextFormatter.float_precision = '' ## Truncate large collections (lists, dicts, tuples, sets) to this size. # # Set to 0 to disable truncation. # Default: 1000 # c.PlainTextFormatter.max_seq_length = 1000 # Default: 79 # c.PlainTextFormatter.max_width = 79 # Default: '\n' # c.PlainTextFormatter.newline = '\n' # Default: True # c.PlainTextFormatter.pprint = True # See also: BaseFormatter.singleton_printers # c.PlainTextFormatter.singleton_printers = {} # See also: BaseFormatter.type_printers # c.PlainTextFormatter.type_printers = {} # Default: False # c.PlainTextFormatter.verbose = False #------------------------------------------------------------------------------ # Completer(Configurable) configuration #------------------------------------------------------------------------------ ## Enable unicode completions, e.g. \alpha<tab> . 
Includes completion of latex # commands, unicode names, and expanding unicode characters back to latex # commands. # Default: True # c.Completer.backslash_combining_completions = True ## Enable debug for the Completer. Mostly print extra information for # experimental jedi integration. # Default: False # c.Completer.debug = False ## Activate greedy completion PENDING DEPRECTION. this is now mostly taken care # of with Jedi. # # This will enable completion on elements of lists, results of function calls, # etc., but can be unsafe because the code is actually evaluated on TAB. # Default: False # c.Completer.greedy = False ## Experimental: restrict time (in milliseconds) during which Jedi can compute # types. Set to 0 to stop computing types. Non-zero value lower than 100ms may # hurt performance by preventing jedi to build its cache. # Default: 400 # c.Completer.jedi_compute_type_timeout = 400 ## Experimental: Use Jedi to generate autocompletions. Default to True if jedi is # installed. # Default: True # c.Completer.use_jedi = True #------------------------------------------------------------------------------ # IPCompleter(Completer) configuration #------------------------------------------------------------------------------ ## Extension of the completer class with IPython-specific features ## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex # commands, unicode names, and expanding unicode characters back to latex # commands. # See also: Completer.backslash_combining_completions # c.IPCompleter.backslash_combining_completions = True ## Enable debug for the Completer. Mostly print extra information for # experimental jedi integration. # See also: Completer.debug # c.IPCompleter.debug = False ## Activate greedy completion # See also: Completer.greedy # c.IPCompleter.greedy = False ## Experimental: restrict time (in milliseconds) during which Jedi can compute # types. 
# See also: Completer.jedi_compute_type_timeout # c.IPCompleter.jedi_compute_type_timeout = 400 ## DEPRECATED as of version 5.0. # # Instruct the completer to use __all__ for the completion # # Specifically, when completing on ``object.<tab>``. # # When True: only those names in obj.__all__ will be included. # # When False [default]: the __all__ attribute is ignored # Default: False # c.IPCompleter.limit_to__all__ = False ## Whether to merge completion results into a single list # # If False, only the completion results from the first non-empty completer will # be returned. # Default: True # c.IPCompleter.merge_completions = True ## Instruct the completer to omit private method names # # Specifically, when completing on ``object.<tab>``. # # When 2 [default]: all names that start with '_' will be excluded. # # When 1: all 'magic' names (``__foo__``) will be excluded. # # When 0: nothing will be excluded. # Choices: any of [0, 1, 2] # Default: 2 # c.IPCompleter.omit__names = 2 ## Experimental: Use Jedi to generate autocompletions. Default to True if jedi is # installed. # See also: Completer.use_jedi # c.IPCompleter.use_jedi = True #------------------------------------------------------------------------------ # ScriptMagics(Magics) configuration #------------------------------------------------------------------------------ ## Magics for talking to scripts # # This defines a base `%%script` cell magic for running a cell with a program in # a subprocess, and registers a few top-level magics that call %%script with # common interpreters. ## Extra script cell magics to define # # This generates simple wrappers of `%%script foo` as `%%foo`. # # If you want to add script magics that aren't on your path, specify them in # script_paths # Default: [] # c.ScriptMagics.script_magics = [] ## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby' # # Only necessary for items in script_magics where the default path will not find # the right interpreter. 
# Default: {} # c.ScriptMagics.script_paths = {} #------------------------------------------------------------------------------ # LoggingMagics(Magics) configuration #------------------------------------------------------------------------------ ## Magics related to all logging machinery. ## Suppress output of log state when logging is enabled # Default: False # c.LoggingMagics.quiet = False #------------------------------------------------------------------------------ # StoreMagics(Magics) configuration #------------------------------------------------------------------------------ ## Lightweight persistence for python variables. # # Provides the %store magic. ## If True, any %store-d variables will be automatically restored when IPython # starts. # Default: False # c.StoreMagics.autorestore = False
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import tempfile

import mock
import six

from rally.cli import cliutils
from rally.cli.commands import verify
from rally.cli import envutils
from rally import consts
from rally import exceptions
from rally import plugins
from rally.verification import reporter
from tests.unit import fakes
from tests.unit import test


class VerifyCommandsTestCase(test.TestCase):
    """Unit tests for the ``rally verify`` CLI commands.

    All rally API calls go through a fake API object, so every test only
    checks which API methods the CLI layer invokes and with which arguments.
    """

    def setUp(self):
        super(VerifyCommandsTestCase, self).setUp()
        self.verify = verify.VerifyCommands()
        self.fake_api = fakes.FakeAPI()

        # Shared identifiers used by the fixtures below.
        self.deployment_name = "Some Deploy"
        self.deployment_uuid = "some-deploy-uuid"
        self.verifier_name = "My Verifier"
        self.verifier_uuid = "my-verifier-uuid"
        self.verifier_type = "OldSchoolTestTool"
        self.verifier_namespace = "OpenStack"
        self.verification_uuid = "uuuiiiiddd"

        # Canned verifier record as returned by api.verifier.get().
        self.verifier_data = {
            "uuid": self.verifier_uuid,
            "name": self.verifier_name,
            "type": self.verifier_type,
            "namespace": self.verifier_namespace,
            "description": "The best tool in the world",
            "created_at": "2016-01-01T17:00:03",
            "updated_at": "2016-01-01T17:01:05",
            "status": "installed",
            "source": "https://example.com",
            "version": "master",
            "system_wide": False,
            "extra_settings": {},
            "manager.repo_dir": "./verifiers/repo",
            "manager.venv_dir": "./verifiers/.venv"
        }
        # Canned verification record. NOTE: a stray duplicate top-level
        # "test_2" entry (outside the "tests" dict) was removed -- both tests
        # belong under "tests", matching tests_count == 2.
        self.verification_data = {
            "uuid": self.verification_uuid,
            "verifier_uuid": self.verifier_uuid,
            "deployment_uuid": self.deployment_uuid,
            "tags": ["bar", "foo"],
            "status": "success",
            "created_at": "2016-01-01T17:00:03",
            "updated_at": "2016-01-01T17:01:05",
            "tests_count": 2,
            "tests_duration": 4,
            "success": 1,
            "skipped": 0,
            "expected_failures": 0,
            "unexpected_success": 0,
            "failures": 1,
            "run_args": {
                "load_list": ["test_1", "test_2"],
                "skip_list": ["test_3"],
                "concurrency": "3"},
            "tests": {
                "test_1": {
                    "name": "test_1",
                    "status": "success",
                    "duration": 2,
                    "tags": []},
                "test_2": {
                    "name": "test_2",
                    "status": "fail",
                    "duration": 2,
                    "traceback": "Some traceback"}
            }
        }
        # Canned raw results as returned by api.verification.start()/rerun().
        # NOTE: "test_2" was previously placed outside the "tests" dict; it
        # is now inside so the fixture agrees with totals["tests_count"] == 2.
        self.results_data = {
            "totals": {"tests_count": 2,
                       "tests_duration": 4,
                       "success": 1,
                       "skipped": 0,
                       "expected_failures": 0,
                       "unexpected_success": 0,
                       "failures": 1},
            "tests": {
                "test_1": {
                    "name": "test_1",
                    "status": "success",
                    "duration": 2,
                    "tags": []},
                "test_2": {
                    "name": "test_2",
                    "status": "fail",
                    "duration": 4,
                    "tags": []}
            }
        }

    @mock.patch("rally.cli.commands.verify.cliutils.print_list")
    @mock.patch("rally.cli.commands.verify.logging.is_debug",
                return_value=True)
    def test_list_plugins(self, mock_is_debug, mock_print_list):
        """list_plugins should forward the namespace filter to the API."""
        self.verify.list_plugins(self.fake_api, namespace="some")
        self.fake_api.verifier.list_plugins.assert_called_once_with(
            namespace="some")

    @mock.patch("rally.cli.commands.verify.fileutils.update_globals_file")
    def test_create_verifier(self, mock_update_globals_file):
        """create_verifier should create, fetch and set the new verifier."""
        self.fake_api.verifier.create.return_value = self.verifier_uuid
        self.fake_api.verifier.get.return_value = self.verifier_data

        self.verify.create_verifier(self.fake_api, "a", vtype="b",
                                    namespace="c", source="d", version="e",
                                    system_wide=True, extra={})
        self.fake_api.verifier.create.assert_called_once_with(
            name="a", vtype="b", namespace="c", source="d", version="e",
            system_wide=True, extra_settings={})
        self.fake_api.verifier.get.assert_called_once_with(
            verifier_id=self.verifier_uuid)
        # Creating a verifier makes it the current one in the globals file.
        mock_update_globals_file.assert_called_once_with(
            envutils.ENV_VERIFIER, self.verifier_uuid)
@mock.patch("rally.cli.commands.verify.fileutils.update_globals_file") def test_use_verifier(self, mock_update_globals_file): self.fake_api.verifier.get.return_value = self.verifier_data self.verify.use_verifier(self.fake_api, self.verifier_uuid) self.fake_api.verifier.get.assert_called_once_with( verifier_id=self.verifier_uuid) mock_update_globals_file.assert_called_once_with( envutils.ENV_VERIFIER, self.verifier_uuid) @mock.patch("rally.cli.commands.verify.cliutils.print_list") def test_list_verifiers_empty_verifiers(self, mock_print_list): self.fake_api.verifier.list.return_value = [] self.verify.list_verifiers(self.fake_api) self.verify.list_verifiers(self.fake_api, "foo") self.verify.list_verifiers(self.fake_api) self.fake_api.verifier.list.assert_has_calls( [mock.call(status=None), mock.call(status="foo")]) @mock.patch("rally.cli.commands.verify.cliutils.print_list") def test_list_verifiers(self, mock_print_list): self.fake_api.verifier.list.return_value = [self.verifier_data] additional_fields = ["UUID", "Name", "Type", "Namespace", "Created at", "Updated at", "Status", "Version", "System-wide", "Active"] additional_keys = ["normalize_field_names", "sortby_index", "formatters"] self.verify.list_verifiers(self.fake_api) # astarove: should be replaced on mock_print_list.assert_called_once() self.assertTrue(1, len(mock_print_list.call_args())) self.assertEqual(([self.verifier_data], additional_fields), mock_print_list.call_args[0]) self.assertEqual(additional_keys.sort(), list(mock_print_list.call_args[1].keys()).sort()) @mock.patch("rally.cli.commands.verify.envutils.get_global") def test_show_verifier(self, mock_get_global): self.fake_api.verifier.get.return_value = self.verifier_data self.verify._base_dir = mock.Mock(return_value="./verifiers/") # It is a hard task to mock default value of function argument, so we # need to apply this workaround original_print_dict = cliutils.print_dict print_dict_calls = [] def print_dict(*args, **kwargs): 
print_dict_calls.append(six.StringIO()) kwargs["out"] = print_dict_calls[-1] original_print_dict(*args, **kwargs) with mock.patch.object(verify.cliutils, "print_dict", new=print_dict): self.verify.show_verifier(self.fake_api, self.verifier_uuid) self.assertEqual(1, len(print_dict_calls)) self.assertEqual( "+---------------------------------------------+\n" "| Verifier |\n" "+----------------+----------------------------+\n" "| UUID | my-verifier-uuid |\n" "| Status | installed |\n" "| Created at | 2016-01-01 17:00:03 |\n" "| Updated at | 2016-01-01 17:01:05 |\n" "| Active | - |\n" "| Name | My Verifier |\n" "| Description | The best tool in the world |\n" "| Type | OldSchoolTestTool |\n" "| Namespace | OpenStack |\n" "| Source | https://example.com |\n" "| Version | master |\n" "| System-wide | False |\n" "| Extra settings | - |\n" "| Location | ./verifiers/repo |\n" "| Venv location | ./verifiers/.venv |\n" "+----------------+----------------------------+\n", print_dict_calls[0].getvalue()) self.fake_api.verifier.get.assert_called_once_with( verifier_id=self.verifier_uuid) def test_delete_verifier(self): self.verify.delete_verifier(self.fake_api, "v_id", "d_id", force=True) self.fake_api.verifier.delete.assert_called_once_with( verifier_id="v_id", deployment_id="d_id", force=True) def test_update_verifier(self): self.verify.update_verifier(self.fake_api, self.verifier_uuid) self.assertFalse(self.fake_api.verifier.update.called) self.verify.update_verifier(self.fake_api, self.verification_uuid, update_venv=True, system_wide=True) self.assertFalse(self.fake_api.verifier.update.called) self.verify.update_verifier(self.fake_api, self.verification_uuid, system_wide=True, no_system_wide=True) self.assertFalse(self.fake_api.verifier.update.called) self.verify.update_verifier(self.fake_api, self.verification_uuid, version="a", system_wide=True) self.fake_api.verifier.update.assert_called_once_with( verifier_id=self.verification_uuid, system_wide=True, version="a", 
            update_venv=None)

    @mock.patch("rally.cli.commands.verify.open", create=True)
    @mock.patch("rally.cli.commands.verify.os.path.exists")
    def test_configure_verifier(self, mock_exists, mock_open):
        # Conflicting arguments (new_configuration together with
        # reconfigure/show) should short-circuit before reaching the API.
        self.verify.configure_verifier(self.fake_api, self.verifier_uuid,
                                       self.deployment_uuid,
                                       new_configuration="/p/a/t/h",
                                       reconfigure=True, show=True)
        self.assertFalse(self.fake_api.verifier.configure.called)

        # A non-existent configuration path should also be a no-op.
        mock_exists.return_value = False
        self.verify.configure_verifier(self.fake_api, self.verifier_uuid,
                                       self.deployment_uuid,
                                       new_configuration="/p/a/t/h",
                                       show=True)
        self.assertFalse(self.fake_api.verifier.override_configuration.called)

        # With an existing path, the file content is read and used to
        # override the configuration.
        mock_exists.return_value = True
        mock_open.return_value = mock.mock_open(read_data="data").return_value
        self.verify.configure_verifier(self.fake_api, self.verifier_uuid,
                                       self.deployment_uuid,
                                       new_configuration="/p/a/t/h",
                                       show=True)
        mock_open.assert_called_once_with("/p/a/t/h")
        # NOTE(review): this line *calls* the mock rather than asserting on
        # it -- it verifies nothing; presumably it was meant to be
        # override_configuration.assert_called_once_with(...). Confirm intent
        # before changing.
        self.fake_api.verifier.override_configuration(self.verifier_uuid,
                                                      self.deployment_uuid,
                                                      "data")

        # extra_options given as a path to an INI-style file.
        tf = tempfile.NamedTemporaryFile()
        with open(tf.name, "w") as f:
            f.write("[DEFAULT]\nopt = val\n[foo]\nopt = val")
        self.verify.configure_verifier(self.fake_api, self.verifier_uuid,
                                       self.deployment_uuid,
                                       extra_options=tf.name)
        expected_options = {"foo": {"opt": "val"},
                            "DEFAULT": {"opt": "val"}}
        self.fake_api.verifier.configure.assert_called_once_with(
            verifier=self.verifier_uuid, deployment_id=self.deployment_uuid,
            extra_options=expected_options, reconfigure=False)

        # extra_options given inline as a YAML/JSON-like string should parse
        # to the same dictionary.
        self.verify.configure_verifier(self.fake_api, self.verifier_uuid,
                                       self.deployment_uuid,
                                       extra_options="{foo: {opt: val}, "
                                                     "DEFAULT: {opt: val}}")
        self.fake_api.verifier.configure.assert_called_with(
            verifier=self.verifier_uuid, deployment_id=self.deployment_uuid,
            extra_options=expected_options, reconfigure=False)

    def test_list_verifier_tests(self):
        # Both a non-empty and an empty test listing go through the same
        # API call (continued in the next chunk).
        self.fake_api.verifier.list_tests.return_value = ["test_1", "test_2"]
        self.verify.list_verifier_tests(self.fake_api, self.verifier_uuid, "p")
        # Second listing with no tests available; both calls must have been
        # forwarded with the same pattern.
        self.fake_api.verifier.list_tests.return_value = []
        self.verify.list_verifier_tests(self.fake_api, self.verifier_uuid, "p")

        self.fake_api.verifier.list_tests.assert_has_calls(
            [mock.call(verifier_id=self.verifier_uuid, pattern="p"),
             mock.call(verifier_id=self.verifier_uuid, pattern="p")])

    def test_add_verifier_ext(self):
        # add_verifier_ext should map its positional CLI arguments onto the
        # API's keyword arguments.
        self.verify.add_verifier_ext(self.fake_api, self.verifier_uuid,
                                     "a", "b", "c")
        self.fake_api.verifier.add_extension.assert_called_once_with(
            verifier_id=self.verifier_uuid, source="a", version="b",
            extra_settings="c")

    @mock.patch("rally.cli.commands.verify.cliutils.print_list")
    @mock.patch("rally.cli.commands.verify.logging.is_debug",
                return_value=True)
    def test_list_verifier_exts_empty_list(self, mock_is_debug,
                                           mock_print_list):
        # Listing extensions twice with an empty result should still hit the
        # API both times.
        self.fake_api.verifier.list_extensions.return_value = []
        self.verify.list_verifier_exts(self.fake_api, self.verifier_uuid)
        self.verify.list_verifier_exts(self.fake_api, self.verifier_uuid)
        self.fake_api.verifier.list_extensions.assert_has_calls(
            [mock.call(verifier_id=self.verifier_uuid),
             mock.call(verifier_id=self.verifier_uuid)])

    @mock.patch("rally.cli.commands.verify.cliutils.print_list")
    @mock.patch("rally.cli.commands.verify.logging.is_debug",
                return_value=False)
    def test_list_verifier_exts(self, mock_is_debug, mock_print_list):
        # With debug logging off, the "Location" column is not shown.
        ver_exts = self.fake_api.verifier.list_extensions
        ver_exts.return_value = [mock.MagicMock()]
        fields = ["Name", "Entry point"]
        self.verify.list_verifier_exts(self.fake_api, self.verifier_uuid)
        self.verify.list_verifier_exts(self.fake_api, self.verifier_uuid)
        self.fake_api.verifier.list_extensions.assert_has_calls(
            [mock.call(verifier_id=self.verifier_uuid),
             mock.call(verifier_id=self.verifier_uuid)])
        mock_print_list.assert_called_with(ver_exts.return_value, fields,
                                           normalize_field_names=True)

    @mock.patch("rally.cli.commands.verify.cliutils.print_list")
    @mock.patch("rally.cli.commands.verify.logging.is_debug",
                return_value=True)
    def test_list_verifier_exts_with_logging(self, mock_is_debug,
                                             mock_print_list):
        # With debug logging on, the extra "Location" column is included.
        ver_exts = self.fake_api.verifier.list_extensions
        ver_exts.return_value = [mock.MagicMock()]
        fields = ["Name", "Entry point", "Location"]
        self.verify.list_verifier_exts(self.fake_api, self.verifier_uuid)
        self.verify.list_verifier_exts(self.fake_api, self.verifier_uuid)
        self.fake_api.verifier.list_extensions.assert_has_calls(
            [mock.call(verifier_id=self.verifier_uuid),
             mock.call(verifier_id=self.verifier_uuid)])
        mock_print_list.assert_called_with(ver_exts.return_value, fields,
                                           normalize_field_names=True)

    def test_delete_verifier_ext(self):
        # delete_verifier_ext should forward verifier id and extension name.
        self.verify.delete_verifier_ext(self.fake_api, self.verifier_uuid,
                                        "ext_name")
        self.fake_api.verifier.delete_extension.assert_called_once_with(
            verifier_id=self.verifier_uuid, name="ext_name")

    @mock.patch("rally.cli.commands.verify.fileutils.update_globals_file")
    @mock.patch("rally.cli.commands.verify.os.path.exists")
    def test_start(self, mock_exists, mock_update_globals_file):
        # pattern and load_list are mutually exclusive: no API call expected.
        self.verify.start(self.fake_api, self.verifier_uuid,
                          self.deployment_uuid, pattern="pattern",
                          load_list="load-list")
        self.assertFalse(self.fake_api.verification.start.called)

        verification = self.verification_data

        self.fake_api.verification.start.return_value = {
            "verification": verification,
            "totals": self.results_data["totals"],
            "tests": self.results_data["tests"]}
        self.fake_api.verification.get.return_value = verification

        # A load_list path that does not exist should be rejected up front.
        mock_exists.return_value = False
        self.verify.start(self.fake_api, self.verifier_uuid,
                          self.deployment_uuid, load_list="/p/a/t/h")
        self.assertFalse(self.fake_api.verification.start.called)

        # A real load_list file is read line by line into a python list.
        mock_exists.return_value = True
        tf = tempfile.NamedTemporaryFile()
        with open(tf.name, "w") as f:
            f.write("test_1\ntest_2")
        self.verify.start(self.fake_api, self.verifier_uuid,
                          self.deployment_uuid, tags=["foo"],
                          load_list=tf.name)
        self.fake_api.verification.start.assert_called_once_with(
            verifier_id=self.verifier_uuid,
            deployment_id=self.deployment_uuid,
            tags=["foo"], load_list=["test_1", "test_2"])

        mock_exists.return_value = False
        self.fake_api.verification.start.reset_mock()
        # NOTE(review): self.verifier_uuid is passed in the deployment
        # position here -- presumably self.deployment_uuid was intended;
        # harmless because start() is expected NOT to be called, but confirm.
        self.verify.start(self.fake_api, self.verifier_uuid,
                          self.verifier_uuid, skip_list="/p/a/t/h")
        self.assertFalse(self.fake_api.verification.start.called)

        # A skip_list file maps test names to optional skip reasons.
        tf = tempfile.NamedTemporaryFile()
        with open(tf.name, "w") as f:
            f.write("test_1:\ntest_2: Reason\n")
        mock_exists.return_value = True
        self.verify.start(self.fake_api, self.verifier_uuid,
                          self.deployment_uuid, skip_list=tf.name)
        self.fake_api.verification.start.assert_called_once_with(
            verifier_id=self.verifier_uuid,
            deployment_id=self.deployment_uuid,
            tags=None, skip_list={"test_1": None, "test_2": "Reason"})

        # Same pattern for xfail_list: missing path first, then a real file.
        mock_exists.return_value = False
        self.fake_api.verification.start.reset_mock()
        self.verify.start(self.fake_api, self.verifier_uuid,
                          self.deployment_uuid, xfail_list="/p/a/t/h")
        self.assertFalse(self.fake_api.verification.start.called)

        tf = tempfile.NamedTemporaryFile()
        with open(tf.name, "w") as f:
            f.write("test_1:\ntest_2: Reason\n")
        mock_exists.return_value = True
        self.verify.start(self.fake_api, self.verifier_uuid,
                          self.deployment_uuid, xfail_list=tf.name)
        self.fake_api.verification.start.assert_called_once_with(
            verifier_id=self.verifier_uuid,
            deployment_id=self.deployment_uuid,
            tags=None, xfail_list={"test_1": None, "test_2": "Reason"})

        # A successful start makes the verification the current one.
        self.fake_api.verification.get.assert_called_with(
            verification_uuid=self.verification_uuid)
        mock_update_globals_file.assert_called_with(
            envutils.ENV_VERIFICATION, self.verification_uuid)

        # With do_use=False the new verification must NOT be made current.
        self.fake_api.verification.get.reset_mock()
        mock_update_globals_file.reset_mock()
        self.verify.start(self.fake_api, self.verifier_uuid,
                          self.deployment_uuid, detailed=True, do_use=False)
        self.assertFalse(self.fake_api.verification.get.called)
        self.assertFalse(mock_update_globals_file.called)

    @mock.patch("rally.cli.commands.verify.os.path.exists")
    @mock.patch("rally.cli.commands.verify.fileutils.update_globals_file")
    def test_start_on_unfinished_deployment(self, mock_update_globals_file,
                                            mock_exists):
        deployment_id = self.deployment_uuid
deployment_name = self.deployment_name exc = exceptions.DeploymentNotFinishedStatus( name=deployment_name, uuid=deployment_id, status=consts.DeployStatus.DEPLOY_INIT) self.fake_api.verification.start.side_effect = exc self.assertEqual( 1, self.verify.start(self.fake_api, self.deployment_uuid, deployment_id)) @mock.patch("rally.cli.commands.verify.fileutils.update_globals_file") def test_use(self, mock_update_globals_file): self.fake_api.verification.get.return_value = self.verification_data self.verify.use(self.fake_api, self.verification_uuid) self.fake_api.verification.get.assert_called_once_with( verification_uuid=self.verification_uuid) mock_update_globals_file.assert_called_once_with( envutils.ENV_VERIFICATION, self.verification_uuid) @mock.patch("rally.cli.commands.verify.fileutils.update_globals_file") def test_rerun(self, mock_update_globals_file): self.fake_api.verification.rerun.return_value = { "verification": self.verification_data, "totals": self.results_data["totals"], "tests": self.results_data["tests"]} self.fake_api.verification.get.return_value = self.verification_data self.verify.rerun(self.fake_api, self.verification_uuid, self.deployment_uuid, failed=True) self.fake_api.verification.rerun.assert_called_once_with( verification_uuid=self.verification_uuid, concurrency=None, deployment_id="some-deploy-uuid", failed=True, tags=None) mock_update_globals_file.assert_called_once_with( envutils.ENV_VERIFICATION, self.verification_uuid) def test_show(self): verification = self.verification_data self.fake_api.verifier.get.return_value = self.verifier_data self.fake_api.verification.get.return_value = verification self.fake_api.deployment.get.return_value = { "name": self.deployment_name, "uuid": self.deployment_uuid} # It is a hard task to mock default value of function argument, so we # need to apply this workaround original_print_dict = cliutils.print_dict print_dict_calls = [] def print_dict(*args, **kwargs): print_dict_calls.append(six.StringIO()) 
kwargs["out"] = print_dict_calls[-1] original_print_dict(*args, **kwargs) with mock.patch.object(verify.cliutils, "print_dict", new=print_dict): self.verify.show(self.fake_api, self.verifier_uuid, detailed=True) self.assertEqual(1, len(print_dict_calls)) self.assertEqual( "+----------------------------------------------------------------" "--------------------+\n" "| Verification " " |\n" "+---------------------+------------------------------------------" "--------------------+\n" "| UUID | uuuiiiiddd " " |\n" "| Status | success " " |\n" "| Started at | 2016-01-01 17:00:03 " " |\n" "| Finished at | 2016-01-01 17:01:05 " " |\n" "| Duration | 0:01:02 " " |\n" "| Run arguments | concurrency: 3 " " |\n" "| | load_list: (value is too long, will be di" "splayed separately) |\n" "| | skip_list: (value is too long, will be di" "splayed separately) |\n" "| Tags | bar, foo " " |\n" "| Verifier name | My Verifier (UUID: my-verifier-uuid) " " |\n" "| Verifier type | OldSchoolTestTool (namespace: OpenStack) " " |\n" "| Deployment name | Some Deploy (UUID: some-deploy-uuid) " " |\n" "| Tests count | 2 " " |\n" "| Tests duration, sec | 4 " " |\n" "| Success | 1 " " |\n" "| Skipped | 0 " " |\n" "| Expected failures | 0 " " |\n" "| Unexpected success | 0 " " |\n" "| Failures | 1 " " |\n" "+---------------------+------------------------------------------" "--------------------+\n", print_dict_calls[0].getvalue()) self.fake_api.verification.get.assert_called_once_with( verification_uuid=self.verifier_uuid) with mock.patch.object(verify.cliutils, "print_dict", new=print_dict): self.verify.show(self.fake_api, self.verifier_uuid, detailed=False) self.assertEqual(2, len(print_dict_calls)) self.assertEqual("+---------------------------------------------------" "--------------------------------------+\n" "| Verification " " |\n" "+---------------------+-----------------------------" "--------------------------------------+\n" "| UUID | uuuiiiiddd " " |\n" "| Status | success " " |\n" "| 
Started at | 2016-01-01 17:00:03 " " |\n" "| Finished at | 2016-01-01 17:01:05 " " |\n" "| Duration | 0:01:02 " " |\n" "| Run arguments | concurrency: 3 " " |\n" "| | load_list: (value is too lon" "g, use 'detailed' flag to display it) |\n" "| | skip_list: (value is too lon" "g, use 'detailed' flag to display it) |\n" "| Tags | bar, foo " " |\n" "| Verifier name | My Verifier (UUID: my-verifi" "er-uuid) |\n" "| Verifier type | OldSchoolTestTool (namespace" ": OpenStack) |\n" "| Deployment name | Some Deploy (UUID: some-depl" "oy-uuid) |\n" "| Tests count | 2 " " |\n" "| Tests duration, sec | 4 " " |\n" "| Success | 1 " " |\n" "| Skipped | 0 " " |\n" "| Expected failures | 0 " " |\n" "| Unexpected success | 0 " " |\n" "| Failures | 1 " " |\n" "+---------------------+-----------------------------" "--------------------------------------+\n", print_dict_calls[1].getvalue()) self.fake_api.verification.get.assert_called_with( verification_uuid=self.verifier_uuid) @mock.patch("rally.cli.commands.verify.cliutils.print_list") def test_list_empty_verifications(self, mock_print_list): self.fake_api.verification.list.return_value = [] self.verify.list(self.fake_api, self.verifier_uuid, self.deployment_uuid) self.verify.list(self.fake_api, self.verifier_uuid, self.deployment_uuid, "foo", "bar") self.verify.list(self.fake_api) self.fake_api.verification.list.assert_has_calls( [mock.call(verifier_id=self.verifier_uuid, deployment_id=self.deployment_uuid, tags=None, status=None), mock.call(verifier_id=self.verifier_uuid, deployment_id=self.deployment_uuid, tags="foo", status="bar"), mock.call(verifier_id=None, deployment_id=None, tags=None, status=None)]) @mock.patch("rally.cli.commands.verify.cliutils.print_list") def test_list(self, mock_print_list): self.fake_api.verification.list.return_value = [self.verification_data] self.verify.list(self.fake_api, self.verifier_uuid, self.deployment_uuid) additional_fields = ["UUID", "Tags", "Verifier name", "Deployment name", "Started 
at", "Finished at", "Duration", "Status"] additional_keys = ["normalize_field_names", "sortby_index", "formatters"] # astarove: Should be replaced on mock_print_list.assert_called_once()) self.assertTrue(1, len(mock_print_list.call_args())) self.assertEqual(([self.verification_data], additional_fields), mock_print_list.call_args[0]) self.assertEqual(additional_keys.sort(), list(mock_print_list.call_args[1].keys()).sort()) def test_delete(self): self.verify.delete(self.fake_api, "v_uuid") self.fake_api.verification.delete.assert_called_once_with( verification_uuid="v_uuid") self.verify.delete(self.fake_api, ["v1_uuid", "v2_uuid"]) self.fake_api.verification.delete.assert_has_calls( [mock.call(verification_uuid="v1_uuid"), mock.call(verification_uuid="v2_uuid")]) @mock.patch("rally.cli.commands.verify.os") @mock.patch("rally.cli.commands.verify.webbrowser.open_new_tab") @mock.patch("rally.cli.commands.verify.open", create=True) def test_report(self, mock_open, mock_open_new_tab, mock_os): output_dest = "/p/a/t/h" output_type = "type" content = "content" self.fake_api.verification.report.return_value = { "files": {output_dest: content}, "open": output_dest} mock_os.path.exists.return_value = False self.verify.report(self.fake_api, verification_uuid=self.verifier_uuid, output_type=output_type, output_dest=output_dest, open_it=True) self.fake_api.verification.report.assert_called_once_with( uuids=[self.verifier_uuid], output_type=output_type, output_dest=output_dest) mock_open.assert_called_once_with(mock_os.path.abspath.return_value, "w") mock_os.makedirs.assert_called_once_with( mock_os.path.dirname.return_value) mock_open.reset_mock() mock_open_new_tab.reset_mock() mock_os.makedirs.reset_mock() mock_os.path.exists.return_value = True self.fake_api.verification.report.return_value = { "files": {output_dest: content}, "print": "foo"} self.verify.report(self.fake_api, self.verifier_uuid, output_type=output_type, output_dest=output_dest) 
        # Tail of test_report: with an existing dir and a "print"-style
        # result, neither a browser tab nor makedirs should be triggered.
        self.assertFalse(mock_open_new_tab.called)
        self.assertFalse(mock_os.makedirs.called)

    @mock.patch("rally.cli.commands.verify.VerifyCommands.use")
    @mock.patch("rally.cli.commands.verify.open", create=True)
    @mock.patch("rally.cli.commands.verify.os.path.exists")
    def test_import_results(self, mock_exists, mock_open, mock_use):
        # If the results file does not exist, the command bails out before
        # ever reaching the API.
        mock_exists.return_value = False
        self.verify.import_results(self.fake_api, self.verifier_uuid,
                                   self.deployment_uuid)
        self.assertFalse(self.fake_api.verification.import_results.called)

        verification = self.verification_data
        # NOTE(review): `results` is assigned but only used to build the
        # API return value below.
        results = self.results_data
        self.fake_api.verification.import_results.return_value = (
            verification, results)

        # Happy path: the file exists, its contents are read and forwarded
        # to the API, and the new verification is "used" by default.
        mock_exists.return_value = True
        mock_open.return_value = mock.mock_open(read_data="data").return_value
        self.verify.import_results(self.fake_api,
                                   verifier_id=self.verifier_uuid,
                                   deployment=self.deployment_uuid,
                                   file_to_parse="/p/a/t/h")
        mock_open.assert_called_once_with("/p/a/t/h", "r")
        self.fake_api.verification.import_results.assert_called_once_with(
            verifier_id=self.verifier_uuid,
            deployment_id=self.deployment_uuid, data="data")
        mock_use.assert_called_with(self.fake_api, self.verification_uuid)

        # do_use=False must suppress the implicit "use" of the result.
        mock_use.reset_mock()
        self.verify.import_results(self.fake_api, "v_id", "d_id",
                                   do_use=False)
        self.assertFalse(mock_use.called)

    @plugins.ensure_plugins_are_loaded
    def test_default_reporters(self):
        # Guard test: every built-in (rally.*) reporter plugin must be
        # listed in verify.DEFAULT_REPORT_TYPES; external plugins are
        # deliberately ignored.
        available_reporters = {
            cls.get_name().lower()
            for cls in reporter.VerificationReporter.get_all()
            # ignore possible external plugins
            if cls.__module__.startswith("rally")}
        listed_in_cli = {name.lower() for name in verify.DEFAULT_REPORT_TYPES}
        not_listed = available_reporters - listed_in_cli
        if not_listed:
            self.fail("All default reporters should be listed in "
                      "%s.DEFAULTS_REPORTERS (case of letters doesn't matter)."
                      " Missed reporters: %s"
                      % (verify.__name__, ", ".join(not_listed)))
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Deep Neural Network estimators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from tensorflow.contrib import layers from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.framework.python.framework import experimental from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.layers.python.layers import optimizers from tensorflow.contrib.learn.python.learn import evaluable from tensorflow.contrib.learn.python.learn import metric_spec from tensorflow.contrib.learn.python.learn import monitors as monitor_lib from tensorflow.contrib.learn.python.learn import trainable from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.contrib.learn.python.learn.utils import export from tensorflow.python import summary from tensorflow.python.ops import nn from 
tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope

# Name of the variable that holds the optional centered bias
# (see the enable_centered_bias constructor arguments below).
_CENTERED_BIAS_WEIGHT = "centered_bias_weight"

# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05


def _get_feature_dict(features):
  """Returns `features` as a dict, wrapping a bare `Tensor` under key ""."""
  if isinstance(features, dict):
    return features
  return {"": features}


def _get_optimizer(optimizer):
  """Calls `optimizer` if it is a factory callable, else returns it as-is."""
  if callable(optimizer):
    return optimizer()
  else:
    return optimizer


def _add_hidden_layer_summary(value, tag):
  """Adds zero-fraction and activation-histogram summaries for one layer."""
  summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
  summary.histogram("%s_activation" % tag, value)


def _dnn_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
      `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `_Head` instance.
      * hidden_units: List of hidden units per layer.
      * feature_columns: An iterable containing all the feature columns used
          by the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training. If `None`, will use the Adagrad
          optimizer with a default learning rate of 0.05.
      * activation_fn: Activation function applied to each layer. If `None`,
          will use `tf.nn.relu`.
      * dropout: When not `None`, the probability we will drop out a given
          coordinate.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * embedding_lr_multipliers: Optional. A dictionary from
          `EmbeddingColumn` to a `float` multiplier. Multiplier will be used
          to multiply with learning rate for the embedding variables.
    config: `RunConfig` object to configure the runtime settings.
Returns: predictions: A dict of `Tensor` objects. loss: A scalar containing the loss of the step. train_op: The op for training. """ head = params["head"] hidden_units = params["hidden_units"] feature_columns = params["feature_columns"] optimizer = params.get("optimizer") or "Adagrad" activation_fn = params.get("activation_fn") dropout = params.get("dropout") gradient_clip_norm = params.get("gradient_clip_norm") num_ps_replicas = config.num_ps_replicas if config else 0 embedding_lr_multipliers = params.get("embedding_lr_multipliers", {}) features = _get_feature_dict(features) parent_scope = "dnn" input_layer_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20)) input_layer_scope = parent_scope + "/input_from_feature_columns" with variable_scope.variable_scope( input_layer_scope, values=list(six.itervalues(features)), partitioner=input_layer_partitioner) as scope: net = layers.input_from_feature_columns( columns_to_tensors=features, feature_columns=feature_columns, weight_collections=[parent_scope], scope=scope) hidden_layer_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas)) for layer_id, num_hidden_units in enumerate(hidden_units): with variable_scope.variable_scope( parent_scope + "/hiddenlayer_%d" % layer_id, values=[net], partitioner=hidden_layer_partitioner) as scope: net = layers.fully_connected( net, num_hidden_units, activation_fn=activation_fn, variables_collections=[parent_scope], scope=scope) if dropout is not None and mode == model_fn.ModeKeys.TRAIN: net = layers.dropout( net, keep_prob=(1.0 - dropout)) _add_hidden_layer_summary(net, scope.name) with variable_scope.variable_scope( parent_scope + "/logits", values=[net], partitioner=hidden_layer_partitioner) as scope: logits = layers.fully_connected( net, head.logits_dimension, activation_fn=None, variables_collections=[parent_scope], scope=scope) _add_hidden_layer_summary(logits, 
scope.name) def _train_op_fn(loss): """Returns the op to optimize the loss.""" return optimizers.optimize_loss( loss=loss, global_step=contrib_variables.get_global_step(), learning_rate=_LEARNING_RATE, optimizer=_get_optimizer(optimizer), gradient_multipliers=( dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access embedding_lr_multipliers, parent_scope, input_layer_scope)), clip_gradients=gradient_clip_norm, name=parent_scope, # Empty summaries to prevent optimizers from logging the training_loss. summaries=[]) return head.head_ops(features, labels, mode, _train_op_fn, logits) class DNNClassifier(evaluable.Evaluable, trainable.Trainable): """A classifier for TensorFlow DNN models. Example: ```python sparse_feature_a = sparse_column_with_hash_bucket(...) sparse_feature_b = sparse_column_with_hash_bucket(...) sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a, ...) sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b, ...) estimator = DNNClassifier( feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb], hidden_units=[1024, 512, 256]) # Or estimator using the ProximalAdagradOptimizer optimizer with # regularization. estimator = DNNClassifier( feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb], hidden_units=[1024, 512, 256], optimizer=tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001 )) # Input builders def input_fn_train: # returns x, y (where y represents label's class index). pass estimator.fit(input_fn=input_fn_train) def input_fn_eval: # returns x, y (where y represents label's class index). pass estimator.evaluate(input_fn=input_fn_eval) estimator.predict(x=x) # returns predicted labels (i.e. label's class index). ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: * if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. 
* for each `column` in `feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn`, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, hidden_units, feature_columns, model_dir=None, n_classes=2, weight_column_name=None, optimizer=None, activation_fn=nn.relu, dropout=None, gradient_clip_norm=None, enable_centered_bias=False, config=None, feature_engineering_fn=None, embedding_lr_multipliers=None): """Initializes a DNNClassifier instance. Args: hidden_units: List of hidden units per layer. All layers are fully connected. Ex. `[64, 32]` means first layer has 64 nodes and second one has 32. feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. n_classes: number of label classes. Default is binary classification. It must be greater than 1. Note: Class labels are integers representing the class index (i.e. values from 0 to n_classes-1). For arbitrary label values (e.g. string labels), convert to class indices first. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. optimizer: An instance of `tf.Optimizer` used to train the model. If `None`, will use an Adagrad optimizer. activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. 
dropout: When not `None`, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See `tf.clip_by_global_norm` for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. config: `RunConfig` object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. Returns: A `DNNClassifier` estimator. Raises: ValueError: If `n_classes` < 2. """ self._hidden_units = hidden_units self._feature_columns = tuple(feature_columns or []) self._enable_centered_bias = enable_centered_bias self._estimator = estimator.Estimator( model_fn=_dnn_model_fn, model_dir=model_dir, config=config, params={ "head": head_lib._multi_class_head( # pylint: disable=protected-access n_classes, weight_column_name=weight_column_name, enable_centered_bias=enable_centered_bias), "hidden_units": hidden_units, "feature_columns": self._feature_columns, "optimizer": optimizer, "activation_fn": activation_fn, "dropout": dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, }, feature_engineering_fn=feature_engineering_fn) def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): """See trainable.Trainable. Note: Labels must be integer class indices.""" # TODO(roumposg): Remove when deprecated monitors are removed. 
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self) self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=hooks, max_steps=max_steps) return self def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None, checkpoint_path=None): """See evaluable.Evaluable. Note: Labels must be integer class indices.""" return self._estimator.evaluate( x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, steps=steps, metrics=metrics, name=name, checkpoint_path=checkpoint_path) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns predicted classes for given features. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted classes with shape [batch_size] (or an iterable of predicted classes if as_iterable is True). Each predicted class is represented by its class index (i.e. integer from 0 to n_classes-1). """ key = prediction_key.PredictionKey.CLASSES preds = self._estimator.predict(x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return (pred[key] for pred in preds) return preds[key].reshape(-1) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_proba( self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns prediction probabilities for given features. Args: x: features. input_fn: Input function. 
If set, x and y must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted probabilities with shape [batch_size, n_classes] (or an iterable of predicted probabilities if as_iterable is True). """ key = prediction_key.PredictionKey.PROBABILITIES preds = self._estimator.predict(x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return (pred[key] for pred in preds) return preds[key] def _get_predict_ops(self, features): """See `Estimator` class.""" # This method exists to support some models that use the legacy interface. # pylint: disable=protected-access return self._estimator._get_predict_ops(features) def get_variable_names(self): """Returns list of all variable names in this model. Returns: List of names. """ return self._estimator.get_variable_names() def get_variable_value(self, name): """Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: `Tensor` object. 
""" return self._estimator.get_variable_value(name) def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BaseEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return self._estimator.export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=( signature_fn or export.classification_signature_fn_with_prob), prediction_key=prediction_key.PredictionKey.PROBABILITIES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) @experimental def export_savedmodel(self, export_dir_base, input_fn, default_output_alternative_key=None, assets_extra=None, as_text=False, exports_to_keep=None): return self._estimator.export_savedmodel( export_dir_base, input_fn, default_output_alternative_key=default_output_alternative_key, assets_extra=assets_extra, as_text=as_text, exports_to_keep=exports_to_keep) @property def model_dir(self): return self._estimator.model_dir @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def weights_(self): hiddenlayer_weights = [ self.get_variable_value("dnn/hiddenlayer_%d/weights" % i) for i, _ in enumerate(self._hidden_units) ] logits_weights = [self.get_variable_value("dnn/logits/weights")] return hiddenlayer_weights + logits_weights @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. 
" "To inspect variables, use get_variable_names() and " "get_variable_value().") def bias_(self): hiddenlayer_bias = [ self.get_variable_value("dnn/hiddenlayer_%d/biases" % i) for i, _ in enumerate(self._hidden_units) ] logits_bias = [self.get_variable_value("dnn/logits/biases")] if self._enable_centered_bias: centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)] else: centered_bias = [] return hiddenlayer_bias + logits_bias + centered_bias @property def config(self): return self._estimator.config class DNNRegressor(evaluable.Evaluable, trainable.Trainable): """A regressor for TensorFlow DNN models. Example: ```python sparse_feature_a = sparse_column_with_hash_bucket(...) sparse_feature_b = sparse_column_with_hash_bucket(...) sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a, ...) sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b, ...) estimator = DNNRegressor( feature_columns=[sparse_feature_a, sparse_feature_b], hidden_units=[1024, 512, 256]) # Or estimator using the ProximalAdagradOptimizer optimizer with # regularization. estimator = DNNRegressor( feature_columns=[sparse_feature_a, sparse_feature_b], hidden_units=[1024, 512, 256], optimizer=tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001 )) # Input builders def input_fn_train: # returns x, y pass estimator.fit(input_fn=input_fn_train) def input_fn_eval: # returns x, y pass estimator.evaluate(input_fn=input_fn_eval) estimator.predict(x=x) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: * if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. * for each `column` in `feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. 
- if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn`, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, hidden_units, feature_columns, model_dir=None, weight_column_name=None, optimizer=None, activation_fn=nn.relu, dropout=None, gradient_clip_norm=None, enable_centered_bias=False, config=None, feature_engineering_fn=None, label_dimension=1, embedding_lr_multipliers=None): """Initializes a `DNNRegressor` instance. Args: hidden_units: List of hidden units per layer. All layers are fully connected. Ex. `[64, 32]` means first layer has 64 nodes and second one has 32. feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. optimizer: An instance of `tf.Optimizer` used to train the model. If `None`, will use an Adagrad optimizer. activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. dropout: When not `None`, the probability we will drop out a given coordinate. gradient_clip_norm: A `float` > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See `tf.clip_by_global_norm` for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. 
config: `RunConfig` object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. label_dimension: Dimension of the label for multilabels. Defaults to 1. embedding_lr_multipliers: Optional. A dictionary from `EbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. Returns: A `DNNRegressor` estimator. """ self._feature_columns = tuple(feature_columns or []) self._estimator = estimator.Estimator( model_fn=_dnn_model_fn, model_dir=model_dir, config=config, params={ "head": head_lib._regression_head( # pylint: disable=protected-access label_dimension=label_dimension, weight_column_name=weight_column_name, enable_centered_bias=enable_centered_bias), "hidden_units": hidden_units, "feature_columns": self._feature_columns, "optimizer": optimizer, "activation_fn": activation_fn, "dropout": dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, }, feature_engineering_fn=feature_engineering_fn) def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): """See trainable.Trainable.""" # TODO(roumposg): Remove when deprecated monitors are removed. 
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self) self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=hooks, max_steps=max_steps) return self def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None, checkpoint_path=None): """See evaluable.Evaluable.""" # TODO(zakaria): remove once deprecation is finished (b/31229024) custom_metrics = {} if metrics: for key, metric in six.iteritems(metrics): if (not isinstance(metric, metric_spec.MetricSpec) and not isinstance(key, tuple)): custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric else: custom_metrics[key] = metric return self._estimator.evaluate( x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, steps=steps, metrics=custom_metrics, name=name, checkpoint_path=checkpoint_path) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns predicted scores for given features. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted scores (or an iterable of predicted scores if as_iterable is True). If `label_dimension == 1`, the shape of the output is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`. 
""" key = prediction_key.PredictionKey.SCORES preds = self._estimator.predict(x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return (pred[key] for pred in preds) return preds[key] def _get_predict_ops(self, features): """See `Estimator` class.""" # This method exists to support some models that use the legacy interface. # pylint: disable=protected-access return self._estimator._get_predict_ops(features) def get_variable_names(self): """Returns list of all variable names in this model. Returns: List of names. """ return self._estimator.get_variable_names() def get_variable_value(self, name): """Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: `Tensor` object. """ return self._estimator.get_variable_value(name) def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BaseEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return self._estimator.export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=signature_fn or export.regression_signature_fn, prediction_key=prediction_key.PredictionKey.SCORES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) @property def model_dir(self): return self._estimator.model_dir @property def config(self): return self._estimator.config
import re from types import StringType,ListType import biopython.pairwise2 from oldowan.polymorphism import Polymorphism from reduction_funcs import prefer_known_substitutions from reduction_funcs import prefer_fewer from reduction_funcs import prefer_indels_at_end from reduction_funcs import prefer_multi_inserts from reduction_funcs import prefer_insertions_at_309_and_315 from reduction_funcs import prefer_315_insertion_over_double_310_insertion from reduction_funcs import prefer_indels_over_ts_over_tv from reduction_funcs import prefer_end_of_sorted_list from reduction_funcs import prefer_309del_315ins_over_309T_310C from reduction_funcs import prefer_95ins_97del_over_96T_97C # rCRSplus is an expanded rCRS sequence, which starts at position 15500, # then runs through the whole genome, then has the opening # 1000 bases attached at the end again. This is used so that # query sequences that are not cut precisely at the canonical # origin will still be analyzable. # rCRSplus_positions maps the string indices of rCRSplus to their positions # in the reference sequence. 
from oldowan.mtdna import rCRSplus, rCRSplus_positions


class MatchingRange(object):
    """A region where query and target fail to match word-for-word.

    Holds the half-open [start, end) index pairs of the region in both the
    query and the target sequence so the two slices can be re-aligned
    later.  ``query_end``/``target_end`` start as None and are filled in
    once the end of the mismatching region has been located.
    """

    def __init__(self, query_start, target_start):
        self.query_start = query_start
        self.target_start = target_start
        self.query_end = None
        self.target_end = None

    def __str__(self):
        return '[(%s, %s),(%s,%s)]' % (self.query_start,
                                       self.query_end,
                                       self.target_start,
                                       self.target_end)

    def __repr__(self):
        return 'MatchingRange: %s' % str(self)

    def query_slice(self):
        """Slice object covering this range in the query sequence."""
        return slice(self.query_start, self.query_end)

    def target_slice(self):
        """Slice object covering this range in the target sequence."""
        return slice(self.target_start, self.target_end)

    def intersect(self, other):
        """Return True if this range overlaps ``other`` in both sequences.

        Both ranges must already have their end positions set.
        """
        other_query_range = range(other.query_start, other.query_end)
        other_target_range = range(other.target_start, other.target_end)
        if ((self.query_start in other_query_range
             or self.query_end in other_query_range)
                and (self.target_start in other_target_range
                     or self.target_end in other_target_range)):
            return True
        return False

    def merge_with(self, other):
        """Expand this range in place to also cover ``other``."""
        self.query_start = min([self.query_start, other.query_start])
        self.query_end = max([self.query_end, other.query_end])
        self.target_start = min([self.target_start, other.target_start])
        self.target_end = max([self.target_end, other.target_end])


def find_match_positions(query, reference, word_size=15):
    """Slide a ``word_size`` window along ``query`` and locate each word
    in ``reference``.

    Returns a list with one entry per window position: the index of the
    word in ``reference``, or -1 when the word is not found.  After the
    first hit, searching is restricted to a 100-base window past the
    previous hit to keep lookups local (and cheap) for similar sequences.
    """
    WS = word_size
    s1 = query
    s2 = reference
    word_starts = range(0, len(s1) - WS + 1)
    matches = []
    start = None
    for i in word_starts:
        if start is None:
            pos = s2.find(s1[i:i + WS])
        else:
            pos = s2.find(s1[i:i + WS], start, start + 100)
        if pos != -1:
            start = pos
        matches.append(pos)
    return matches


def align(query, reference, word_size=15, mismatch_cutoff=0.7):
    """ Align two similar sequences.

    Word-matches ``query`` against ``reference``, collects the mismatching
    regions, re-aligns each region with pairwise2 and extracts candidate
    polymorphisms.

    Returns a nested list:
    [per mismatch block -> [per alternate alignment -> [Polymorphism, ...]]].

    Raises Exception when more than ``mismatch_cutoff`` of the words fail
    to match (the sequences are presumed too different to align).
    """
    WS = word_size
    s1 = query
    s2 = reference
    matches = find_match_positions(s1, s2, WS)

    # Every word matched: nothing to report.  (This also covers the
    # empty-matches case, so the division below cannot see len == 0.)
    if -1 not in matches:
        return []
    if matches.count(-1) / float(len(matches)) > mismatch_cutoff:
        raise Exception('sequences do not match')

    mismatches = []
    mismatch = None
    for position, value in enumerate(matches):
        if value == -1:
            if mismatch is None:
                if position == 0:
                    # the query starts at 0, but the target doesn't have to
                    # so, figure out where the target start is
                    query_start = 0
                    count = 0
                    while matches[count] == -1:
                        count += 1
                    target_start = matches[count] - count
                    mismatch = MatchingRange(query_start, target_start)
                else:
                    mismatch = MatchingRange(position,
                                             matches[position - 1] + 1)
        elif mismatch is not None:
            mismatch.query_end = position + WS
            mismatch.target_end = value + WS
            mismatches.append(mismatch)
            mismatch = None

    # if there is a mismatch at the end of the query, we will
    # never have set the proper end of the query and target
    if mismatch is not None:
        mismatch.query_end = len(query)
        mismatch.target_end = (mismatch.target_start
                               + mismatch.query_end
                               - mismatch.query_start)
        mismatches.append(mismatch)
        mismatch = None

    polymorphisms = []

    # Merge overlapping mismatch ranges so each region is aligned once.
    merged_matches = []
    for mm in mismatches:
        merged = False
        for nm in merged_matches:
            if mm.intersect(nm):
                nm.merge_with(mm)
                merged = True
        if not merged:
            merged_matches.append(mm)

    for mm in merged_matches:
        slice1 = s1[mm.query_slice()]
        slice2 = s2[mm.target_slice()]
        # align2 args (seq1, seq2, match_score, mismatch_penalty,
        #              gap_penalty, gap_extension_penalty)
        # If the alignment is at the end of the query, don't penalize end
        # gaps.  (Previously the alignment was computed unconditionally and
        # then recomputed for the end-of-query case; only the second result
        # was ever used, so the first call was wasted work.)
        if len(query) == mm.query_end:
            alignments = biopython.pairwise2.align.globalms(
                slice1, slice2, 3, -1, -3, -1, penalize_end_gaps=False)
        else:
            alignments = biopython.pairwise2.align.globalms(
                slice1, slice2, 3, -1, -3, -1)

        aln_polymorphisms = []
        for alignment in alignments:
            this_aln_polymorphisms = []
            qry_aln, ref_aln = alignment[0], alignment[1]
            # get rid of trailing deletions at end of query if alignment
            # is at end of query
            if len(query) == mm.query_end:
                trail_dels = re.search(r'-+$', qry_aln)
                if trail_dels is not None:
                    ndels = len(trail_dels.group())
                    qry_aln = qry_aln[:-ndels]
                    ref_aln = ref_aln[:-ndels]

            # Walk the aligned pair, emitting a Polymorphism for each
            # difference.  ``insert`` counts consecutive reference gaps so
            # insertions get sub-position numbering (e.g. 315.1).
            abs_pos = mm.target_start - 1
            insert = 0
            for pos, val in enumerate(ref_aln):
                if val != qry_aln[pos]:
                    if val == '-':
                        insert += 1
                    else:
                        abs_pos += 1
                        insert = 0
                    new_poly = Polymorphism(abs_pos, insert, qry_aln[pos])
                    this_aln_polymorphisms.append(new_poly)
                else:
                    abs_pos += 1
                    insert = 0
            aln_polymorphisms.append(this_aln_polymorphisms)
        polymorphisms.append(aln_polymorphisms)

    return polymorphisms


def seq2sites(seq, word_size=15, mismatch_cutoff=0.7, ambig_cutoff=10):
    """Translate a raw mtDNA sequence into a flat list of Polymorphisms.

    Aligns ``seq`` against the extended rCRS reference, converts string
    indices to one-based rCRS positions, then applies the standard
    nomenclature reduction rules to pick the preferred alignment for each
    ambiguous region.

    Raises Exception when ``seq`` contains more than ``ambig_cutoff``
    'N' bases.
    """
    # NOTE(review): 'N' is counted before upper-casing, so lower-case 'n'
    # bases do not count toward the cutoff -- confirm this is intended.
    if seq.count('N') > ambig_cutoff:
        # Parenthesized raise form: valid in Python 2 and required by
        # Python 3 (was the py2-only ``raise Exception, "..."`` statement).
        raise Exception("too many N's in submitted sequence")

    # remove whitespace
    seq = re.sub(r'\s', '', seq.upper())

    polymorphisms = align(seq, rCRSplus, word_size, mismatch_cutoff)

    # Convert rCRSplus string indices to one-based rCRS positions.
    for mismatch_block in polymorphisms:
        for alternate_alignment in mismatch_block:
            for site in alternate_alignment:
                # +1 to move from zero-based to one-based counting
                site.position = rCRSplus_positions[site.position] + 1

    # Reduction rules, applied in priority order, whittle each block's
    # alternate alignments down to the nomenclature-preferred one.
    reduction_funcs = [
        prefer_known_substitutions,
        prefer_insertions_at_309_and_315,
        prefer_315_insertion_over_double_310_insertion,
        prefer_309del_315ins_over_309T_310C,
        prefer_95ins_97del_over_96T_97C,
        prefer_fewer,
        prefer_multi_inserts,
        prefer_indels_at_end,
        prefer_indels_over_ts_over_tv,
        prefer_end_of_sorted_list,
    ]

    polys = []
    for block in polymorphisms:
        for f in reduction_funcs:
            block = f(block)
        polys.append(block)

    # eliminate a lot of the excess nesting
    unnested = []
    for mismatch_block in polys:
        if len(mismatch_block) == 1:
            for site in mismatch_block[0]:
                unnested.append(site)
        else:
            unnested.append(mismatch_block)
    return unnested
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# import ConfigParser import json import logging import os import subprocess import re import time import unittest from selenium import webdriver from selenium.webdriver import ActionChains from selenium.webdriver.support.ui import Select from selenium.common.exceptions import UnexpectedAlertPresentException from autothreadharness import settings from autothreadharness.exceptions import FailError, FatalError, GoldenDeviceNotEnoughError from autothreadharness.harness_controller import HarnessController from autothreadharness.helpers import HistoryHelper from autothreadharness.open_thread_controller import OpenThreadController from autothreadharness.pdu_controller_factory import PduControllerFactory from autothreadharness.rf_shield_controller import get_rf_shield_controller logger = logging.getLogger(__name__) THREAD_CHANNEL_MAX = 26 """Maximum channel number of thread protocol""" THREAD_CHANNEL_MIN = 11 """Minimum channel number of thread protocol""" DEFAULT_TIMEOUT = 2700 """Timeout for each test case in seconds""" def wait_until(what, times=-1): """Wait until `what` return True Args: what (Callable[bool]): Call `wait()` again and again until it returns True times (int): Maximum times of trials before giving up Returns: True if success, False if times threshold reached """ while times: logger.info('Waiting times left %d', times) try: if what() is True: return True except: logger.exception('Wait failed') else: logger.warning('Trial[%d] failed', times) times -= 1 time.sleep(1) return False class HarnessCase(unittest.TestCase): """This is the case class of all automation test cases. All test case classes MUST define properties `role`, `case` and `golden_devices_required` """ channel = settings.THREAD_CHANNEL """int: Thread channel. Thread channel ranges from 11 to 26. """ ROLE_LEADER = 1 ROLE_ROUTER = 2 ROLE_SED = 4 ROLE_BORDER = 8 ROLE_REED = 16 ROLE_ED = 32 ROLE_COMMISSIONER = 64 ROLE_JOINER = 128 ROLE_FED = 512 ROLE_MED = 1024 role = None """int: role id. 
1 Leader 2 Router 4 Sleepy end device 16 Router eligible end device 32 End device 64 Commissioner 128 Joiner 512 Full end device 1024 Minimal end device """ case = None """str: Case id, e.g. '6 5 1'. """ golden_devices_required = 0 """int: Golden devices needed to finish the test """ child_timeout = settings.THREAD_CHILD_TIMEOUT """int: Child timeout in seconds """ sed_polling_interval = settings.THREAD_SED_POLLING_INTERVAL """int: SED polling interval in seconds """ auto_dut = settings.AUTO_DUT """bool: whether use harness auto dut feature""" timeout = hasattr(settings, 'TIMEOUT') and settings.TIMEOUT or DEFAULT_TIMEOUT """number: timeout in seconds to stop running this test case""" started = 0 """number: test case started timestamp""" def __init__(self, *args, **kwargs): self.dut = None self._browser = None self._hc = None self.result_dir = '%s\\%s' % (settings.OUTPUT_PATH, self.__class__.__name__) self.history = HistoryHelper() super(HarnessCase, self).__init__(*args, **kwargs) def _init_devices(self): """Reboot all usb devices. Note: If PDU_CONTROLLER_TYPE is not valid, usb devices is not rebooted. """ if not settings.PDU_CONTROLLER_TYPE: if settings.AUTO_DUT: return for device in settings.GOLDEN_DEVICES: port, _ = device try: with OpenThreadController(port) as otc: logger.info('Resetting %s', port) otc.reset() except: logger.exception('Failed to reset device %s', port) self.history.mark_bad_golden_device(device) return tries = 3 pdu_factory = PduControllerFactory() while True: try: pdu = pdu_factory.create_pdu_controller(settings.PDU_CONTROLLER_TYPE) pdu.open(**settings.PDU_CONTROLLER_OPEN_PARAMS) except EOFError: logger.warning('Failed to connect to telnet') tries = tries - 1 if tries: time.sleep(10) continue else: logger.error('Fatal error: cannot connect to apc') raise else: pdu.reboot(**settings.PDU_CONTROLLER_REBOOT_PARAMS) pdu.close() break time.sleep(len(settings.GOLDEN_DEVICES)) def _init_harness(self): """Restart harness backend service. 
Please start the harness controller before running the cases, otherwise, nothing happens """ self._hc = HarnessController(self.result_dir) self._hc.stop() time.sleep(1) self._hc.start() time.sleep(2) harness_config = ConfigParser.ConfigParser() harness_config.read('%s\\Config\\Configuration.ini' % settings.HARNESS_HOME) if harness_config.has_option('THREAD_HARNESS_CONFIG', 'BrowserAutoNavigate') and \ harness_config.getboolean('THREAD_HARNESS_CONFIG', 'BrowserAutoNavigate'): os.system('taskkill /t /f /im chrome.exe') def _destroy_harness(self): """Stop harness backend service Stop harness service. """ self._hc.stop() time.sleep(2) def _init_dut(self): """Initialize the DUT. DUT will be restarted. and openthread will started. """ if self.auto_dut: self.dut = None return dut_port = settings.DUT_DEVICE[0] dut = OpenThreadController(dut_port) self.dut = dut def _destroy_dut(self): self.dut = None def _init_browser(self): """Open harness web page. Open a quiet chrome which: 1. disables extensions, 2. ignore certificate errors and 3. always allow notifications. """ chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--disable-extensions') chrome_options.add_argument('--ignore-certificate-errors') chrome_options.add_experimental_option('prefs', { 'profile.managed_default_content_settings.notifications': 1 }) browser = webdriver.Chrome(chrome_options=chrome_options) browser.set_page_load_timeout(10) browser.implicitly_wait(1) browser.maximize_window() browser.get(settings.HARNESS_URL) self._browser = browser if not wait_until(lambda: 'Thread' in browser.title, 30): self.assertIn('Thread', browser.title) def _destroy_browser(self): """Close the browser. 
""" self._browser.close() self._browser = None def _init_rf_shield(self): if getattr(settings, 'SHIELD_CONTROLLER_TYPE', None) and getattr(settings, 'SHIELD_CONTROLLER_PARAMS', None): self.rf_shield = get_rf_shield_controller( shield_type=settings.SHIELD_CONTROLLER_TYPE, params=settings.SHIELD_CONTROLLER_PARAMS ) else: self.rf_shield = None def _destroy_rf_shield(self): self.rf_shield = None def setUp(self): """Prepare to run test case. Start harness service, init golden devices, reset DUT and open browser. """ if self.__class__ is HarnessCase: return logger.info('Setting up') # clear files logger.info('Deleting all .pdf') os.system('del /q "%HOMEDRIVE%%HOMEPATH%\\Downloads\\NewPdf_*.pdf"') logger.info('Deleting all .xlsx') os.system('del /q "%HOMEDRIVE%%HOMEPATH%\\Downloads\\ExcelReport*.xlsx"') logger.info('Deleting all .pcapng') os.system('del /q "%s\\Captures\\*.pcapng"' % settings.HARNESS_HOME) # using temp files to fix excel downloading fail logger.info('Empty files in temps') os.system('del /q "%s\\Thread_Harness\\temp\\*.*"' % settings.HARNESS_HOME) # create directory os.system('mkdir %s' % self.result_dir) self._init_harness() self._init_devices() self._init_dut() self._init_rf_shield() def tearDown(self): """Clean up after each case. Stop harness service, close browser and close DUT. 
""" if self.__class__ is HarnessCase: return logger.info('Tearing down') self._destroy_harness() self._destroy_browser() self._destroy_dut() self._destroy_rf_shield() def _setup_page(self): """Do sniffer settings and general settings """ if not self.started: self.started = time.time() if time.time() - self.started > 30: self._browser.refresh() return # Detect Sniffer try: dialog = self._browser.find_element_by_id('capture-Setup-modal') except: logger.exception('Failed to get dialog.') else: if dialog and dialog.get_attribute('aria-hidden') == 'false': times = 60 while times: status = dialog.find_element_by_class_name('status-notify').text if 'Searching' in status: logger.info('Still detecting..') elif 'Not' in status: logger.warning('Sniffer device not verified!') button = dialog.find_element_by_id('snifferAutoDetectBtn') button.click() elif 'Verified' in status: logger.info('Verified!') button = dialog.find_element_by_id('saveCaptureSettings') button.click() break else: logger.warning('Unexpected sniffer verification status') times = times - 1 time.sleep(1) if not times: raise Exception('Unable to detect sniffer device') time.sleep(1) try: skip_button = self._browser.find_element_by_id('SkipPrepareDevice') if skip_button.is_enabled(): skip_button.click() time.sleep(1) except: logger.info('Still detecting sniffers') try: next_button = self._browser.find_element_by_id('nextButton') except: logger.exception('Failed to finish setup') return if not next_button.is_enabled(): logger.info('Harness is still not ready') return # General Setup try: if self.child_timeout or self.sed_polling_interval: button = self._browser.find_element_by_id('general-Setup') button.click() time.sleep(2) dialog = self._browser.find_element_by_id('general-Setup-modal') if dialog.get_attribute('aria-hidden') != 'false': raise Exception('Missing General Setup dialog') field = dialog.find_element_by_id('inp_general_child_update_wait_time') field.clear() if self.child_timeout: 
field.send_keys(str(self.child_timeout)) field = dialog.find_element_by_id('inp_general_sed_polling_rate') field.clear() if self.sed_polling_interval: field.send_keys(str(self.sed_polling_interval)) button = dialog.find_element_by_id('saveGeneralSettings') button.click() time.sleep(1) except: logger.exception('Failed to do general setup') return # Finish this page next_button.click() time.sleep(1) def _connect_devices(self): connect_all = self._browser.find_element_by_link_text('Connect All') connect_all.click() def _add_device(self, port, device_type_id): browser = self._browser test_bed = browser.find_element_by_id('test-bed') device = browser.find_element_by_id(device_type_id) # drag action_chains = ActionChains(browser) action_chains.click_and_hold(device) action_chains.move_to_element(test_bed).perform() time.sleep(1) # drop drop_hw = browser.find_element_by_class_name('drop-hw') action_chains = ActionChains(browser) action_chains.move_to_element(drop_hw) action_chains.release(drop_hw).perform() time.sleep(0.5) selected_hw = browser.find_element_by_class_name('selected-hw') form_inputs = selected_hw.find_elements_by_tag_name('input') form_port = form_inputs[0] form_port.clear() form_port.send_keys(port) def _test_bed(self): """Set up the test bed. Connect number of golden devices required by each case. 
""" browser = self._browser test_bed = browser.find_element_by_id('test-bed') time.sleep(3) selected_hw_set = test_bed.find_elements_by_class_name('selected-hw') selected_hw_num = len(selected_hw_set) while selected_hw_num: remove_button = selected_hw_set[selected_hw_num - 1].find_element_by_class_name( 'removeSelectedDevice') remove_button.click() selected_hw_num = selected_hw_num - 1 devices = [device for device in settings.GOLDEN_DEVICES if not self.history.is_bad_golden_device(device[0]) and \ not (settings.DUT_DEVICE and device[0] == settings.DUT_DEVICE[0])] logger.info('Available golden devices: %s', json.dumps(devices, indent=2)) golden_devices_required = self.golden_devices_required if self.auto_dut and not settings.DUT_DEVICE: golden_devices_required += 1 if len(devices) < golden_devices_required: raise GoldenDeviceNotEnoughError() # add golden devices while golden_devices_required: self._add_device(*devices.pop()) golden_devices_required = golden_devices_required - 1 # add DUT if settings.DUT_DEVICE: self._add_device(*settings.DUT_DEVICE) # enable AUTO DUT if self.auto_dut: checkbox_auto_dut = browser.find_element_by_id('EnableAutoDutSelection') if not checkbox_auto_dut.is_selected(): checkbox_auto_dut.click() time.sleep(1) if settings.DUT_DEVICE: radio_auto_dut = browser.find_element_by_class_name('AutoDUT_RadBtns') if not radio_auto_dut.is_selected(): radio_auto_dut.click() while True: try: self._connect_devices() button_next = browser.find_element_by_id('nextBtn') if not wait_until(lambda: 'disabled' not in button_next.get_attribute('class'), times=(30 + 4 * self.golden_devices_required)): bad_ones = [] selected_hw_set = test_bed.find_elements_by_class_name('selected-hw') for selected_hw in selected_hw_set: form_inputs = selected_hw.find_elements_by_tag_name('input') form_port = form_inputs[0] if form_port.is_enabled(): bad_ones.append(selected_hw) for selected_hw in bad_ones: form_inputs = selected_hw.find_elements_by_tag_name('input') form_port = 
form_inputs[0] port = form_port.get_attribute('value').encode('utf8') if settings.DUT_DEVICE and port == settings.DUT_DEVICE[0]: if settings.PDU_CONTROLLER_TYPE is None: # connection error cannot recover without power cycling raise FatalError('Failed to connect to DUT') else: raise FailError('Failed to connect to DUT') if settings.PDU_CONTROLLER_TYPE is None: # port cannot recover without power cycling self.history.mark_bad_golden_device(port) # remove the bad one selected_hw.find_element_by_class_name('removeSelectedDevice').click() time.sleep(0.1) if len(devices): self._add_device(*devices.pop()) else: devices = None if devices is None: logger.warning('Golden devices not enough') raise GoldenDeviceNotEnoughError() else: logger.info('Try again with new golden devices') continue if self.auto_dut and not settings.DUT_DEVICE: radio_auto_dut = browser.find_element_by_class_name('AutoDUT_RadBtns') if not radio_auto_dut.is_selected(): radio_auto_dut.click() time.sleep(5) button_next.click() if not wait_until(lambda: self._browser.current_url.endswith('TestExecution.html'), 20): raise Exception('Failed to load TestExecution page') except FailError: raise except: logger.exception('Unexpected error') else: break def _select_case(self, role, case): """Select the test case. 
""" # select the case elem = Select(self._browser.find_element_by_id('select-dut')) elem.select_by_value(str(role)) time.sleep(1) checkbox = None wait_until(lambda: self._browser.find_elements_by_css_selector('.tree-node .tree-title') and True) elems = self._browser.find_elements_by_css_selector('.tree-node .tree-title') finder = re.compile(r'.*\b' + case + r'\b') finder_dotted = re.compile(r'.*\b' + case.replace(' ', r'\.') + r'\b') for elem in elems: action_chains = ActionChains(self._browser) action_chains.move_to_element(elem) action_chains.perform() logger.debug(elem.text) if finder.match(elem.text) or finder_dotted.match(elem.text): parent = elem.find_element_by_xpath('..') checkbox = parent.find_element_by_class_name('tree-checkbox') break if not checkbox: time.sleep(5) raise Exception('Failed to find the case') self._browser.execute_script("$('.overview').css('left', '0')") checkbox.click() time.sleep(1) elem = self._browser.find_element_by_id('runTest') elem.click() if not wait_until(lambda: self._browser.find_element_by_id('stopTest') and True, 10): raise Exception('Failed to start test case') def _collect_result(self): """Collect test result. 
Generate PDF, excel and pcap file """ # generate pdf self._browser.find_element_by_class_name('save-pdf').click() time.sleep(1) try: dialog = self._browser.find_element_by_id('Testinfo') except: logger.exception('Failed to get test info dialog.') else: if dialog.get_attribute('aria-hidden') != 'false': raise Exception('Test information dialog not ready') version = self.auto_dut and settings.DUT_VERSION or self.dut.version dialog.find_element_by_id('inp_dut_manufacturer').send_keys(settings.DUT_MANUFACTURER) dialog.find_element_by_id('inp_dut_firmware_version').send_keys(version) dialog.find_element_by_id('inp_tester_name').send_keys(settings.TESTER_NAME) dialog.find_element_by_id('inp_remarks').send_keys(settings.TESTER_REMARKS) dialog.find_element_by_id('generatePdf').click() time.sleep(1) main_window = self._browser.current_window_handle # generate excel self._browser.find_element_by_class_name('save-excel').click() time.sleep(1) for window_handle in self._browser.window_handles: if window_handle != main_window: self._browser.switch_to.window(window_handle) self._browser.close() self._browser.switch_to.window(main_window) # save pcap self._browser.find_element_by_class_name('save-wireshark').click() time.sleep(1) for window_handle in self._browser.window_handles: if window_handle != main_window: self._browser.switch_to.window(window_handle) self._browser.close() self._browser.switch_to.window(main_window) os.system('copy "%%HOMEPATH%%\\Downloads\\NewPdf_*.pdf" %s\\' % self.result_dir) os.system('copy "%%HOMEPATH%%\\Downloads\\ExcelReport_*.xlsx" %s\\' % self.result_dir) os.system('copy "%s\\Captures\\*.pcapng" %s\\' % (settings.HARNESS_HOME, self.result_dir)) os.system('copy "%s\\Thread_Harness\\temp\\*.*" "%s"' % (settings.HARNESS_HOME, self.result_dir)) def _wait_dialog(self): """Wait for dialogs and handle them until done. 
""" logger.debug('waiting for dialog') done = False error = False while not done and self.timeout: try: dialog = self._browser.find_element_by_id('RemoteConfirm') except: logger.exception('Failed to get dialog.') else: if dialog and dialog.get_attribute('aria-hidden') == 'false': title = dialog.find_element_by_class_name('modal-title').text time.sleep(1) logger.info('Handling dialog[%s]', title) try: done = self._handle_dialog(dialog, title) except: logger.exception('Error handling dialog: %s', title) error = True if done is None: raise FailError('Unexpected dialog occurred') dialog.find_element_by_id('ConfirmOk').click() time.sleep(1) try: stop_button = self._browser.find_element_by_id('stopTest') if done: stop_button.click() # wait for stop procedure end time.sleep(10) except: logger.exception('Test stopped') time.sleep(5) done = True self.timeout -= 1 # check if already ended capture if self.timeout % 10 == 0: lines = self._hc.tail() if 'SUCCESS: The process "dumpcap.exe" with PID ' in lines: logger.info('Tshark should be ended now, lets wait at most 30 seconds.') if not wait_until(lambda: 'tshark.exe' not in subprocess.check_output('tasklist'), 30): res = subprocess.check_output('taskkill /t /f /im tshark.exe', stderr=subprocess.STDOUT, shell=True) logger.info(res) # Wait until case really stopped wait_until(lambda: self._browser.find_element_by_id('runTest') and True, 30) if error: raise FailError('Fail for previous exceptions') def _handle_dialog(self, dialog, title): """Handle a dialog. 
Returns: bool True if no more dialogs expected, False if more dialogs needed, and None if not handled """ done = self.on_dialog(dialog, title) if isinstance(done, bool): return done if title.startswith('Start DUT'): body = dialog.find_element_by_id('cnfrmMsg').text if 'Sleepy End Device' in body: self.dut.mode = 's' self.dut.child_timeout = self.child_timeout elif 'End Device' in body: self.dut.mode = 'rsn' self.dut.child_timeout = self.child_timeout else: self.dut.mode = 'rsdn' if 'at channel' in body: self.channel = int(body.split(':')[1]) self.dut.channel = self.channel self.dut.panid = settings.THREAD_PANID self.dut.networkname = settings.THREAD_NETWORKNAME self.dut.extpanid = settings.THREAD_EXTPANID self.dut.start() elif (title.startswith('MAC Address Required') or title.startswith('DUT Random Extended MAC Address Required')): mac = self.dut.mac inp = dialog.find_element_by_id('cnfrmInpText') inp.clear() inp.send_keys('0x%s' % mac) elif title.startswith('LL64 Address'): ll64 = None for addr in self.dut.addrs: addr = addr.lower() if addr.startswith('fe80') and not re.match('.+ff:fe00:[0-9a-f]{0,4}$', addr): ll64 = addr break if not ll64: raise FailError('No link local address found') logger.info('Link local address is %s', ll64) inp = dialog.find_element_by_id('cnfrmInpText') inp.clear() inp.send_keys(ll64) elif title.startswith('Enter Channel'): self.dut.channel = self.channel inp = dialog.find_element_by_id('cnfrmInpText') inp.clear() inp.send_keys(str(self.dut.channel)) elif title.startswith('User Action Needed'): body = dialog.find_element_by_id('cnfrmMsg').text if body.startswith('Power Down the DUT'): self.dut.stop() return True elif title.startswith('Short Address'): short_addr = '0x%s' % self.dut.short_addr inp = dialog.find_element_by_id('cnfrmInpText') inp.clear() inp.send_keys(short_addr) elif title.startswith('ML64 Address'): ml64 = None for addr in self.dut.addrs: if addr.startswith('fd') and not re.match('.+ff:fe00:[0-9a-f]{0,4}$', addr): ml64 = 
addr break if not ml64: raise Exception('No mesh local address found') logger.info('Mesh local address is %s', ml64) inp = dialog.find_element_by_id('cnfrmInpText') inp.clear() inp.send_keys(ml64) elif title.startswith('Shield Devices') or title.startswith('Sheild DUT'): if self.rf_shield: logger.info('Shielding devices') with self.rf_shield: self.rf_shield.shield() elif self.dut and settings.SHIELD_SIMULATION: self.dut.channel = (self.channel == THREAD_CHANNEL_MAX and THREAD_CHANNEL_MIN) or (self.channel + 1) else: raw_input('Shield DUT and press enter to continue..') elif title.startswith('Unshield Devices') or title.startswith('Bring DUT Back to network'): if self.rf_shield: logger.info('Unshielding devices') with self.rf_shield: self.rf_shield.unshield() elif self.dut and settings.SHIELD_SIMULATION: self.dut.channel = self.channel else: raw_input('Bring DUT and press enter to continue..') elif title.startswith('Configure Prefix on DUT'): body = dialog.find_element_by_id('cnfrmMsg').text body = body.split(': ')[1] params = reduce(lambda params, param: params.update(((param[0].strip(' '), param[1]),)) or params, [it.split('=') for it in body.split(', ')], {}) prefix = params['P_Prefix'].strip('\0\r\n\t ') flags = [] if params.get('P_slaac_preferred', 0) == '1': flags.append('p') flags.append('ao') if params.get('P_stable', 0) == '1': flags.append('s') if params.get('P_default', 0) == '1': flags.append('r') prf = 'high' self.dut.add_prefix(prefix, ''.join(flags), prf) return False def test(self): """This method will only start test case in child class""" if self.__class__ is HarnessCase: logger.warning('Skip this harness itself') return logger.info('Testing role[%d] case[%s]', self.role, self.case) try: self._init_browser() # prepare test case while True: url = self._browser.current_url if url.endswith('SetupPage.html'): self._setup_page() elif url.endswith('TestBed.html'): self._test_bed() elif url.endswith('TestExecution.html'): logger.info('Ready to handle 
dialogs') break time.sleep(2) except UnexpectedAlertPresentException: logger.exception('Failed to connect to harness server') raise SystemExit() except FatalError: logger.exception('Test stopped for fatal error') raise SystemExit() except FailError: logger.exception('Test failed') raise except: logger.exception('Something wrong') self._select_case(self.role, self.case) self._wait_dialog() try: self._collect_result() except: logger.exception('Failed to collect results') raise # get case result status = self._browser.find_element_by_class_name('title-test').text logger.info(status) success = 'Pass' in status self.assertTrue(success)
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Client-side controllers for the Image service v2 metadata-definitions
(metadefs) REST API: namespaces, resource types, properties, objects and
tags."""

from oslo_utils import encodeutils
import six
from six.moves.urllib import parse
import warlock

from glanceclient.common import utils
from glanceclient.v2 import schemas

DEFAULT_PAGE_SIZE = 20

SORT_DIR_VALUES = ('asc', 'desc')
SORT_KEY_VALUES = ('created_at', 'namespace')


class NamespaceController(object):
    def __init__(self, http_client, schema_client):
        self.http_client = http_client
        self.schema_client = schema_client

    @utils.memoized_property
    def model(self):
        # Lazily build (and cache) a warlock model class from the server's
        # published JSON schema.
        schema = self.schema_client.get('metadefs/namespace')
        return warlock.model_factory(schema.raw(), schemas.SchemaBasedModel)

    def create(self, **kwargs):
        """Create a namespace.

        :param kwargs: Unpacked namespace object.
        :raises TypeError: if kwargs do not validate against the schema.
        """
        url = '/v2/metadefs/namespaces'
        try:
            namespace = self.model(kwargs)
        except (warlock.InvalidOperation, ValueError) as e:
            raise TypeError(encodeutils.exception_to_unicode(e))

        resp, body = self.http_client.post(url, data=namespace)
        body.pop('self', None)
        return self.model(**body)

    def update(self, namespace_name, **kwargs):
        """Update a namespace.

        :param namespace_name: Name of a namespace (old one).
        :param kwargs: Unpacked namespace object.
        :raises TypeError: if an updated field violates the schema.
        """
        namespace = self.get(namespace_name)
        for (key, value) in six.iteritems(kwargs):
            try:
                setattr(namespace, key, value)
            except warlock.InvalidOperation as e:
                raise TypeError(encodeutils.exception_to_unicode(e))

        # Remove read-only parameters before sending the object back.
        read_only = ['schema', 'updated_at', 'created_at']
        for elem in read_only:
            if elem in namespace:
                del namespace[elem]

        url = '/v2/metadefs/namespaces/{0}'.format(namespace_name)
        self.http_client.put(url, data=namespace)

        # Re-fetch under the (possibly renamed) namespace name.
        return self.get(namespace.namespace)

    def get(self, namespace, **kwargs):
        """Get one namespace."""
        query_params = parse.urlencode(kwargs)
        if kwargs:
            query_params = '?%s' % query_params

        url = '/v2/metadefs/namespaces/{0}{1}'.format(namespace, query_params)
        resp, body = self.http_client.get(url)
        # NOTE(bcwaldon): remove 'self' for now until we have an elegant
        # way to pass it into the model constructor without conflict
        body.pop('self', None)
        return self.model(**body)

    def list(self, **kwargs):
        """Retrieve a listing of Namespace objects.

        :param page_size: Number of items to request in each paginated request
        :param limit: Use to request a specific page size. Expect a response
                      to a limited request to return between zero and limit
                      items.
        :param marker: Specifies the namespace of the last-seen namespace.
                       The typical pattern of limit and marker is to make an
                       initial limited request and then to use the last
                       namespace from the response as the marker parameter
                       in a subsequent limited request.
        :param sort_key: The field to sort on (for example, 'created_at')
        :param sort_dir: The direction to sort ('asc' or 'desc')
        :returns: generator over list of Namespaces
        :raises ValueError: for an unsupported sort_key / sort_dir value.
        """
        ori_validate_fun = self.model.validate
        empty_fun = lambda *args, **kwargs: None

        def paginate(url):
            resp, body = self.http_client.get(url)
            for namespace in body['namespaces']:
                # NOTE(bcwaldon): remove 'self' for now until we have
                # an elegant way to pass it into the model constructor
                # without conflict.
                namespace.pop('self', None)
                yield self.model(**namespace)
                # NOTE(zhiyan): In order to resolve the performance issue
                # of JSON schema validation for image listing case, we
                # don't validate each image entry but do it only on first
                # image entry for each page.
                self.model.validate = empty_fun

            # NOTE(zhiyan): Reset validation function.
            self.model.validate = ori_validate_fun

            try:
                next_url = body['next']
            except KeyError:
                return
            else:
                for namespace in paginate(next_url):
                    yield namespace

        filters = kwargs.get('filters', {})
        filters = {} if filters is None else filters

        if not kwargs.get('page_size'):
            filters['limit'] = DEFAULT_PAGE_SIZE
        else:
            filters['limit'] = kwargs['page_size']

        if 'marker' in kwargs:
            filters['marker'] = kwargs['marker']

        sort_key = kwargs.get('sort_key')
        if sort_key is not None:
            if sort_key in SORT_KEY_VALUES:
                filters['sort_key'] = sort_key
            else:
                raise ValueError('sort_key must be one of the following: %s.'
                                 % ', '.join(SORT_KEY_VALUES))

        sort_dir = kwargs.get('sort_dir')
        if sort_dir is not None:
            if sort_dir in SORT_DIR_VALUES:
                filters['sort_dir'] = sort_dir
            else:
                raise ValueError('sort_dir must be one of the following: %s.'
                                 % ', '.join(SORT_DIR_VALUES))

        for param, value in six.iteritems(filters):
            if isinstance(value, list):
                filters[param] = encodeutils.safe_encode(','.join(value))
            elif isinstance(value, six.string_types):
                filters[param] = encodeutils.safe_encode(value)

        url = '/v2/metadefs/namespaces?%s' % parse.urlencode(filters)
        for namespace in paginate(url):
            yield namespace

    def delete(self, namespace):
        """Delete a namespace."""
        url = '/v2/metadefs/namespaces/{0}'.format(namespace)
        self.http_client.delete(url)


class ResourceTypeController(object):
    def __init__(self, http_client, schema_client):
        self.http_client = http_client
        self.schema_client = schema_client

    @utils.memoized_property
    def model(self):
        schema = self.schema_client.get('metadefs/resource_type')
        return warlock.model_factory(schema.raw(), schemas.SchemaBasedModel)

    def associate(self, namespace, **kwargs):
        """Associate a resource type with a namespace.

        :param namespace: Name of the namespace.
        :param kwargs: Unpacked resource-type object, sent as request body.
        :raises TypeError: if kwargs do not validate against the schema.
        """
        try:
            res_type = self.model(kwargs)
        except (warlock.InvalidOperation, ValueError) as e:
            raise TypeError(encodeutils.exception_to_unicode(e))

        # FIX: the URL template has a single placeholder; the original
        # passed ``res_type`` as an ignored extra argument to format().
        # The resource type itself travels in the POST body.
        url = '/v2/metadefs/namespaces/{0}/resource_types'.format(namespace)
        resp, body = self.http_client.post(url, data=res_type)
        body.pop('self', None)
        return self.model(**body)

    def deassociate(self, namespace, resource):
        """Deassociate a resource type from a namespace."""
        url = ('/v2/metadefs/namespaces/{0}/resource_types/{1}'
               .format(namespace, resource))
        self.http_client.delete(url)

    def list(self):
        """Retrieve a listing of available resource types.

        :returns: generator over list of resource_types
        """
        url = '/v2/metadefs/resource_types'
        resp, body = self.http_client.get(url)
        for resource_type in body['resource_types']:
            yield self.model(**resource_type)

    def get(self, namespace):
        """Yield the resource-type associations of one namespace."""
        url = '/v2/metadefs/namespaces/{0}/resource_types'.format(namespace)
        resp, body = self.http_client.get(url)
        body.pop('self', None)
        for resource_type in body['resource_type_associations']:
            yield self.model(**resource_type)


class PropertyController(object):
    def __init__(self, http_client, schema_client):
        self.http_client = http_client
        self.schema_client = schema_client

    @utils.memoized_property
    def model(self):
        schema = self.schema_client.get('metadefs/property')
        return warlock.model_factory(schema.raw(), schemas.SchemaBasedModel)

    def create(self, namespace, **kwargs):
        """Create a property.

        :param namespace: Name of a namespace the property will belong.
        :param kwargs: Unpacked property object.
        :raises TypeError: if kwargs do not validate against the schema.
        """
        try:
            prop = self.model(kwargs)
        except (warlock.InvalidOperation, ValueError) as e:
            raise TypeError(encodeutils.exception_to_unicode(e))

        url = '/v2/metadefs/namespaces/{0}/properties'.format(namespace)

        resp, body = self.http_client.post(url, data=prop)
        body.pop('self', None)
        return self.model(**body)

    def update(self, namespace, prop_name, **kwargs):
        """Update a property.

        :param namespace: Name of a namespace the property belongs.
        :param prop_name: Name of a property (old one).
        :param kwargs: Unpacked property object.
        :raises TypeError: if an updated field violates the schema.
        """
        prop = self.get(namespace, prop_name)
        for (key, value) in kwargs.items():
            try:
                setattr(prop, key, value)
            except warlock.InvalidOperation as e:
                raise TypeError(encodeutils.exception_to_unicode(e))

        url = '/v2/metadefs/namespaces/{0}/properties/{1}'.format(namespace,
                                                                  prop_name)
        self.http_client.put(url, data=prop)

        # Re-fetch under the (possibly renamed) property name.
        return self.get(namespace, prop.name)

    def get(self, namespace, prop_name):
        """Get one property of a namespace."""
        url = '/v2/metadefs/namespaces/{0}/properties/{1}'.format(namespace,
                                                                  prop_name)
        resp, body = self.http_client.get(url)
        body.pop('self', None)
        # The API keys properties by name; inject it so the model is complete.
        body['name'] = prop_name
        return self.model(**body)

    def list(self, namespace, **kwargs):
        """Retrieve a listing of metadata properties.

        :returns: generator over list of objects
        """
        url = '/v2/metadefs/namespaces/{0}/properties'.format(namespace)

        resp, body = self.http_client.get(url)

        for key, value in body['properties'].items():
            value['name'] = key
            yield self.model(value)

    def delete(self, namespace, prop_name):
        """Delete a property."""
        url = '/v2/metadefs/namespaces/{0}/properties/{1}'.format(namespace,
                                                                  prop_name)
        self.http_client.delete(url)

    def delete_all(self, namespace):
        """Delete all properties in a namespace."""
        url = '/v2/metadefs/namespaces/{0}/properties'.format(namespace)
        self.http_client.delete(url)


class ObjectController(object):
    def __init__(self, http_client, schema_client):
        self.http_client = http_client
        self.schema_client = schema_client

    @utils.memoized_property
    def model(self):
        schema = self.schema_client.get('metadefs/object')
        return warlock.model_factory(schema.raw(), schemas.SchemaBasedModel)

    def create(self, namespace, **kwargs):
        """Create an object.

        :param namespace: Name of a namespace the object belongs.
        :param kwargs: Unpacked object.
        :raises TypeError: if kwargs do not validate against the schema.
        """
        try:
            obj = self.model(kwargs)
        except (warlock.InvalidOperation, ValueError) as e:
            raise TypeError(encodeutils.exception_to_unicode(e))

        url = '/v2/metadefs/namespaces/{0}/objects'.format(namespace)

        resp, body = self.http_client.post(url, data=obj)
        body.pop('self', None)
        return self.model(**body)

    def update(self, namespace, object_name, **kwargs):
        """Update an object.

        :param namespace: Name of a namespace the object belongs.
        :param object_name: Name of an object (old one).
        :param kwargs: Unpacked object.
        :raises TypeError: if an updated field violates the schema.
        """
        obj = self.get(namespace, object_name)
        for (key, value) in kwargs.items():
            try:
                setattr(obj, key, value)
            except warlock.InvalidOperation as e:
                raise TypeError(encodeutils.exception_to_unicode(e))

        # Remove read-only parameters before sending the object back.
        read_only = ['schema', 'updated_at', 'created_at']
        for elem in read_only:
            if elem in obj:
                del obj[elem]

        url = '/v2/metadefs/namespaces/{0}/objects/{1}'.format(namespace,
                                                               object_name)
        self.http_client.put(url, data=obj)

        # Re-fetch under the (possibly renamed) object name.
        return self.get(namespace, obj.name)

    def get(self, namespace, object_name):
        """Get one object of a namespace."""
        url = '/v2/metadefs/namespaces/{0}/objects/{1}'.format(namespace,
                                                               object_name)
        resp, body = self.http_client.get(url)
        body.pop('self', None)
        return self.model(**body)

    def list(self, namespace, **kwargs):
        """Retrieve a listing of metadata objects.

        :returns: generator over list of objects
        """
        url = '/v2/metadefs/namespaces/{0}/objects'.format(namespace)

        resp, body = self.http_client.get(url)

        for obj in body['objects']:
            yield self.model(obj)

    def delete(self, namespace, object_name):
        """Delete an object."""
        url = '/v2/metadefs/namespaces/{0}/objects/{1}'.format(namespace,
                                                               object_name)
        self.http_client.delete(url)

    def delete_all(self, namespace):
        """Delete all objects in a namespace."""
        url = '/v2/metadefs/namespaces/{0}/objects'.format(namespace)
        self.http_client.delete(url)


class TagController(object):
    def __init__(self, http_client, schema_client):
        self.http_client = http_client
        self.schema_client = schema_client

    @utils.memoized_property
    def model(self):
        schema = self.schema_client.get('metadefs/tag')
        return warlock.model_factory(schema.raw(), schemas.SchemaBasedModel)

    def create(self, namespace, tag_name):
        """Create a tag.

        :param namespace: Name of a namespace the Tag belongs.
        :param tag_name: The name of the new tag to create.
        """
        url = ('/v2/metadefs/namespaces/{0}/tags/{1}'.format(namespace,
                                                             tag_name))
        resp, body = self.http_client.post(url)
        body.pop('self', None)
        return self.model(**body)

    def create_multiple(self, namespace, **kwargs):
        """Create the list of tags.

        :param namespace: Name of a namespace to which the Tags belong.
        :param kwargs: list of tags under the 'tags' key.
        :raises TypeError: if a tag name violates the schema.
        """
        tag_names = kwargs.pop('tags', [])
        md_tag_list = []

        for tag_name in tag_names:
            try:
                md_tag_list.append(self.model(name=tag_name))
            except (warlock.InvalidOperation) as e:
                raise TypeError(encodeutils.exception_to_unicode(e))
        tags = {'tags': md_tag_list}

        url = '/v2/metadefs/namespaces/{0}/tags'.format(namespace)
        resp, body = self.http_client.post(url, data=tags)
        body.pop('self', None)
        for tag in body['tags']:
            yield self.model(tag)

    def update(self, namespace, tag_name, **kwargs):
        """Update a tag.

        :param namespace: Name of a namespace the Tag belongs.
        :param tag_name: Name of the Tag (old one).
        :param kwargs: Unpacked tag.
        :raises TypeError: if an updated field violates the schema.
        """
        tag = self.get(namespace, tag_name)
        for (key, value) in kwargs.items():
            try:
                setattr(tag, key, value)
            except warlock.InvalidOperation as e:
                raise TypeError(encodeutils.exception_to_unicode(e))

        # Remove read-only parameters before sending the tag back.
        read_only = ['updated_at', 'created_at']
        for elem in read_only:
            if elem in tag:
                del tag[elem]

        url = '/v2/metadefs/namespaces/{0}/tags/{1}'.format(namespace,
                                                            tag_name)
        self.http_client.put(url, data=tag)

        # Re-fetch under the (possibly renamed) tag name.
        return self.get(namespace, tag.name)

    def get(self, namespace, tag_name):
        """Get one tag of a namespace."""
        url = '/v2/metadefs/namespaces/{0}/tags/{1}'.format(namespace,
                                                            tag_name)
        resp, body = self.http_client.get(url)
        body.pop('self', None)
        return self.model(**body)

    def list(self, namespace, **kwargs):
        """Retrieve a listing of metadata tags.

        :returns: generator over list of tags.
        """
        url = '/v2/metadefs/namespaces/{0}/tags'.format(namespace)

        resp, body = self.http_client.get(url)

        for tag in body['tags']:
            yield self.model(tag)

    def delete(self, namespace, tag_name):
        """Delete a tag."""
        url = '/v2/metadefs/namespaces/{0}/tags/{1}'.format(namespace,
                                                            tag_name)
        self.http_client.delete(url)

    def delete_all(self, namespace):
        """Delete all tags in a namespace."""
        url = '/v2/metadefs/namespaces/{0}/tags'.format(namespace)
        self.http_client.delete(url)
# -*- coding: utf-8 -*- from __future__ import unicode_literals ''' Developer Navdeep Ghai Email navdeep@korecent.com This is beta version code, can be make enhancements in future ''' import sys import os import frappe from frappe.utils.background_jobs import enqueue from frappe import _, msgprint, throw from bcommerce.connection import BcommerceAPI from bcommerce.utils.logger import make_logs from frappe.utils import cint, flt, cstr, now_datetime import bcommerce from bcommerce.exceptions import SyncError, BcommerceSetupError, BcommerceConnectionError , \ BcommerceOptionError, BcommerceOptionSetError, BcommerceProductError, \ ProductDoesNotExist, CustomerDoesNotExist, BcommerceCustomerError, \ BcommerceOptionError, BcommerceOrderError from bigcommerce.exception import EmptyResponseWarning, ClientRequestException, RateLimitingException, \ ServerException MIN_ID = 0 MAX_ID = 250 DEBUG = 0 ''' Get Connection information ''' def get_connection(): setting = frappe.get_doc("Bcommerce Setting", "Bcommerce Setting") validate_mandatory(setting) auth_type = setting.authentication_type api = None if auth_type == "Basic Authentication": api = BcommerceAPI(setting.host, basic_auth=(setting.app_name, setting.token)) else: api = BcommerceAPI(client_id=setting.client_id, access_token=setting.access_token, store_hash=setting.store_hash) return api ''' Validate Bcommerce Store Setting ''' def validate_mandatory(setting): if not setting: raise BcommerceSetupError(_("Please Setup your Bcommerce Setting")) return else: auth_type = setting.authentication_type if auth_type == "Basic Authentication": if not setting.host or not setting.token or not setting.app_name: #later Stage will implement logs for tracking the issue for developer raise BcommerceSetupError(_("Please Check your all fields related to Basic Authetication")) else: if not setting.client_id or not setting.access_token or not setting.store_hash: raise BcommerceSetupError(_("Client ID, Access Token, Store Hash all are 
mandatory")) return ''' Just to ensure record already exists in database or not If Yes/ Then return the name of record to be link further with transactional data ''' def is_exists(_id, doctype, fieldname): resource_id, resource_doctype = _id, doctype if isinstance(resource_id, basestring): resource_id = cint(resource_id) # Below code will return only name of resource flag = frappe.db.get_value(resource_doctype, filters={fieldname:resource_id}, as_dict=True) return flag if flag else None ''' Sync Master Store Setting [ Store Reference ] ''' def sync_store_setting(): setting = frappe.get_doc("Bcommerce Setting", "Bcommerce Setting") try: from bcommerce.utils.store import sync_with_store enqueue("bcommerce.utils.store.sync_with_store", queue="long") except Exception as e: make_logs("Failed", "Sync Error", message=frappe.get_traceback()) ''' Function to Start the Syncing process between Bcommerce and ERPNext ''' def start_sync(): setting = frappe.get_doc("Bcommerce Setting", "Bcommerce Setting") if not setting.enable: frappe.msgprint(_("Please enable bcommerce app for EPRNext")) return False elif not validate_setting(setting): return False sync_bulk(setting) ''' Start synchronization between both servers ''' def sync_bulk(setting): """ Function used to sync all the prodcut from provide Min ID and Max ID """ if not get_queue_status(): frappe.msgprint(_("Syncing already in progress")) return {} try: make_logs("Queued", "Syncing", message="Syncing in progress") sync_customers(setting) sync_products(setting) sync_orders(setting) make_logs("Syncing Completed", "Syncing", message="Syncing complete successfully") frappe.db.commit() ''' Exceptions are more important for tracking the logs the error For Future you can even Sync the same resource/create new resource using manual sync ''' except Exception, e: make_logs("Failed", "Syncing", message="Syncing Failed") ''' Get Curency status of queue, so that user can't make same request again and again ''' def get_queue_status(): 
flag = True status = frappe.db.sql(""" SELECT title, resource_type FROM `tabBcommerce Log` WHERE resource_type="Syncing" \ ORDER BY modified DESC LIMIT 1""", as_dict=True) if status and len(status) == 1: status = status[0] if status.get("title") == "Queued": flag = False elif status.get("title") == "Syncing Completed": flag = True elif status.get("title") == "Failed": flag = True return flag ''' Start sycing Customer ''' def sync_customers(setting, min_id=None, max_id=None, id=None): from bcommerce.utils.customers import sync_bulk_customers customers = [] conn = get_connection() if min_id and max_id: customers = conn.Customers.all(min_id=min_id, max_id=max_id, limit=250) elif id: pass else: min_id = get_last_sync_id("bcommerce_customer_id", "Customer") max_id = min_id + MAX_ID customers = conn.Customers.all(min_id=min_id, max_id=max_id, limit=250) if not validate_resources(customers): return sync_bulk_customers(customers, setting, conn) ''' Start sycing products ''' def sync_products(setting): conn = get_connection() from bcommerce.utils.products import sync_bulk_products min_id = get_last_sync_id("bcommerce_product_id", "Item") max_id = min_id + MAX_ID products = conn.Products.all(min_id=min_id, max_id=max_id, limit=250) if not validate_resources(products): return sync_bulk_products(products, setting, conn) ''' Start syncing Orders ''' def sync_orders(setting): conn = get_connection() from bcommerce.utils.orders import sync_bulk_orders min_id = get_last_sync_id("bcommerce_order_id", "Sales Order") if min_id == 0 and setting.start_syncing_from_id: min_id = setting.start_syncing_from_id max_id = min_id + MAX_ID orders = conn.Orders.all(min_id=min_id, max_id=max_id, limit=250) if not validate_resources(orders): return sync_bulk_orders(orders, setting, conn) ''' Validate resources, just to ensure response has data or not ''' def validate_resources(resources): if (resources and not isinstance(resources, list)) or (isinstance(resources, list) and len(resources) == 0): 
return False return True ''' To ensure resource does exist on remote [Big Commerce] exist or not ''' def is_exists_on_remote(resource_type, id): conn = get_connection() if hasattr(conn, resource_type): try: getattr(conn, resource_type).get(id=id) except Exception as e: print frappe.get_traceback() ''' Last synced ID, from where next syncing will be start ''' def get_last_sync_id(fieldname, doctype): b_id = frappe.db.sql("""SELECT {fieldname} FROM `tab{doctype}` WHERE {flag} != 0 ORDER BY creation DESC \ LIMIT 1""".format(fieldname=fieldname, doctype=doctype, flag=fieldname), as_dict=1) id = b_id[0].get(fieldname) if (b_id and len(b_id) >= 1) else 1 return id+1 def validate_setting(setting): flag = True if not setting: flag = False elif setting and setting.authentication_type == "Basic Authentication": if not setting.app_name or not setting.token or not setting.host: flag = False elif setting and setting.authentication_type == "OAuth Authentication": if not setting.client_id or not setting.store_hash or not setting.access_token: flag = False if flag == True: from bcommerce.utils import validate_products_setting from bcommerce.utils import validate_customers_setting from bcommerce.utils import validate_orders_setting product = validate_products_setting(setting) customer = validate_customers_setting(setting) order = validate_orders_setting(setting) if not product or not customer or not order: flag = False return flag
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Miscellaneous algorithms for 2D contours and 3D triangularized meshes handling Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) ''' from __future__ import division from builtins import zip import os.path as op from warnings import warn import numpy as np from numpy import linalg as nla from .. import logging from ..external.six import string_types from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File, BaseInterfaceInputSpec) iflogger = logging.getLogger('interface') class TVTKBaseInterface(BaseInterface): _redirect_x = True _vtk_major = 6 def __init__(self, **inputs): try: from tvtk.tvtk_classes.vtk_version import vtk_build_version self._vtk_major = int(vtk_build_version[0]) except ImportError: iflogger.warning('VTK version-major inspection using tvtk failed.') super(TVTKBaseInterface, self).__init__(**inputs) class WarpPointsInputSpec(BaseInterfaceInputSpec): points = File(exists=True, mandatory=True, desc=('file containing the point set')) warp = File(exists=True, mandatory=True, desc=('dense deformation field to be applied')) interp = traits.Enum('cubic', 'nearest', 'linear', usedefault=True, mandatory=True, desc='interpolation') out_points = File(name_source='points', name_template='%s_warped', output_name='out_points', keep_extension=True, desc='the warped point set') class WarpPointsOutputSpec(TraitedSpec): out_points = File(desc='the warped point set') class WarpPoints(TVTKBaseInterface): """ Applies a displacement field to a point set given in vtk format. Any discrete deformation field, given in physical coordinates and which volume covers the extent of the vtk point set, is a valid ``warp`` file. 
FSL interfaces are compatible, for instance any field computed with :class:`nipype.interfaces.fsl.utils.ConvertWarp`. Example ------- >>> from nipype.algorithms.mesh import WarpPoints >>> wp = WarpPoints() >>> wp.inputs.points = 'surf1.vtk' >>> wp.inputs.warp = 'warpfield.nii' >>> res = wp.run() # doctest: +SKIP """ input_spec = WarpPointsInputSpec output_spec = WarpPointsOutputSpec def _gen_fname(self, in_file, suffix='generated', ext=None): import os.path as op fname, fext = op.splitext(op.basename(in_file)) if fext == '.gz': fname, fext2 = op.splitext(fname) fext = fext2 + fext if ext is None: ext = fext if ext[0] == '.': ext = ext[1:] return op.abspath('%s_%s.%s' % (fname, suffix, ext)) def _run_interface(self, runtime): import nibabel as nb import numpy as np from scipy import ndimage try: from tvtk.api import tvtk except ImportError: raise ImportError('Interface requires tvtk') r = tvtk.PolyDataReader(file_name=self.inputs.points) r.update() mesh = r.output points = np.array(mesh.points) warp_dims = nb.funcs.four_to_three(nb.load(self.inputs.warp)) affine = warp_dims[0].affine voxsize = warp_dims[0].header.get_zooms() vox2ras = affine[0:3, 0:3] ras2vox = np.linalg.inv(vox2ras) origin = affine[0:3, 3] voxpoints = np.array([np.dot(ras2vox, (p - origin)) for p in points]) warps = [] for axis in warp_dims: wdata = axis.get_data() if np.any(wdata != 0): warp = ndimage.map_coordinates(wdata, voxpoints.transpose()) else: warp = np.zeros((points.shape[0],)) warps.append(warp) disps = np.squeeze(np.dstack(warps)) newpoints = [p + d for p, d in zip(points, disps)] mesh.points = newpoints w = tvtk.PolyDataWriter() if self._vtk_major <= 5: w.input = mesh else: w.set_input_data_object(mesh) w.file_name = self._gen_fname(self.inputs.points, suffix='warped', ext='.vtk') w.write() return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_points'] = self._gen_fname(self.inputs.points, suffix='warped', ext='.vtk') return outputs class 
ComputeMeshWarpInputSpec(BaseInterfaceInputSpec): surface1 = File(exists=True, mandatory=True, desc=('Reference surface (vtk format) to which compute ' 'distance.')) surface2 = File(exists=True, mandatory=True, desc=('Test surface (vtk format) from which compute ' 'distance.')) metric = traits.Enum('euclidean', 'sqeuclidean', usedefault=True, desc=('norm used to report distance')) weighting = traits.Enum( 'none', 'area', usedefault=True, desc=('"none": no weighting is performed, surface": edge distance is ' 'weighted by the corresponding surface area')) out_warp = File('surfwarp.vtk', usedefault=True, desc='vtk file based on surface1 and warpings mapping it ' 'to surface2') out_file = File('distance.npy', usedefault=True, desc='numpy file keeping computed distances and weights') class ComputeMeshWarpOutputSpec(TraitedSpec): distance = traits.Float(desc="computed distance") out_warp = File(exists=True, desc=('vtk file with the vertex-wise ' 'mapping of surface1 to surface2')) out_file = File(exists=True, desc='numpy file keeping computed distances and weights') class ComputeMeshWarp(TVTKBaseInterface): """ Calculates a the vertex-wise warping to get surface2 from surface1. It also reports the average distance of vertices, using the norm specified as input. .. 
warning: A point-to-point correspondence between surfaces is required Example ------- >>> import nipype.algorithms.mesh as m >>> dist = m.ComputeMeshWarp() >>> dist.inputs.surface1 = 'surf1.vtk' >>> dist.inputs.surface2 = 'surf2.vtk' >>> res = dist.run() # doctest: +SKIP """ input_spec = ComputeMeshWarpInputSpec output_spec = ComputeMeshWarpOutputSpec def _triangle_area(self, A, B, C): A = np.array(A) B = np.array(B) C = np.array(C) ABxAC = nla.norm(A - B) * nla.norm(A - C) prod = np.dot(B - A, C - A) angle = np.arccos(prod / ABxAC) area = 0.5 * ABxAC * np.sin(angle) return area def _run_interface(self, runtime): try: from tvtk.api import tvtk except ImportError: raise ImportError('Interface requires tvtk') r1 = tvtk.PolyDataReader(file_name=self.inputs.surface1) r2 = tvtk.PolyDataReader(file_name=self.inputs.surface2) vtk1 = r1.output vtk2 = r2.output r1.update() r2.update() assert(len(vtk1.points) == len(vtk2.points)) points1 = np.array(vtk1.points) points2 = np.array(vtk2.points) diff = points2 - points1 weights = np.ones(len(diff)) try: errvector = nla.norm(diff, axis=1) except TypeError: # numpy < 1.9 errvector = np.apply_along_axis(nla.norm, 1, diff) if self.inputs.metric == 'sqeuclidean': errvector = errvector ** 2 if (self.inputs.weighting == 'area'): faces = vtk1.polys.to_array().reshape(-1, 4).astype(int)[:, 1:] for i, p1 in enumerate(points2): # compute surfaces, set in weight w = 0.0 point_faces = faces[(faces[:, :] == i).any(axis=1)] for idset in point_faces: fp1 = points1[int(idset[0])] fp2 = points1[int(idset[1])] fp3 = points1[int(idset[2])] w += self._triangle_area(fp1, fp2, fp3) weights[i] = w result = np.vstack([errvector, weights]) np.save(op.abspath(self.inputs.out_file), result.transpose()) out_mesh = tvtk.PolyData() out_mesh.points = vtk1.points out_mesh.polys = vtk1.polys out_mesh.point_data.vectors = diff out_mesh.point_data.vectors.name = 'warpings' writer = tvtk.PolyDataWriter( file_name=op.abspath(self.inputs.out_warp)) if 
self._vtk_major <= 5: writer.input = mesh else: writer.set_input_data_object(mesh) writer.write() self._distance = np.average(errvector, weights=weights) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = op.abspath(self.inputs.out_file) outputs['out_warp'] = op.abspath(self.inputs.out_warp) outputs['distance'] = self._distance return outputs class MeshWarpMathsInputSpec(BaseInterfaceInputSpec): in_surf = File(exists=True, mandatory=True, desc=('Input surface in vtk format, with associated warp ' 'field as point data (ie. from ComputeMeshWarp')) float_trait = traits.Either(traits.Float(1.0), traits.Tuple( traits.Float(1.0), traits.Float(1.0), traits.Float(1.0))) operator = traits.Either( float_trait, File(exists=True), default=1.0, mandatory=True, desc=('image, float or tuple of floats to act as operator')) operation = traits.Enum('sum', 'sub', 'mul', 'div', usedefault=True, desc=('operation to be performed')) out_warp = File('warp_maths.vtk', usedefault=True, desc='vtk file based on in_surf and warpings mapping it ' 'to out_file') out_file = File('warped_surf.vtk', usedefault=True, desc='vtk with surface warped') class MeshWarpMathsOutputSpec(TraitedSpec): out_warp = File(exists=True, desc=('vtk file with the vertex-wise ' 'mapping of surface1 to surface2')) out_file = File(exists=True, desc='vtk with surface warped') class MeshWarpMaths(TVTKBaseInterface): """ Performs the most basic mathematical operations on the warping field defined at each vertex of the input surface. A surface with scalar or vector data can be used as operator for non-uniform operations. .. 
warning: A point-to-point correspondence between surfaces is required Example ------- >>> import nipype.algorithms.mesh as m >>> mmath = m.MeshWarpMaths() >>> mmath.inputs.in_surf = 'surf1.vtk' >>> mmath.inputs.operator = 'surf2.vtk' >>> mmath.inputs.operation = 'mul' >>> res = mmath.run() # doctest: +SKIP """ input_spec = MeshWarpMathsInputSpec output_spec = MeshWarpMathsOutputSpec def _run_interface(self, runtime): try: from tvtk.api import tvtk except ImportError: raise ImportError('Interface requires tvtk') r1 = tvtk.PolyDataReader(file_name=self.inputs.in_surf) vtk1 = r1.output r1.update() points1 = np.array(vtk1.points) if vtk1.point_data.vectors is None: raise RuntimeError(('No warping field was found in in_surf')) operator = self.inputs.operator opfield = np.ones_like(points1) if isinstance(operator, string_types): r2 = tvtk.PolyDataReader(file_name=self.inputs.surface2) vtk2 = r2.output r2.update() assert(len(points1) == len(vtk2.points)) opfield = vtk2.point_data.vectors if opfield is None: opfield = vtk2.point_data.scalars if opfield is None: raise RuntimeError( ('No operator values found in operator file')) opfield = np.array(opfield) if opfield.shape[1] < points1.shape[1]: opfield = np.array([opfield.tolist()] * points1.shape[1]).T else: operator = np.atleast_1d(operator) opfield *= operator warping = np.array(vtk1.point_data.vectors) if self.inputs.operation == 'sum': warping += opfield elif self.inputs.operation == 'sub': warping -= opfield elif self.inputs.operation == 'mul': warping *= opfield elif self.inputs.operation == 'div': warping /= opfield vtk1.point_data.vectors = warping writer = tvtk.PolyDataWriter( file_name=op.abspath(self.inputs.out_warp)) if self._vtk_major <= 5: writer.input = vtk1 else: writer.set_input_data_object(vtk1) writer.write() vtk1.point_data.vectors = None vtk1.points = points1 + warping writer = tvtk.PolyDataWriter( file_name=op.abspath(self.inputs.out_file)) if self._vtk_major <= 5: writer.input = vtk1 else: 
writer.set_input_data_object(vtk1) writer.write() return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = op.abspath(self.inputs.out_file) outputs['out_warp'] = op.abspath(self.inputs.out_warp) return outputs class P2PDistance(ComputeMeshWarp): """ Calculates a point-to-point (p2p) distance between two corresponding VTK-readable meshes or contours. A point-to-point correspondence between nodes is required .. deprecated:: 1.0-dev Use :py:class:`ComputeMeshWarp` instead. """ def __init__(self, **inputs): super(P2PDistance, self).__init__(**inputs) warn(('This interface has been deprecated since 1.0, please use ' 'ComputeMeshWarp'), DeprecationWarning)
# vim: ft=python fileencoding=utf-8 sw=4 et sts=4 """Deals with transformations like rotate and flip and deleting files.""" import os from threading import Thread from gi.repository import GObject from vimiv import imageactions from vimiv.exceptions import (NotTransformable, TrashUndeleteError, StringConversionError) from vimiv.fileactions import edit_supported from vimiv.helpers import get_int from vimiv.settings import settings from vimiv.trash_manager import TrashManager class Transform(GObject.Object): """Deals with transformations like rotate/flip and deleting files. Attributes: threads_running: If True, a thread is running to apply to files. trash_manager: Class to handle a shared trash directory. _app: The main vimiv application to interact with. _changes: Dictionary for rotate and flip. Key: Filename; Item: [Int, Bool, Bool] Signals: changed: Emitted when an image was transformed so Image can update. applied-to-file: Emitted when the file was successfully transformed. """ def __init__(self, app): super(Transform, self).__init__() self._app = app self._changes = {} self.trash_manager = TrashManager() self.threads_running = False def delete(self): """Delete all marked images or the current one.""" # Get all images images = self.get_images("Deleted") self._app["mark"].marked = [] # Delete all images remembering possible errors message = "" for im in images: if not os.path.exists(im): message += "Image %s does not exist." % (im) elif os.path.isdir(im): message += "Deleting directory %s is not supported." % (im) else: self.trash_manager.delete(im) if message: self._app["statusbar"].message(message, "error") self._app.emit("paths-changed", self) def undelete(self, basename): """Undelete an image in the trash. Args: basename: The basename of the image in the trash directory. 
""" try: self.trash_manager.undelete(basename) self._app.emit("paths-changed", self) except TrashUndeleteError as e: message = "Could not restore %s, %s" % (basename, str(e)) self._app["statusbar"].message(message, "error") def write(self, quit_app=False): """Write transformed/manipulated image(s) to disk. Args: quit: If True, quit the application. Activated by :wq. """ self._app["statusbar"].message("Saving...", "info") # Manipulations include transformations implicitly if self._app["manipulate"].is_visible(): self._app["manipulate"].finish(True) self._changes.clear() # Only apply any transformations else: self._thread_for_apply() # Quit or inform if quit_app: self._app.quit_wrapper() else: self._app["statusbar"].message("Changes written to disk", "info") def get_images(self, info): """Return the images which should be manipulated. Either the currently focused image or all marked images. Args: info: Info to display when acting on marked images. """ # Add all marked images if self._app["mark"].marked: images = self._app["mark"].marked if len(images) == 1: message = "%s %d marked image" % (info, len(images)) else: message = "%s %d marked images" % (info, len(images)) self._app["statusbar"].message(message, "info") # Add the image shown else: images = [os.path.abspath(self._app.get_pos(True))] return images def rotate(self, cwise): """Rotate the displayed image and call thread to rotate files. Args: cwise: Rotate image 90 * cwise degrees. 
""" try: self._is_transformable() cwise = get_int(cwise, allow_sign=True) except NotTransformable as e: self._app["statusbar"].message(str(e) + " rotate", "error") return except StringConversionError as e: self._app["statusbar"].message(str(e), "error") return images = self.get_images("Rotated") cwise = cwise % 4 # Update properties for fil in images: if fil in self._changes: self._changes[fil][0] = \ (self._changes[fil][0] + cwise) % 4 else: self._changes[fil] = [cwise, 0, 0] # Rotate the image shown if self._app.get_path() in images: self.emit("changed", "rotate", cwise) # Reload thumbnails of rotated images immediately if self._app["thumbnail"].toggled: self.apply() def apply(self): """Start thread for rotate and flip.""" # TODO improve this, it is currently not possible to find out what is # being changed and what should still be done if self.threads_running: return if settings["autosave_images"].get_value(): t = Thread(target=self._thread_for_apply) t.start() else: self._changes.clear() def _thread_for_apply(self): """Rotate and flip image file in an extra thread.""" self.threads_running = True to_remove = list(self._changes.keys()) for f in self._changes: if self._changes[f][0]: imageactions.rotate_file(f, self._changes[f][0]) if self._changes[f][1]: imageactions.flip_file(f, True) if self._changes[f][2]: imageactions.flip_file(f, False) for key in to_remove: del self._changes[key] self.emit("applied-to-file", to_remove) self.threads_running = False def _is_transformable(self): """Check if the current image is transformable.""" if not self._app.get_paths(): raise NotTransformable("No image to") elif not edit_supported(self._app.get_path()): raise NotTransformable("Filetype not supported for") # Some operations only make sense if we are allowed to save to file elif not settings["autosave_images"].get_value(): message = "" if self._app["thumbnail"].toggled: message = 'When operating in thumbnail mode ' \ '"autosave_images" must be enabled for' elif 
self._app["mark"].marked: message = 'When images are marked ' \ '"autosave_images" must be enabled for' if message: raise NotTransformable(message) def flip(self, horizontal): """Flip the displayed image and call thread to flip files. Args: horizontal: If True, flip horizontally. Else vertically. """ try: self._is_transformable() horizontal = get_int(horizontal) except NotTransformable as e: self._app["statusbar"].message(str(e) + " flip", "error") return except StringConversionError as e: self._app["statusbar"].message(str(e), "error") return images = self.get_images("Flipped") # Apply changes for fil in images: if fil not in self._changes: self._changes[fil] = [0, 0, 0] if horizontal: self._changes[fil][1] = \ (self._changes[fil][1] + 1) % 2 else: self._changes[fil][2] = \ (self._changes[fil][2] + 1) % 2 # Flip the image shown if self._app.get_path() in images: self.emit("changed", "flip", horizontal) # Reload thumbnails of flipped images immediately if self._app["thumbnail"].toggled: self.apply() def rotate_auto(self): """Autorotate all pictures in the current pathlist.""" autorotate = imageactions.Autorotate(self._app.get_paths()) self.threads_running = True autorotate.connect("completed", self._on_autorotate_completed) autorotate.run() def _on_autorotate_completed(self, autorotate, amount): message = "Completed autorotate, %d files rotated" % (amount) self.threads_running = False self._app["statusbar"].message(message, "info") GObject.signal_new("changed", Transform, GObject.SIGNAL_RUN_LAST, None, (GObject.TYPE_PYOBJECT, GObject.TYPE_PYOBJECT)) GObject.signal_new("applied-to-file", Transform, GObject.SIGNAL_RUN_LAST, None, (GObject.TYPE_PYOBJECT,))
"""Routines that compute Earth nutation.""" from numpy import array, cos, dot, fmod, sin, outer, zeros from .constants import ASEC2RAD, ASEC360, DEG2RAD, tau, T0 from .functions import load_bundled_npy _TENTH_USEC_2_RAD = ASEC2RAD / 1e7 _arrays = load_bundled_npy('nutation.npz') ke0_t = _arrays['ke0_t'] ke1 = _arrays['ke1'] lunisolar_longitude_coefficients = _arrays['lunisolar_longitude_coefficients'] lunisolar_obliquity_coefficients = _arrays['lunisolar_obliquity_coefficients'] nals_t = _arrays['nals_t'] napl_t = _arrays['napl_t'] nutation_coefficients_longitude = _arrays['nutation_coefficients_longitude'] nutation_coefficients_obliquity = _arrays['nutation_coefficients_obliquity'] se0_t_0 = _arrays['se0_t_0'] se0_t_1 = _arrays['se0_t_1'] # These wrappers return nutation angles in radians as expected by the # Time object. We can't change the units returned by the underlying # routines without breaking any applications that discovered them at # some point in the past few years (though they are officially # undocumented) and started calling them directly. def iau2000a_radians(t, fundamental_argument_terms=5, lunisolar_terms=687, planetary_terms=687): """Return the IAU 2000A angles delta-psi and delta-epsilon in radians.""" d_psi, d_eps = iau2000a(t.tt, fundamental_argument_terms, lunisolar_terms, planetary_terms) d_psi *= _TENTH_USEC_2_RAD d_eps *= _TENTH_USEC_2_RAD return d_psi, d_eps def iau2000b_radians(t): """Return the IAU 2000B angles delta-psi and delta-epsilon in radians.""" d_psi, d_eps = iau2000b(t.tt) d_psi *= _TENTH_USEC_2_RAD d_eps *= _TENTH_USEC_2_RAD return d_psi, d_eps # Lower-level routines. def build_nutation_matrix(mean_obliquity_radians, true_obliquity_radians, psi_radians): """Generate the nutation rotation matrix, given three nutation parameters. The input angles can be simple floats. Or, they can be arrays of the same length, in which case the output matrix will have an extra dimension of that same length providing *n* rotation matrices. 
""" cobm = cos(mean_obliquity_radians) sobm = sin(mean_obliquity_radians) cobt = cos(true_obliquity_radians) sobt = sin(true_obliquity_radians) cpsi = cos(psi_radians) spsi = sin(psi_radians) return array(((cpsi, -spsi * cobm, -spsi * sobm), (spsi * cobt, cpsi * cobm * cobt + sobm * sobt, cpsi * sobm * cobt - cobm * sobt), (spsi * sobt, cpsi * cobm * sobt - sobm * cobt, cpsi * sobm * sobt + cobm * cobt))) def mean_obliquity(jd_tdb): """Return the mean obliquity of the ecliptic in arcseconds. `jd_tt` - TDB time as a Julian date float, or NumPy array of floats """ # Compute time in Julian centuries from epoch J2000.0. t = (jd_tdb - T0) / 36525.0 # Compute the mean obliquity in arcseconds. Use expression from the # reference's eq. (39) with obliquity at J2000.0 taken from eq. (37) # or Table 8. epsilon = (((( - 0.0000000434 * t - 0.000000576 ) * t + 0.00200340 ) * t - 0.0001831 ) * t - 46.836769 ) * t + 84381.406 return epsilon def equation_of_the_equinoxes_complimentary_terms(jd_tt): """Compute the complementary terms of the equation of the equinoxes. `jd_tt` - Terrestrial Time: Julian date float, or NumPy array of floats """ # Interval between fundamental epoch J2000.0 and current date. t = (jd_tt - T0) / 36525.0 # Build array for intermediate results. shape = getattr(jd_tt, 'shape', ()) fa = zeros((14,) if shape == () else (14, shape[0])) # Mean Anomaly of the Moon. fa[0] = ((485868.249036 + (715923.2178 + ( 31.8792 + ( 0.051635 + ( -0.00024470) * t) * t) * t) * t) * ASEC2RAD + (1325.0*t % 1.0) * tau) # Mean Anomaly of the Sun. fa[1] = ((1287104.793048 + (1292581.0481 + ( -0.5532 + ( +0.000136 + ( -0.00001149) * t) * t) * t) * t) * ASEC2RAD + (99.0*t % 1.0) * tau) # Mean Longitude of the Moon minus Mean Longitude of the Ascending # Node of the Moon. fa[2] = (( 335779.526232 + ( 295262.8478 + ( -12.7512 + ( -0.001037 + ( 0.00000417) * t) * t) * t) * t) * ASEC2RAD + (1342.0*t % 1.0) * tau) # Mean Elongation of the Moon from the Sun. 
fa[3] = ((1072260.703692 + (1105601.2090 + ( -6.3706 + ( 0.006593 + ( -0.00003169) * t) * t) * t) * t) * ASEC2RAD + (1236.0*t % 1.0) * tau) # Mean Longitude of the Ascending Node of the Moon. fa[4] = (( 450160.398036 + (-482890.5431 + ( 7.4722 + ( 0.007702 + ( -0.00005939) * t) * t) * t) * t) * ASEC2RAD + (-5.0*t % 1.0) * tau) fa[ 5] = (4.402608842 + 2608.7903141574 * t) fa[ 6] = (3.176146697 + 1021.3285546211 * t) fa[ 7] = (1.753470314 + 628.3075849991 * t) fa[ 8] = (6.203480913 + 334.0612426700 * t) fa[ 9] = (0.599546497 + 52.9690962641 * t) fa[10] = (0.874016757 + 21.3299104960 * t) fa[11] = (5.481293872 + 7.4781598567 * t) fa[12] = (5.311886287 + 3.8133035638 * t) fa[13] = (0.024381750 + 0.00000538691 * t) * t fa %= tau # Evaluate the complementary terms. a = ke1.dot(fa) c_terms = se1_0 * sin(a) c_terms += se1_1 * cos(a) c_terms *= t a = ke0_t.dot(fa) c_terms += se0_t_0.dot(sin(a)) c_terms += se0_t_1.dot(cos(a)) c_terms *= ASEC2RAD return c_terms anomaly_constant, anomaly_coefficient = array([ # Mean anomaly of the Moon. (2.35555598, 8328.6914269554), # Mean anomaly of the Sun. (6.24006013, 628.301955), # Mean argument of the latitude of the Moon. (1.627905234, 8433.466158131), # Mean elongation of the Moon from the Sun. (5.198466741, 7771.3771468121), # Mean longitude of the ascending node of the Moon. (2.18243920, - 33.757045), # Planetary longitudes, Mercury through Neptune (Souchay et al. 1999). (4.402608842, 2608.7903141574), (3.176146697, 1021.3285546211), (1.753470314, 628.3075849991), (6.203480913, 334.0612426700), (0.599546497, 52.9690962641), (0.874016757, 21.3299104960), (5.481293871, 7.4781598567), (5.321159000, 3.8127774000), # General accumulated precession in longitude (gets multiplied by t). (0.02438175, 0.00000538691), ]).T def iau2000a(jd_tt, fundamental_argument_terms=5, lunisolar_terms=687, planetary_terms=687): """Compute Earth nutation based on the IAU 2000A nutation model. 
``jd_tt`` - Terrestrial Time: Julian date float, or NumPy array of floats Returns a tuple ``(delta_psi, delta_epsilon)`` measured in tenths of a micro-arcsecond. Each value is either a float, or a NumPy array with the same dimensions as the input argument. Supply smaller integer values for ``fundamental_argument_terms``, ``lunisolar_terms``, and ``planetary_terms`` to trade off accuraccy for speed. """ # Interval between fundamental epoch J2000.0 and given date. t = (jd_tt - T0) / 36525.0 # Compute fundamental arguments from Simon et al. (1994), in radians. a = fundamental_arguments(t, fundamental_argument_terms) # ** Luni-solar nutation ** # Summation of luni-solar nutation series. cutoff = lunisolar_terms arg = nals_t[:cutoff].dot(a).T sarg = sin(arg) carg = cos(arg) dpsi = dot(sarg, lunisolar_longitude_coefficients[:cutoff,0]) dpsi += dot(sarg, lunisolar_longitude_coefficients[:cutoff,1]) * t dpsi += dot(carg, lunisolar_longitude_coefficients[:cutoff,2]) deps = dot(carg, lunisolar_obliquity_coefficients[:cutoff,0]) deps += dot(carg, lunisolar_obliquity_coefficients[:cutoff,1]) * t deps += dot(sarg, lunisolar_obliquity_coefficients[:cutoff,2]) # Compute and add in planetary components. if not planetary_terms: return dpsi, deps if getattr(t, 'shape', ()) == (): a = t * anomaly_coefficient + anomaly_constant else: a = (outer(anomaly_coefficient, t).T + anomaly_constant).T a[-1] *= t cutoff = planetary_terms arg = napl_t[:cutoff].dot(a).T sarg = sin(arg) carg = cos(arg) dpsi += dot(sarg, nutation_coefficients_longitude[:cutoff,0]) dpsi += dot(carg, nutation_coefficients_longitude[:cutoff,1]) deps += dot(sarg, nutation_coefficients_obliquity[:cutoff,0]) deps += dot(carg, nutation_coefficients_obliquity[:cutoff,1]) return dpsi, deps def iau2000b(jd_tt): """Compute Earth nutation based on the faster IAU 2000B nutation model. 
`jd_tt` - Terrestrial Time: Julian date float, or NumPy array of floats Returns a tuple ``(delta_psi, delta_epsilon)`` measured in tenths of a micro-arcsecond. Each is either a float, or a NumPy array with the same dimensions as the input argument. The result will not take as long to compute as the full IAU 2000A series, but should still agree with ``iau2000a()`` to within a milliarcsecond between the years 1995 and 2020. """ dpsi, deps = iau2000a(jd_tt, 2, 77, 0) dpsi += -0.000135e7 deps += 0.000388e7 return dpsi, deps fa0, fa1, fa2, fa3, fa4 = array(( # Mean Anomaly of the Moon. (485868.249036, 1717915923.2178, 31.8792, 0.051635, - .00024470), # Mean Anomaly of the Sun. (1287104.79305, 129596581.0481, - 0.5532, 0.000136, - 0.00001149), # Mean Longitude of the Moon minus Mean Longitude of the Ascending # Node of the Moon. (335779.526232, 1739527262.8478, - 12.7512, - 0.001037, 0.00000417), # Mean Elongation of the Moon from the Sun. (1072260.70369, 1602961601.2090, - 6.3706, 0.006593, - 0.00003169), # Mean Longitude of the Ascending Node of the Moon. (450160.398036, - 6962890.5431, 7.4722, 0.007702, - 0.00005939), )).T[:,:,None] def fundamental_arguments(t, terms=5): """Compute the fundamental arguments (mean elements) of Sun and Moon. ``t`` - TDB time in Julian centuries since J2000.0, as float or NumPy array Outputs fundamental arguments, in radians: a[0] = l (mean anomaly of the Moon) a[1] = l' (mean anomaly of the Sun) a[2] = F (mean argument of the latitude of the Moon) a[3] = D (mean elongation of the Moon from the Sun) a[4] = Omega (mean longitude of the Moon's ascending node); from Simon section 3.4(b.3), precession = 5028.8200 arcsec/cy) Pass a smaller value for the number of polynomial ``terms`` if you want to trade accuracy for speed. 
""" fa = iter((fa4, fa3, fa2, fa1)[-terms+1:]) a = next(fa) * t for fa_i in fa: a += fa_i a *= t a += fa0 fmod(a, ASEC360, out=a) a *= ASEC2RAD if getattr(t, 'shape', ()): return a return a[:,0] # Sine and cosine coefficients for t^1. se1_0 = -0.87e-6 se1_1 = +0.00e-6 # Deprecated functions that third-party code might still call; several # of our tests also still call them, to help keep them working. def compute_nutation(t): """Deprecated: this is now a method on the Time object.""" return t.N def earth_tilt(t): """Deprecated: these are now computed separately on the Time object.""" d_psi, d_eps = t._nutation_angles_radians mean_ob = t._mean_obliquity_radians true_ob = mean_ob + d_eps c_terms = equation_of_the_equinoxes_complimentary_terms(t.tt) eq_eq = d_psi * cos(mean_ob) + c_terms return (mean_ob / DEG2RAD, true_ob / DEG2RAD, eq_eq / ASEC2RAD / 15.0, d_psi / ASEC2RAD, d_eps / ASEC2RAD)
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/client/gui_utilities.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import contextlib import logging import os import threading from king_phisher import find from king_phisher import utilities from gi.repository import Gdk from gi.repository import GLib from gi.repository import GObject from gi.repository import Gtk GOBJECT_PROPERTY_MAP = { 'checkbutton': 'active', 'combobox': ( lambda c, v: c.set_active_iter(search_list_store(c.get_model(), v)), lambda c: c.get_model().get_value(c.get_active_iter() or c.get_model().get_iter_first(), 0) ), 'entry': 'text', 'spinbutton': 'value', 'switch': 'active', 'textview': ( lambda t, v: t.get_buffer().set_text(v), lambda t: t.get_buffer().get_text(t.get_buffer().get_start_iter(), t.get_buffer().get_end_iter(), False) ) } """ The dictionary which maps GObjects to either the names of properties to store text or a tuple which contains a set and get function. If a tuple of two functions is specified the set function will be provided two parameters, the object and the value and the get function will just be provided the object. """ if isinstance(Gtk.Window, utilities.Mock): _Gtk_FileChooserDialog = type('Gtk.FileChooserDialog', (object,), {}) _Gtk_FileChooserDialog.__module__ = '' else: _Gtk_FileChooserDialog = Gtk.FileChooserDialog def which_glade(): """ Locate the glade data file. :return: The path to the glade data file. :rtype: str """ return find.find_data_file(os.environ['KING_PHISHER_GLADE_FILE']) def glib_idle_add_wait(function, *args): """ Execute *function* in the main GTK loop using :py:func:`GLib.idle_add` and block until it has completed. This is useful for threads that need to update GUI data. :param function function: The function to call. :param args: The arguments to *functoin*. :return: The result of the function call. 
""" gsource_completed = threading.Event() results = [] def wrapper(): results.append(function(*args)) gsource_completed.set() return False GLib.idle_add(wrapper) gsource_completed.wait() return results.pop() def gobject_get_value(gobject, gtype=None): """ Retreive the value of a GObject widget. Only objects with value retrieving functions present in the :py:data:`.GOBJECT_PROPERTY_MAP` can be processed by this function. :param gobject: The object to retrieve the value for. :type gobject: :py:class:`GObject.GObject` :param str gtype: An explicit type to treat *gobject* as. :return: The value of *gobject*. :rtype: str """ gtype = (gtype or gobject.__class__.__name__) gtype = gtype.lower() if isinstance(GOBJECT_PROPERTY_MAP[gtype], (list, tuple)): try: value = GOBJECT_PROPERTY_MAP[gtype][1](gobject) except AttributeError: return None else: value = gobject.get_property(GOBJECT_PROPERTY_MAP[gtype]) return value @contextlib.contextmanager def gobject_signal_blocked(gobject, signal_name): """ This is a context manager that can be used with the 'with' statement to execute a block of code while *signal_name* is blocked. :param gobject: The object to block the signal on. :type gobject: :py:class:`GObject.GObject` :param str signal_name: The name of the signal to block. """ signal_id = GObject.signal_lookup(signal_name, gobject.__class__) handler_id = GObject.signal_handler_find(gobject, GObject.SignalMatchType.ID, signal_id, 0, None, 0, 0) GObject.signal_handler_block(gobject, handler_id) yield GObject.signal_handler_unblock(gobject, handler_id) def gtk_sync(): """Process all pending GTK events.""" while Gtk.events_pending(): Gtk.main_iteration() def gtk_widget_destroy_children(widget): """ Destroy all GTK child objects of *widget*. :param widget: The widget to destroy all the children of. 
:type widget: :py:class:`Gtk.Widget` """ map(lambda child: child.destroy(), widget.get_children()) def gtk_treeview_selection_to_clipboard(treeview, column=1): """ Copy the currently selected values from the specified column in the treeview to the users clipboard. If no value is selected in the treeview, then the clipboard is left unmodified. If multiple values are selected, they will all be placed in the clipboard on seperate lines. :param treeview: The treeview instance to get the selection from. :type treeview: :py:class:`Gtk.TreeView` :param int column: The column number to retrieve the value for. """ treeview_selection = treeview.get_selection() (model, tree_paths) = treeview_selection.get_selected_rows() if not tree_paths: return tree_iters = map(model.get_iter, tree_paths) selection_values = map(lambda ti: model.get_value(ti, column), tree_iters) selection_values = os.linesep.join(selection_values) clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD) clipboard.set_text(selection_values, -1) def search_list_store(list_store, value): """ Search a GTK ListStore for a value. :param list_store: The list store to search. :type list_store: :py:class:`Gtk.ListStore` :param value: The value to search for. :return: The row on which the value was found. :rtype: :py:class:`Gtk.TreeIter` """ for row in list_store: if row[0] == value: return row.iter return None def show_dialog(message_type, message, parent, secondary_text=None, message_buttons=Gtk.ButtonsType.OK): """ Display a dialog and return the response. The response is dependent on the value of *message_buttons*. :param message_type: The GTK message type to display. :type message_type: :py:class:`Gtk.MessageType` :param str message: The text to display in the dialog. :param parent: The parent window that the dialog should belong to. :type parent: :py:class:`Gtk.Window` :param str secondary_text: Optional subtext for the dialog. :param message_buttons: The buttons to display in the dialog box. 
:type message_buttons: :py:class:`Gtk.ButtonsType` :return: The response of the dialog. :rtype: int """ dialog = Gtk.MessageDialog(parent, Gtk.DialogFlags.DESTROY_WITH_PARENT, message_type, message_buttons, message) if secondary_text: dialog.format_secondary_text(secondary_text) dialog.show_all() response = dialog.run() dialog.destroy() return response def show_dialog_error(*args, **kwargs): """Display an error dialog with :py:func:`.show_dialog`.""" return show_dialog(Gtk.MessageType.ERROR, *args, **kwargs) def show_dialog_info(*args, **kwargs): """Display an informational dialog with :py:func:`.show_dialog`.""" return show_dialog(Gtk.MessageType.INFO, *args, **kwargs) def show_dialog_warning(*args, **kwargs): """Display an warning dialog with :py:func:`.show_dialog`.""" return show_dialog(Gtk.MessageType.WARNING, *args, **kwargs) def show_dialog_yes_no(*args, **kwargs): """ Display a dialog which asks a yes or no question with :py:func:`.show_dialog`. :return: True if the response is Yes. :rtype: bool """ kwargs['message_buttons'] = Gtk.ButtonsType.YES_NO return show_dialog(Gtk.MessageType.QUESTION, *args, **kwargs) == Gtk.ResponseType.YES class UtilityGladeGObject(object): """ A base object to wrap GTK widgets loaded from Glade data files. This provides a number of convenience methods for managing the main widget and child widgets. This class is meant to be subclassed by classes representing objects from the Glade data file. The class names must be identical to the name of the object they represent in the Glade data file. 
""" gobject_ids = [] """A list of children GObjects to load from the Glade data file.""" top_level_dependencies = [] """Additional top level GObjects to load from the Glade data file.""" config_prefix = '' """A prefix to be used for keys when looking up value in the :py:attr:`~.UtilityGladeGObject.config`.""" top_gobject = 'gobject' """The name of the attribute to set a reference of the top level GObject to.""" def __init__(self, config, parent): """ :param dict config: The King Phisher client configuration. :param parent: The parent window for this object. :type parent: :py:class:`Gtk.Window` """ self.config = config """A reference to the King Phisher client configuration.""" self.parent = parent """The parent :py:class:`Gtk.Window` instance.""" self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__) builder = Gtk.Builder() self.gtk_builder = builder """A :py:class:`Gtk.Builder` instance used to load Glade data with.""" builder.add_objects_from_file(which_glade(), self.top_level_dependencies + [self.__class__.__name__]) builder.connect_signals(self) gobject = builder.get_object(self.__class__.__name__) if isinstance(gobject, Gtk.Window): gobject.set_transient_for(self.parent) setattr(self, self.top_gobject, gobject) self.gobjects = {} for gobject_id in self.gobject_ids: gobject = self.gtk_builder_get(gobject_id) # the following five lines ensure that the types match up, this is to enforce clean development gtype = gobject_id.split('_', 1)[0] if gobject == None: raise TypeError("gobject {0} could not be found in the glade file".format(gobject_id)) elif gobject.__class__.__name__.lower() != gtype: raise TypeError("gobject {0} is of type {1} expected {2}".format(gobject_id, gobject.__class__.__name__, gtype)) self.gobjects[gobject_id] = gobject self.objects_load_from_config() def gtk_builder_get(self, gobject_id): """ Find the child GObject with name *gobject_id* from the GTK builder. :param str gobject_id: The object name to look for. 
:return: The GObject as found by the GTK builder. :rtype: :py:class:`GObject.GObject` """ gtkbuilder_id = "{0}.{1}".format(self.__class__.__name__, gobject_id) self.logger.debug('loading GTK builder object with id: ' + gtkbuilder_id) return self.gtk_builder.get_object(gtkbuilder_id) def objects_load_from_config(self): """ Iterate through :py:attr:`.gobjects` and set the GObject's value from the corresponding value in the :py:attr:`~.UtilityGladeGObject.config`. """ for gobject_id, gobject in self.gobjects.items(): gtype, config_name = gobject_id.split('_', 1) config_name = self.config_prefix + config_name if not gtype in GOBJECT_PROPERTY_MAP or not config_name in self.config: continue value = self.config[config_name] if value == None: continue if isinstance(GOBJECT_PROPERTY_MAP[gtype], (list, tuple)): GOBJECT_PROPERTY_MAP[gtype][0](gobject, value) else: gobject.set_property(GOBJECT_PROPERTY_MAP[gtype], value) def objects_save_to_config(self): for gobject_id, gobject in self.gobjects.items(): gtype, config_name = gobject_id.split('_', 1) config_name = self.config_prefix + config_name if not gtype in GOBJECT_PROPERTY_MAP: continue self.config[config_name] = gobject_get_value(gobject, gtype) class UtilityFileChooser(_Gtk_FileChooserDialog): """Display a file chooser dialog.""" def __init__(self, *args, **kwargs): super(UtilityFileChooser, self).__init__(*args, **kwargs) self.parent = self.get_parent_window() def quick_add_filter(self, name, patterns): """ Add a filter for displaying files, this is useful in conjunction with :py:meth:`.run_quick_open`. :param str name: The name of the filter. :param patterns: The pattern(s) to match. :type patterns: list, str """ if not isinstance(patterns, (list, tuple)): patterns = (patterns,) new_filter = Gtk.FileFilter() new_filter.set_name(name) for pattern in patterns: new_filter.add_pattern(pattern) self.add_filter(new_filter) def run_quick_open(self): """ Display a dialog asking a user which file should be opened. 
The value of target_path in the returned dictionary is an absolute path. :return: A dictionary with target_uri and target_path keys representing the path choosen. :rtype: dict """ self.set_action(Gtk.FileChooserAction.OPEN) self.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL) self.add_button(Gtk.STOCK_OPEN, Gtk.ResponseType.ACCEPT) self.show_all() response = self.run() if response == Gtk.ResponseType.CANCEL: return None target_path = self.get_filename() if not os.access(target_path, os.R_OK): show_dialog_error('Can not read the selected file', self.parent) return None target_uri = self.get_uri() return {'target_uri': target_uri, 'target_path': target_path} def run_quick_save(self, current_name=None): """ Display a dialog which asks the user where a file should be saved. The value of target_path in the returned dictionary is an absolute path. :param set current_name: The name of the file to save. :return: A dictionary with target_uri and target_path keys representing the path choosen. :rtype: dict """ self.set_action(Gtk.FileChooserAction.SAVE) self.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL) self.add_button(Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT) self.set_do_overwrite_confirmation(True) if current_name: self.set_current_name(current_name) self.show_all() response = self.run() if response == Gtk.ResponseType.CANCEL: return None target_path = self.get_filename() if os.path.isfile(target_path): if not os.access(target_path, os.W_OK): show_dialog_error('Can not write to the selected file', self.parent) return None elif not os.access(os.path.dirname(target_path), os.W_OK): show_dialog_error('Can not create the selected file', self.parent) return None target_uri = self.get_uri() return {'target_uri': target_uri, 'target_path': target_path} def run_quick_select_directory(self): """ Display a dialog which asks the user to select a directory to use. The value of target_path in the returned dictionary is an absolute path. 
:return: A dictionary with target_uri and target_path keys representing the path choosen. :rtype: dict """ self.set_action(Gtk.FileChooserAction.SELECT_FOLDER) self.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL) self.add_button(Gtk.STOCK_OPEN, Gtk.ResponseType.ACCEPT) self.show_all() response = self.run() if response == Gtk.ResponseType.CANCEL: return None target_uri = self.get_uri() target_path = self.get_filename() return {'target_uri': target_uri, 'target_path': target_path}
# Copyright 2014 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTTP server wiring for sydent.

Builds the Twisted resource trees for the three listeners this identity
server exposes: the public client API, the authenticated internal API,
and the peer-replication HTTPS endpoint.
"""
import logging
from typing import TYPE_CHECKING

import twisted.internet.ssl
from twisted.web.resource import Resource
from twisted.web.server import Site

from sydent.http.httpcommon import SizeLimitingRequest
from sydent.http.servlets.authenticated_bind_threepid_servlet import (
    AuthenticatedBindThreePidServlet,
)
from sydent.http.servlets.authenticated_unbind_threepid_servlet import (
    AuthenticatedUnbindThreePidServlet,
)

if TYPE_CHECKING:
    from sydent.sydent import Sydent

logger = logging.getLogger(__name__)


class ClientApiHttpServer:
    """Public client-facing HTTP API.

    Serves the Matrix identity-service endpoints under
    ``/_matrix/identity/api/v1`` (legacy) and ``/_matrix/identity/v2``.
    The servlet instances themselves live on ``sydent.servlets``; this
    class only assembles them into a Twisted resource tree.
    """

    def __init__(self, sydent: "Sydent") -> None:
        """Build the resource tree; call :meth:`setup` to start listening.

        :param sydent: the parent Sydent instance that owns the servlets,
            configuration and reactor used here.
        """
        self.sydent = sydent

        # Intermediate path components of the URL tree.
        root = Resource()
        matrix = Resource()
        identity = Resource()
        api = Resource()
        v1 = self.sydent.servlets.v1
        v2 = self.sydent.servlets.v2
        validate = Resource()
        validate_v2 = Resource()
        email = Resource()
        email_v2 = Resource()
        msisdn = Resource()
        msisdn_v2 = Resource()
        threepid_v1 = Resource()
        threepid_v2 = Resource()
        # The same unbind servlet is shared between v1 and v2.
        unbind = self.sydent.servlets.threepidUnbind
        pubkey = Resource()
        ephemeralPubkey = Resource()

        # /_matrix/identity/...
        root.putChild(b"_matrix", matrix)
        matrix.putChild(b"identity", identity)
        identity.putChild(b"api", api)
        # NOTE(review): b"v2" is attached to `identity` again further down;
        # both calls register the same child, so the repeat is harmless.
        identity.putChild(b"v2", v2)
        api.putChild(b"v1", v1)

        # Validation sub-trees for both API versions.
        validate.putChild(b"email", email)
        validate.putChild(b"msisdn", msisdn)

        validate_v2.putChild(b"email", email_v2)
        validate_v2.putChild(b"msisdn", msisdn_v2)

        # v1 endpoints.
        v1.putChild(b"validate", validate)

        v1.putChild(b"lookup", self.sydent.servlets.lookup)
        v1.putChild(b"bulk_lookup", self.sydent.servlets.bulk_lookup)

        v1.putChild(b"pubkey", pubkey)
        pubkey.putChild(b"isvalid", self.sydent.servlets.pubkeyIsValid)
        pubkey.putChild(b"ed25519:0", self.sydent.servlets.pubkey_ed25519)
        pubkey.putChild(b"ephemeral", ephemeralPubkey)
        ephemeralPubkey.putChild(
            b"isvalid", self.sydent.servlets.ephemeralPubkeyIsValid
        )

        threepid_v2.putChild(
            b"getValidated3pid", self.sydent.servlets.getValidated3pidV2
        )
        threepid_v2.putChild(b"bind", self.sydent.servlets.threepidBindV2)
        threepid_v2.putChild(b"unbind", unbind)

        threepid_v1.putChild(b"getValidated3pid", self.sydent.servlets.getValidated3pid)
        threepid_v1.putChild(b"unbind", unbind)
        # v1 bind is only exposed when explicitly enabled in config.
        if self.sydent.config.general.enable_v1_associations:
            threepid_v1.putChild(b"bind", self.sydent.servlets.threepidBind)

        v1.putChild(b"3pid", threepid_v1)

        email.putChild(b"requestToken", self.sydent.servlets.emailRequestCode)
        email.putChild(b"submitToken", self.sydent.servlets.emailValidate)

        email_v2.putChild(b"requestToken", self.sydent.servlets.emailRequestCodeV2)
        email_v2.putChild(b"submitToken", self.sydent.servlets.emailValidateV2)

        msisdn.putChild(b"requestToken", self.sydent.servlets.msisdnRequestCode)
        msisdn.putChild(b"submitToken", self.sydent.servlets.msisdnValidate)

        msisdn_v2.putChild(b"requestToken", self.sydent.servlets.msisdnRequestCodeV2)
        msisdn_v2.putChild(b"submitToken", self.sydent.servlets.msisdnValidateV2)

        v1.putChild(b"store-invite", self.sydent.servlets.storeInviteServlet)

        v1.putChild(b"sign-ed25519", self.sydent.servlets.blindlySignStuffServlet)

        # v2
        # note v2 loses the /api so goes on 'identity' not 'api'
        identity.putChild(b"v2", v2)

        # v2 exclusive APIs
        v2.putChild(b"terms", self.sydent.servlets.termsServlet)
        account = self.sydent.servlets.accountServlet
        v2.putChild(b"account", account)
        account.putChild(b"register", self.sydent.servlets.registerServlet)
        account.putChild(b"logout", self.sydent.servlets.logoutServlet)

        # v2 versions of existing APIs
        v2.putChild(b"validate", validate_v2)
        v2.putChild(b"pubkey", pubkey)
        v2.putChild(b"3pid", threepid_v2)
        v2.putChild(b"store-invite", self.sydent.servlets.storeInviteServletV2)
        v2.putChild(b"sign-ed25519", self.sydent.servlets.blindlySignStuffServletV2)
        v2.putChild(b"lookup", self.sydent.servlets.lookup_v2)
        v2.putChild(b"hash_details", self.sydent.servlets.hash_details)

        # SizeLimitingRequest caps request sizes on the public listener.
        self.factory = Site(root, SizeLimitingRequest)
        self.factory.displayTracebacks = False

    def setup(self) -> None:
        """Start listening on the configured client port/interface (plain TCP)."""
        httpPort = self.sydent.config.http.client_port
        interface = self.sydent.config.http.client_bind_address
        logger.info("Starting Client API HTTP server on %s:%d", interface, httpPort)
        self.sydent.reactor.listenTCP(
            httpPort,
            self.factory,
            backlog=50,  # taken from PosixReactorBase.listenTCP
            interface=interface,
        )


class InternalApiHttpServer:
    """Authenticated internal API (``/_matrix/identity/internal``).

    Exposes bind/unbind servlets that skip end-user validation; intended
    to be reachable only from trusted infrastructure.
    """

    def __init__(self, sydent: "Sydent") -> None:
        self.sydent = sydent

    def setup(self, interface: str, port: int) -> None:
        """Build the internal resource tree and start listening.

        :param interface: address to bind to.
        :param port: TCP port to listen on.
        """
        logger.info("Starting Internal API HTTP server on %s:%d", interface, port)
        root = Resource()
        matrix = Resource()
        root.putChild(b"_matrix", matrix)
        identity = Resource()
        matrix.putChild(b"identity", identity)
        internal = Resource()
        identity.putChild(b"internal", internal)

        authenticated_bind = AuthenticatedBindThreePidServlet(self.sydent)
        internal.putChild(b"bind", authenticated_bind)

        authenticated_unbind = AuthenticatedUnbindThreePidServlet(self.sydent)
        internal.putChild(b"unbind", authenticated_unbind)

        factory = Site(root)
        factory.displayTracebacks = False
        self.sydent.reactor.listenTCP(
            port,
            factory,
            backlog=50,  # taken from PosixReactorBase.listenTCP
            interface=interface,
        )


class ReplicationHttpsServer:
    """Peer-replication endpoint (``/_matrix/identity/replicate/v1/push``).

    Served over TLS using the server's private certificate; replication
    is skipped entirely when no certificate is configured.
    """

    def __init__(self, sydent: "Sydent") -> None:
        self.sydent = sydent

        root = Resource()
        matrix = Resource()
        identity = Resource()

        root.putChild(b"_matrix", matrix)
        matrix.putChild(b"identity", identity)

        replicate = Resource()
        replV1 = Resource()

        identity.putChild(b"replicate", replicate)
        replicate.putChild(b"v1", replV1)
        replV1.putChild(b"push", self.sydent.servlets.replicationPush)

        self.factory = Site(root)
        self.factory.displayTracebacks = False

    def setup(self) -> None:
        """Start the TLS listener if a private certificate is available."""
        httpPort = self.sydent.config.http.replication_port
        interface = self.sydent.config.http.replication_bind_address

        if self.sydent.sslComponents.myPrivateCertificate:
            # We will already have logged a warn if this is absent, so don't do it again
            cert = self.sydent.sslComponents.myPrivateCertificate
            certOptions = twisted.internet.ssl.CertificateOptions(
                privateKey=cert.privateKey.original,
                certificate=cert.original,
                trustRoot=self.sydent.sslComponents.trustRoot,
            )

            logger.info("Loaded server private key and certificate!")
            logger.info(
                "Starting Replication HTTPS server on %s:%d", interface, httpPort
            )

            self.sydent.reactor.listenSSL(
                httpPort,
                self.factory,
                certOptions,
                backlog=50,  # taken from PosixReactorBase.listenTCP
                interface=interface,
            )
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ofctl helpers for OpenFlow 1.0.

Converts between JSON-ish dict descriptions (as used by the REST API)
and OF1.0 parser objects, and issues stats requests / flow and port
modifications against a datapath.
"""

import struct
import socket
import logging

from ryu.ofproto import ofproto_v1_0
from ryu.lib import ofctl_utils
from ryu.lib.mac import haddr_to_bin, haddr_to_str


LOG = logging.getLogger('ryu.lib.ofctl_v1_0')

DEFAULT_TIMEOUT = 1.0   # TODO:XXX

UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_0)


def to_actions(dp, acts):
    """Convert a list of action dicts into OF1.0 OFPAction* objects.

    Unknown action types are logged and skipped rather than raising.
    """
    actions = []
    for a in acts:
        action_type = a.get('type')
        if action_type == 'OUTPUT':
            port = UTIL.ofp_port_from_user(
                a.get('port', ofproto_v1_0.OFPP_NONE))
            # NOTE: The reason of this magic number (0xffe5)
            # is because there is no good constant in of1.0.
            # The same value as OFPCML_MAX of of1.2 and of1.3 is used.
            max_len = int(a.get('max_len', 0xffe5))
            actions.append(dp.ofproto_parser.OFPActionOutput(port, max_len))
        elif action_type == 'SET_VLAN_VID':
            vlan_vid = int(a.get('vlan_vid', 0xffff))
            actions.append(dp.ofproto_parser.OFPActionVlanVid(vlan_vid))
        elif action_type == 'SET_VLAN_PCP':
            vlan_pcp = int(a.get('vlan_pcp', 0))
            actions.append(dp.ofproto_parser.OFPActionVlanPcp(vlan_pcp))
        elif action_type == 'STRIP_VLAN':
            actions.append(dp.ofproto_parser.OFPActionStripVlan())
        elif action_type == 'SET_DL_SRC':
            dl_src = haddr_to_bin(a.get('dl_src'))
            actions.append(dp.ofproto_parser.OFPActionSetDlSrc(dl_src))
        elif action_type == 'SET_DL_DST':
            dl_dst = haddr_to_bin(a.get('dl_dst'))
            actions.append(dp.ofproto_parser.OFPActionSetDlDst(dl_dst))
        elif action_type == 'SET_NW_SRC':
            nw_src = ipv4_to_int(a.get('nw_src'))
            actions.append(dp.ofproto_parser.OFPActionSetNwSrc(nw_src))
        elif action_type == 'SET_NW_DST':
            nw_dst = ipv4_to_int(a.get('nw_dst'))
            actions.append(dp.ofproto_parser.OFPActionSetNwDst(nw_dst))
        elif action_type == 'SET_NW_TOS':
            nw_tos = int(a.get('nw_tos', 0))
            actions.append(dp.ofproto_parser.OFPActionSetNwTos(nw_tos))
        elif action_type == 'SET_TP_SRC':
            tp_src = int(a.get('tp_src', 0))
            actions.append(dp.ofproto_parser.OFPActionSetTpSrc(tp_src))
        elif action_type == 'SET_TP_DST':
            tp_dst = int(a.get('tp_dst', 0))
            actions.append(dp.ofproto_parser.OFPActionSetTpDst(tp_dst))
        elif action_type == 'ENQUEUE':
            port = UTIL.ofp_port_from_user(
                a.get('port', ofproto_v1_0.OFPP_NONE))
            queue_id = UTIL.ofp_queue_from_user(a.get('queue_id', 0))
            actions.append(dp.ofproto_parser.OFPActionEnqueue(port, queue_id))
        else:
            LOG.error('Unknown action type')

    return actions


def actions_to_str(acts):
    """Render OF1.0 action objects as a list of 'TYPE:value' strings."""
    actions = []
    for a in acts:
        action_type = a.cls_action_type

        if action_type == ofproto_v1_0.OFPAT_OUTPUT:
            port = UTIL.ofp_port_to_user(a.port)
            buf = 'OUTPUT:' + str(port)
        elif action_type == ofproto_v1_0.OFPAT_SET_VLAN_VID:
            buf = 'SET_VLAN_VID:' + str(a.vlan_vid)
        elif action_type == ofproto_v1_0.OFPAT_SET_VLAN_PCP:
            buf = 'SET_VLAN_PCP:' + str(a.vlan_pcp)
        elif action_type == ofproto_v1_0.OFPAT_STRIP_VLAN:
            buf = 'STRIP_VLAN'
        elif action_type == ofproto_v1_0.OFPAT_SET_DL_SRC:
            buf = 'SET_DL_SRC:' + haddr_to_str(a.dl_addr)
        elif action_type == ofproto_v1_0.OFPAT_SET_DL_DST:
            buf = 'SET_DL_DST:' + haddr_to_str(a.dl_addr)
        elif action_type == ofproto_v1_0.OFPAT_SET_NW_SRC:
            buf = 'SET_NW_SRC:' + \
                  socket.inet_ntoa(struct.pack('!I', a.nw_addr))
        elif action_type == ofproto_v1_0.OFPAT_SET_NW_DST:
            buf = 'SET_NW_DST:' + \
                  socket.inet_ntoa(struct.pack('!I', a.nw_addr))
        elif action_type == ofproto_v1_0.OFPAT_SET_NW_TOS:
            buf = 'SET_NW_TOS:' + str(a.tos)
        elif action_type == ofproto_v1_0.OFPAT_SET_TP_SRC:
            buf = 'SET_TP_SRC:' + str(a.tp)
        elif action_type == ofproto_v1_0.OFPAT_SET_TP_DST:
            buf = 'SET_TP_DST:' + str(a.tp)
        elif action_type == ofproto_v1_0.OFPAT_ENQUEUE:
            port = UTIL.ofp_port_to_user(a.port)
            queue = UTIL.ofp_queue_to_user(a.queue_id)
            buf = 'ENQUEUE:' + str(port) + ":" + str(queue)
        elif action_type == ofproto_v1_0.OFPAT_VENDOR:
            buf = 'VENDOR'
        else:
            buf = 'UNKNOWN'
        actions.append(buf)

    return actions


def ipv4_to_int(addr):
    """Convert a dotted-quad IPv4 string to a 32-bit integer."""
    ip = addr.split('.')
    assert len(ip) == 4
    i = 0
    for b in ip:
        b = int(b)
        i = (i << 8) | b
    return i


def to_match(dp, attrs):
    """Build an OF1.0 OFPMatch from a dict of match fields.

    Fields present in ``attrs`` are set and their wildcard bit is
    cleared; everything else stays wildcarded.  ``nw_src``/``nw_dst``
    accept optional '/prefix' CIDR notation.
    """
    ofp = dp.ofproto

    wildcards = ofp.OFPFW_ALL
    in_port = 0
    dl_src = 0
    dl_dst = 0
    dl_vlan = 0
    dl_vlan_pcp = 0
    dl_type = 0
    nw_tos = 0
    nw_proto = 0
    nw_src = 0
    nw_dst = 0
    tp_src = 0
    tp_dst = 0

    for key, value in attrs.items():
        if key == 'in_port':
            in_port = UTIL.ofp_port_from_user(value)
            wildcards &= ~ofp.OFPFW_IN_PORT
        elif key == 'dl_src':
            dl_src = haddr_to_bin(value)
            wildcards &= ~ofp.OFPFW_DL_SRC
        elif key == 'dl_dst':
            dl_dst = haddr_to_bin(value)
            wildcards &= ~ofp.OFPFW_DL_DST
        elif key == 'dl_vlan':
            dl_vlan = int(value)
            wildcards &= ~ofp.OFPFW_DL_VLAN
        elif key == 'dl_vlan_pcp':
            dl_vlan_pcp = int(value)
            wildcards &= ~ofp.OFPFW_DL_VLAN_PCP
        elif key == 'dl_type':
            dl_type = int(value)
            wildcards &= ~ofp.OFPFW_DL_TYPE
        elif key == 'nw_tos':
            nw_tos = int(value)
            wildcards &= ~ofp.OFPFW_NW_TOS
        elif key == 'nw_proto':
            nw_proto = int(value)
            wildcards &= ~ofp.OFPFW_NW_PROTO
        elif key == 'nw_src':
            ip = value.split('/')
            nw_src = struct.unpack('!I', socket.inet_aton(ip[0]))[0]
            mask = 32
            if len(ip) == 2:
                mask = int(ip[1])
                assert 0 < mask <= 32
            # Encode the prefix length into the OFPFW_NW_SRC bit field
            # (number of *wildcarded* low-order bits, shifted into place).
            v = (32 - mask) << ofp.OFPFW_NW_SRC_SHIFT | \
                ~ofp.OFPFW_NW_SRC_MASK
            wildcards &= v
        elif key == 'nw_dst':
            ip = value.split('/')
            nw_dst = struct.unpack('!I', socket.inet_aton(ip[0]))[0]
            mask = 32
            if len(ip) == 2:
                mask = int(ip[1])
                assert 0 < mask <= 32
            v = (32 - mask) << ofp.OFPFW_NW_DST_SHIFT | \
                ~ofp.OFPFW_NW_DST_MASK
            wildcards &= v
        elif key == 'tp_src':
            tp_src = int(value)
            wildcards &= ~ofp.OFPFW_TP_SRC
        elif key == 'tp_dst':
            tp_dst = int(value)
            wildcards &= ~ofp.OFPFW_TP_DST
        else:
            LOG.error("unknown match name %s, %s, %d", key, value, len(key))

    match = dp.ofproto_parser.OFPMatch(
        wildcards, in_port, dl_src, dl_dst, dl_vlan, dl_vlan_pcp,
        dl_type, nw_tos, nw_proto, nw_src, nw_dst, tp_src, tp_dst)

    return match


def match_to_str(m):
    """Convert an OFPMatch back into a dict, honouring its wildcards."""
    match = {}

    if ~m.wildcards & ofproto_v1_0.OFPFW_IN_PORT:
        match['in_port'] = UTIL.ofp_port_to_user(m.in_port)

    if ~m.wildcards & ofproto_v1_0.OFPFW_DL_SRC:
        match['dl_src'] = haddr_to_str(m.dl_src)

    if ~m.wildcards & ofproto_v1_0.OFPFW_DL_DST:
        match['dl_dst'] = haddr_to_str(m.dl_dst)

    if ~m.wildcards & ofproto_v1_0.OFPFW_DL_VLAN:
        match['dl_vlan'] = m.dl_vlan

    if ~m.wildcards & ofproto_v1_0.OFPFW_DL_VLAN_PCP:
        match['dl_vlan_pcp'] = m.dl_vlan_pcp

    if ~m.wildcards & ofproto_v1_0.OFPFW_DL_TYPE:
        match['dl_type'] = m.dl_type

    if ~m.wildcards & ofproto_v1_0.OFPFW_NW_TOS:
        match['nw_tos'] = m.nw_tos

    if ~m.wildcards & ofproto_v1_0.OFPFW_NW_PROTO:
        match['nw_proto'] = m.nw_proto

    if ~m.wildcards & ofproto_v1_0.OFPFW_NW_SRC_ALL:
        match['nw_src'] = nw_src_to_str(m.wildcards, m.nw_src)

    if ~m.wildcards & ofproto_v1_0.OFPFW_NW_DST_ALL:
        match['nw_dst'] = nw_dst_to_str(m.wildcards, m.nw_dst)

    if ~m.wildcards & ofproto_v1_0.OFPFW_TP_SRC:
        match['tp_src'] = m.tp_src

    if ~m.wildcards & ofproto_v1_0.OFPFW_TP_DST:
        match['tp_dst'] = m.tp_dst

    return match


def nw_src_to_str(wildcards, addr):
    """Format a source IP (+ optional '/mask') from the wildcard bits."""
    ip = socket.inet_ntoa(struct.pack('!I', addr))
    mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_SRC_MASK)
                 >> ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
    if mask == 32:
        mask = 0
    if mask:
        ip += '/%d' % mask
    return ip


def nw_dst_to_str(wildcards, addr):
    """Format a destination IP (+ optional '/mask') from the wildcard bits."""
    ip = socket.inet_ntoa(struct.pack('!I', addr))
    mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_DST_MASK)
                 >> ofproto_v1_0.OFPFW_NW_DST_SHIFT)
    if mask == 32:
        mask = 0
    if mask:
        ip += '/%d' % mask
    return ip


def get_desc_stats(dp, waiters):
    """Request switch description stats; returns {dpid: desc dict}."""
    stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    s = {}
    for msg in msgs:
        stats = msg.body
        s = {'mfr_desc': stats.mfr_desc,
             'hw_desc': stats.hw_desc,
             'sw_desc': stats.sw_desc,
             'serial_num': stats.serial_num,
             'dp_desc': stats.dp_desc}
    desc = {str(dp.id): s}
    return desc


def get_queue_stats(dp, waiters, port=None, queue_id=None):
    """Request queue stats; port/queue_id default to the 'all' wildcards."""
    if port is None:
        port = dp.ofproto.OFPP_ALL
    else:
        port = int(str(port), 0)

    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = int(str(queue_id), 0)

    stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port,
                                                   queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    s = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s.append({'port_no': stat.port_no,
                      'queue_id': stat.queue_id,
                      'tx_bytes': stat.tx_bytes,
                      'tx_errors': stat.tx_errors,
                      'tx_packets': stat.tx_packets})
    desc = {str(dp.id): s}
    return desc


def get_flow_stats(dp, waiters, flow=None):
    """Request flow stats filtered by an optional flow-spec dict."""
    flow = flow if flow else {}
    match = to_match(dp, flow.get('match', {}))
    # 0xff == OFPTT_ALL in OF1.0 terms: query every table by default.
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', 0xff))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_NONE))

    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, 0, match, table_id, out_port)

    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    flows = []
    for msg in msgs:
        for stats in msg.body:
            actions = actions_to_str(stats.actions)
            match = match_to_str(stats.match)

            s = {'priority': stats.priority,
                 'cookie': stats.cookie,
                 'idle_timeout': stats.idle_timeout,
                 'hard_timeout': stats.hard_timeout,
                 'actions': actions,
                 'match': match,
                 'byte_count': stats.byte_count,
                 'duration_sec': stats.duration_sec,
                 'duration_nsec': stats.duration_nsec,
                 'packet_count': stats.packet_count,
                 'table_id': UTIL.ofp_table_to_user(stats.table_id)}
            flows.append(s)
    flows = {str(dp.id): flows}
    return flows


def get_aggregate_flow_stats(dp, waiters, flow=None):
    """Request aggregate (packet/byte/flow count) stats for a flow-spec."""
    flow = flow if flow else {}
    match = to_match(dp, flow.get('match', {}))
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', 0xff))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_NONE))

    stats = dp.ofproto_parser.OFPAggregateStatsRequest(
        dp, 0, match, table_id, out_port)

    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    flows = []
    for msg in msgs:
        stats = msg.body
        for st in stats:
            s = {'packet_count': st.packet_count,
                 'byte_count': st.byte_count,
                 'flow_count': st.flow_count}
            flows.append(s)
    flows = {str(dp.id): flows}

    return flows


def get_table_stats(dp, waiters):
    """Request per-table stats; wildcard bitmaps are expanded to names."""
    stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
    ofp = dp.ofproto

    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    # Maps each OFPFW_* bit position to a human-readable field name.
    match_convert = {ofp.OFPFW_IN_PORT: 'IN_PORT',
                     ofp.OFPFW_DL_VLAN: 'DL_VLAN',
                     ofp.OFPFW_DL_SRC: 'DL_SRC',
                     ofp.OFPFW_DL_DST: 'DL_DST',
                     ofp.OFPFW_DL_TYPE: 'DL_TYPE',
                     ofp.OFPFW_NW_PROTO: 'NW_PROTO',
                     ofp.OFPFW_TP_SRC: 'TP_SRC',
                     ofp.OFPFW_TP_DST: 'TP_DST',
                     ofp.OFPFW_NW_SRC_SHIFT: 'NW_SRC_SHIFT',
                     ofp.OFPFW_NW_SRC_BITS: 'NW_SRC_BITS',
                     ofp.OFPFW_NW_SRC_MASK: 'NW_SRC_MASK',
                     ofp.OFPFW_NW_SRC: 'NW_SRC',
                     ofp.OFPFW_NW_SRC_ALL: 'NW_SRC_ALL',
                     ofp.OFPFW_NW_DST_SHIFT: 'NW_DST_SHIFT',
                     ofp.OFPFW_NW_DST_BITS: 'NW_DST_BITS',
                     ofp.OFPFW_NW_DST_MASK: 'NW_DST_MASK',
                     ofp.OFPFW_NW_DST: 'NW_DST',
                     ofp.OFPFW_NW_DST_ALL: 'NW_DST_ALL',
                     ofp.OFPFW_DL_VLAN_PCP: 'DL_VLAN_PCP',
                     ofp.OFPFW_NW_TOS: 'NW_TOS',
                     ofp.OFPFW_ALL: 'ALL',
                     ofp.OFPFW_ICMP_TYPE: 'ICMP_TYPE',
                     ofp.OFPFW_ICMP_CODE: 'ICMP_CODE'}

    tables = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            wildcards = []
            for k, v in match_convert.items():
                if (1 << k) & stat.wildcards:
                    wildcards.append(v)
            s = {'table_id': UTIL.ofp_table_to_user(stat.table_id),
                 'name': stat.name.decode('utf-8'),
                 'wildcards': wildcards,
                 'max_entries': stat.max_entries,
                 'active_count': stat.active_count,
                 'lookup_count': stat.lookup_count,
                 'matched_count': stat.matched_count}
            tables.append(s)
    desc = {str(dp.id): tables}

    return desc


def get_port_stats(dp, waiters, port=None):
    """Request per-port traffic counters; defaults to all ports."""
    if port is None:
        port = dp.ofproto.OFPP_NONE
    else:
        port = int(str(port), 0)

    stats = dp.ofproto_parser.OFPPortStatsRequest(
        dp, 0, port)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    ports = []
    for msg in msgs:
        for stats in msg.body:
            s = {'port_no': UTIL.ofp_port_to_user(stats.port_no),
                 'rx_packets': stats.rx_packets,
                 'tx_packets': stats.tx_packets,
                 'rx_bytes': stats.rx_bytes,
                 'tx_bytes': stats.tx_bytes,
                 'rx_dropped': stats.rx_dropped,
                 'tx_dropped': stats.tx_dropped,
                 'rx_errors': stats.rx_errors,
                 'tx_errors': stats.tx_errors,
                 'rx_frame_err': stats.rx_frame_err,
                 'rx_over_err': stats.rx_over_err,
                 'rx_crc_err': stats.rx_crc_err,
                 'collisions': stats.collisions}
            ports.append(s)
    ports = {str(dp.id): ports}
    return ports


def get_port_desc(dp, waiters):
    """Request port descriptions via a features request (OF1.0 has no
    dedicated port-desc stats message)."""
    stats = dp.ofproto_parser.OFPFeaturesRequest(dp)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    descs = []

    for msg in msgs:
        stats = msg.ports
        for stat in stats.values():
            d = {'port_no': UTIL.ofp_port_to_user(stat.port_no),
                 'hw_addr': stat.hw_addr,
                 'name': stat.name.decode('utf-8'),
                 'config': stat.config,
                 'state': stat.state,
                 'curr': stat.curr,
                 'advertised': stat.advertised,
                 'supported': stat.supported,
                 'peer': stat.peer}
            descs.append(d)
    descs = {str(dp.id): descs}
    return descs


def mod_flow_entry(dp, flow, cmd):
    """Send an OFPFlowMod built from the ``flow`` dict with command ``cmd``."""
    cookie = int(flow.get('cookie', 0))
    priority = int(flow.get('priority',
                            dp.ofproto.OFP_DEFAULT_PRIORITY))
    buffer_id = UTIL.ofp_buffer_from_user(
        flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_NONE))
    flags = int(flow.get('flags', 0))
    idle_timeout = int(flow.get('idle_timeout', 0))
    hard_timeout = int(flow.get('hard_timeout', 0))
    actions = to_actions(dp, flow.get('actions', []))
    match = to_match(dp, flow.get('match', {}))

    flow_mod = dp.ofproto_parser.OFPFlowMod(
        datapath=dp, match=match, cookie=cookie,
        command=cmd, idle_timeout=idle_timeout,
        hard_timeout=hard_timeout, priority=priority,
        buffer_id=buffer_id, out_port=out_port,
        flags=flags, actions=actions)

    ofctl_utils.send_msg(dp, flow_mod, LOG)


def delete_flow_entry(dp):
    """Delete every flow entry on the datapath (match-all OFPFC_DELETE)."""
    match = dp.ofproto_parser.OFPMatch(
        dp.ofproto.OFPFW_ALL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

    flow_mod = dp.ofproto_parser.OFPFlowMod(
        datapath=dp, match=match, cookie=0,
        command=dp.ofproto.OFPFC_DELETE)

    ofctl_utils.send_msg(dp, flow_mod, LOG)


def mod_port_behavior(dp, port_config):
    """Send an OFPPortMod built from the ``port_config`` dict.

    NOTE(review): 'advertise' has no default, so int(None) raises if the
    caller omits it — presumably the REST layer validates this upstream.
    """
    port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
    hw_addr = str(port_config.get('hw_addr'))
    config = int(port_config.get('config', 0))
    mask = int(port_config.get('mask', 0))
    advertise = int(port_config.get('advertise'))

    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, advertise)

    ofctl_utils.send_msg(dp, port_mod, LOG)
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe import json from frappe import _ from frappe.model.document import Document from six import iteritems from frappe.custom.doctype.custom_field.custom_field import create_custom_field class KanbanBoard(Document): def validate(self): self.validate_column_name() def on_update(self): frappe.clear_cache(doctype=self.reference_doctype) def validate_column_name(self): for column in self.columns: if not column.column_name: frappe.msgprint(frappe._("Column Name cannot be empty"), raise_exception=True) def get_permission_query_conditions(user): if not user: user = frappe.session.user if user == "Administrator": return "" return """(`tabKanban Board`.private=0 or `tabKanban Board`.owner="{user}")""".format(user=user) def has_permission(doc, ptype, user): if doc.private == 0 or user == "Administrator": return True if user == doc.owner: return True return False @frappe.whitelist() def get_kanban_boards(doctype): '''Get Kanban Boards for doctype to show in List View''' return frappe.get_list('Kanban Board', fields=['name', 'filters', 'reference_doctype', 'private'], filters={ 'reference_doctype': doctype } ) @frappe.whitelist() def add_column(board_name, column_title): '''Adds new column to Kanban Board''' doc = frappe.get_doc("Kanban Board", board_name) for col in doc.columns: if column_title == col.column_name: frappe.throw(_("Column <b>{0}</b> already exist.").format(column_title)) doc.append("columns", dict( column_name=column_title )) doc.save() return doc.columns @frappe.whitelist() def archive_restore_column(board_name, column_title, status): '''Set column's status to status''' doc = frappe.get_doc("Kanban Board", board_name) for col in doc.columns: if column_title == col.column_name: col.status = status doc.save() return doc.columns @frappe.whitelist() def update_doc(doc): '''Updates the 
doc when card is edited''' doc = json.loads(doc) try: to_update = doc doctype = doc['doctype'] docname = doc['name'] doc = frappe.get_doc(doctype, docname) doc.update(to_update) doc.save() except: return { 'doc': doc, 'exc': frappe.utils.get_traceback() } return doc @frappe.whitelist() def update_order(board_name, order): '''Save the order of cards in columns''' board = frappe.get_doc('Kanban Board', board_name) doctype = board.reference_doctype fieldname = board.field_name order_dict = json.loads(order) updated_cards = [] for col_name, cards in iteritems(order_dict): order_list = [] for card in cards: column = frappe.get_value( doctype, {'name': card}, fieldname ) if column != col_name: frappe.set_value(doctype, card, fieldname, col_name) updated_cards.append(dict( name=card, column=col_name )) for column in board.columns: if column.column_name == col_name: column.order = json.dumps(cards) board.save() return board, updated_cards @frappe.whitelist() def quick_kanban_board(doctype, board_name, field_name, project=None): '''Create new KanbanBoard quickly with default options''' doc = frappe.new_doc('Kanban Board') if field_name == 'kanban_column': create_custom_field(doctype, { 'label': 'Kanban Column', 'fieldname': 'kanban_column', 'fieldtype': 'Select', 'hidden': 1 }) meta = frappe.get_meta(doctype) options = '' for field in meta.fields: if field.fieldname == field_name: options = field.options columns = [] if options: columns = options.split('\n') for column in columns: if not column: continue doc.append("columns", dict( column_name=column )) doc.kanban_board_name = board_name doc.reference_doctype = doctype doc.field_name = field_name if project: doc.filters = '[["Task","project","=","{0}"]]'.format(project) if doctype in ['Note', 'ToDo']: doc.private = 1 doc.save() return doc @frappe.whitelist() def update_column_order(board_name, order): '''Set the order of columns in Kanban Board''' board = frappe.get_doc('Kanban Board', board_name) order = json.loads(order) 
old_columns = board.columns new_columns = [] for col in order: for column in old_columns: if col == column.column_name: new_columns.append(column) old_columns.remove(column) new_columns.extend(old_columns) board.columns = [] for col in new_columns: board.append("columns", dict( column_name=col.column_name, status=col.status, order=col.order, indicator=col.indicator, )) board.save() return board @frappe.whitelist() def set_indicator(board_name, column_name, indicator): '''Set the indicator color of column''' board = frappe.get_doc('Kanban Board', board_name) for column in board.columns: if column.column_name == column_name: column.indicator = indicator board.save() return board @frappe.whitelist() def save_filters(board_name, filters): '''Save filters silently''' frappe.db.set_value('Kanban Board', board_name, 'filters', filters, update_modified=False)
# -*- test-case-name: twisted.words.test -*- # Copyright (c) 2001-2005 Twisted Matrix Laboratories. # See LICENSE for details. """ Implements a AOL Instant Messenger TOC server and client, using the Twisted framework. TODO: info,dir: see how gaim connects for this...it may never work if it tries to connect to the aim server automatically This module is stable, but deprecated. Maintainer: U{Paul Swartz<mailto:z3p@twistedmatrix.com>} Modified by Jinna Lei for Kamaelia. """ # twisted imports ##from twisted.internet import reactor, protocol ##from twisted.python import log # base imports import struct import string import time import base64 import os import StringIO SIGNON,DATA,ERROR,SIGNOFF,KEEP_ALIVE=range(1,6) PERMITALL,DENYALL,PERMITSOME,DENYSOME=range(1,5) DUMMY_CHECKSUM = -559038737 # 0xdeadbeef def quote(s): rep=['\\','$','{','}','[',']','(',')','"'] for r in rep: s=string.replace(s,r,"\\"+r) return "\""+s+"\"" def unquote(s): if s=="": return "" if s[0]!='"': return s r=string.replace s=s[1:-1] s=r(s,"\\\\","\\") s=r(s,"\\$","$") s=r(s,"\\{","{") s=r(s,"\\}","}") s=r(s,"\\[","[") s=r(s,"\\]","]") s=r(s,"\\(","(") s=r(s,"\\)",")") s=r(s,"\\\"","\"") return s def unquotebeg(s): for i in range(1,len(s)): if s[i]=='"' and s[i-1]!='\\': q=unquote(s[:i+1]) return [q,s[i+2:]] def unroast(pw): roaststring="Tic/Toc" pw=string.lower(pw[2:]) r="" count=0 hex=["0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f"] while pw: st,pw=pw[:2],pw[2:] value=(16*hex.index(st[0]))+hex.index(st[1]) xor=ord(roaststring[count]) count=(count+1)%len(roaststring) r=r+chr(value^xor) return r def roast(pw): # contributed by jemfinch on #python key="Tic/Toc" ro="0x" i=0 ascii=map(ord,pw) for c in ascii: ro=ro+'%02x'%(c^ord(key[i%len(key)])) i=i+1 return string.lower(ro) def checksum(b): return DUMMY_CHECKSUM # do it like gaim does, since the checksum def checksum_file(f): return DUMMY_CHECKSUM # do it like gaim does, since the checksum def normalize(s): s=string.lower(s) 
s=string.replace(s," ","") return s class TOCParseError(ValueError): pass class TOC(protocol.Protocol): users={} def connectionMade(self): # initialization of protocol self._buf="" self._ourseqnum=0L self._theirseqnum=0L self._mode="Flapon" self._onlyflaps=0 self._laststatus={} # the last status for a user self.username=None self.permitmode=PERMITALL self.permitlist=[] self.denylist=[] self.buddylist=[] self.signontime=0 self.idletime=0 self.userinfo="<br>" self.userclass=" O" self.away="" self.saved=None def _debug(self,data): log.msg(data) def connectionLost(self, reason): self._debug("dropped connection from %s" % self.username) try: del self.factory.users[self.username] except: pass for k in self.factory.chatroom.keys(): try: self.factory.chatroom[k].leave(self) except TOCParseError: pass if self.saved: self.factory.savedusers[self.username]=self.saved self.updateUsers() def sendFlap(self,type,data): """ send a FLAP to the client """ send="*" self._debug(data) if type==DATA: data=data+"\000" length=len(data) send=send+struct.pack("!BHH",type,self._ourseqnum,length) send=send+data self._ourseqnum=self._ourseqnum+1 if self._ourseqnum>(256L**4): self._ourseqnum=0 self.transport.write(send) def dataReceived(self,data): self._buf=self._buf+data try: func=getattr(self,"mode%s"%self._mode) except: return self._mode=func() if self._onlyflaps and self.isFlap(): self.dataReceived("") def isFlap(self): """ tests to see if a flap is actually on the buffer """ if self._buf=='': return 0 if self._buf[0]!="*": return 0 if len(self._buf)<6: return 0 foo,type,seqnum,length=struct.unpack("!BBHH",self._buf[:6]) if type not in range(1,6): return 0 if len(self._buf)<6+length: return 0 return 1 def readFlap(self): """ read the first FLAP off self._buf, raising errors if it isn't in the right form. 
the FLAP is the basic TOC message format, and is logically equivilant to a packet in TCP """ if self._buf=='': return None if self._buf[0]!="*": raise TOCParseError if len(self._buf)<6: return None foo,type,seqnum,length=struct.unpack("!BBHH",self._buf[:6]) if len(self._buf)<6+length: return None data=self._buf[6:6+length] self._buf=self._buf[6+length:] if data and data[-1]=="\000": data=data[:-1] self._debug([type,data]) return [type,data] #def modeWeb(self): # try: # line,rest=string.split(self._buf,"\n",1) # get,username,http=string.split(line," ",2) # except: # return "Web" # not enough data # foo,type,username=string.split(username,"/") # if type=="info": # user=self.factory.users[username] # text="<HTML><HEAD><TITLE>User Information for %s</TITLE></HEAD><BODY>Username: <B>%s</B><br>\nWarning Level: <B>%s%</B><br>\n Online Since: <B>%s</B><br>\nIdle Minutes: <B>%s</B><br>\n<hr><br>\n%s\n<hr><br>\n"%(user.saved.nick, user.saved.nick, user.saved.evilness, time.asctime(user.signontime), int((time.time()-user.idletime)/60), user.userinfo) # self.transport.write("HTTP/1.1 200 OK\n") # self.transport.write("Content-Type: text/html\n") # self.transport.write("Content-Length: %s\n\n"%len(text)) # self.transport.write(text) # self.loseConnection() def modeFlapon(self): #if self._buf[:3]=="GET": self.modeWeb() # TODO: get this working if len(self._buf)<10: return "Flapon" # not enough bytes flapon,self._buf=self._buf[:10],self._buf[10:] if flapon!="FLAPON\r\n\r\n": raise TOCParseError self.sendFlap(SIGNON,"\000\000\000\001") self._onlyflaps=1 return "Signon" def modeSignon(self): flap=self.readFlap() if flap==None: return "Signon" if flap[0]!=SIGNON: raise TOCParseError version,tlv,unlength=struct.unpack("!LHH",flap[1][:8]) if version!=1 or tlv!=1 or unlength+8!=len(flap[1]): raise TOCParseError self.username=normalize(flap[1][8:]) if self.username in self.factory.savedusers.keys(): self.saved=self.factory.savedusers[self.username] else: self.saved=SavedUser() 
self.saved.nick=self.username return "TocSignon" def modeTocSignon(self): flap=self.readFlap() if flap==None: return "TocSignon" if flap[0]!=DATA: raise TOCParseError data=string.split(flap[1]," ") if data[0]!="toc_signon": raise TOCParseError for i in data: if not i:data.remove(i) password=unroast(data[4]) if not(self.authorize(data[1],int(data[2]),data[3],password)): self.sendError(BAD_NICKNAME) self.transport.loseConnection() return self.sendFlap(DATA,"SIGN_ON:TOC1.0") self.sendFlap(DATA,"NICK:%s"%self.saved.nick) self.sendFlap(DATA,"CONFIG:%s"%self.saved.config) # sending user configuration goes here return "Connected" def authorize(self,server,port,username,password): if self.saved.password=="": self.saved.password=password return 1 else: return self.saved.password==password def modeConnected(self): flap=self.readFlap() while flap!=None: if flap[0] not in [DATA,KEEP_ALIVE]: raise TOCParseError flapdata=string.split(flap[1]," ",1) tocname=flapdata[0][4:] if len(flapdata)==2: data=flapdata[1] else: data="" func=getattr(self,"toc_"+tocname,None) if func!=None: func(data) else: self.toc_unknown(tocname,data) flap=self.readFlap() return "Connected" def toc_unknown(self,tocname,data): self._debug("unknown! %s %s" % (tocname,data)) def toc_init_done(self,data): """ called when all the setup is done. toc_init_done """ self.signontime=int(time.time()) self.factory.users[self.username]=self self.updateUsers() def toc_add_permit(self,data): """ adds users to the permit list. if the list is null, then set the mode to DENYALL """ if data=="": self.permitmode=DENYALL self.permitlist=[] self.denylist=[] else: self.permitmode=PERMITSOME self.denylist=[] users=string.split(data," ") map(self.permitlist.append,users) self.updateUsers() def toc_add_deny(self,data): """ adds users to the deny list. 
if the list is null, then set the mode to PERMITALL """ if data=="": self.permitmode=PERMITALL self.permitlist=[] self.denylist=[] else: self.permitmode=DENYSOME self.permitlist=[] users=string.split(data," ") map(self.denylist.append,users) self.updateUsers() def toc_evil(self,data): """ warns a user. toc_evil <username> <anon|norm> """ username,nora=string.split(data," ") if nora=="anon": user="" else: user=self.saved.nick if not(self.factory.users.has_key(username)): self.sendError(CANT_WARN,username) return if self.factory.users[username].saved.evilness>=100: self.sendError(CANT_WARN,username) return self.factory.users[username].evilFrom(user) def toc_add_buddy(self,data): """ adds users to the buddy list toc_add_buddy <buddyname1> [<buddyname2>] [<buddyname3>]... """ buddies=map(normalize,string.split(data," ")) for b in buddies: if b not in self.buddylist: self.buddylist.append(b) for buddy in buddies: try: buddy=self.factory.users[buddy] except: pass else: self.buddyUpdate(buddy) def toc_remove_buddy(self,data): """ removes users from the buddy list toc_remove_buddy <buddyname1> [<buddyname2>] [<buddyname3>]... 
""" buddies=string.split(data," ") for buddy in buddies: try: self.buddylist.remove(normalize(buddy)) except: pass def toc_send_im(self,data): """ incoming instant message toc_send_im <screenname> <quoted message> [auto] """ username,data=string.split(data," ",1) auto=0 if data[-4:]=="auto": auto=1 data=data[:-5] data=unquote(data) if not(self.factory.users.has_key(username)): self.sendError(NOT_AVAILABLE,username) return user=self.factory.users[username] if not(self.canContact(user)): self.sendError(NOT_AVAILABLE,username) return user.hearWhisper(self,data,auto) def toc_set_info(self,data): """ set the users information, retrivable with toc_get_info toc_set_info <user info (quoted)> """ info=unquote(data) self._userinfo=info def toc_set_idle(self,data): """ set/unset idle toc_set_idle <seconds> """ seconds=int(data) self.idletime=time.time()-seconds # time when they started being idle self.updateUsers() def toc_set_away(self,data): """ set/unset away message toc_set_away [<away message>] """ away=unquote(data) if not self.away and away: # setting an away message self.away=away self.userclass=self.userclass+'U' self.updateUsers() elif self.away and not away: # coming back self.away="" self.userclass=self.userclass[:2] self.updateUsers() else: raise TOCParseError def toc_chat_join(self,data): """ joins the chat room. toc_chat_join <exchange> <room name> """ exchange,name=string.split(data," ",1) self.factory.getChatroom(int(exchange),unquote(name)).join(self) def toc_chat_invite(self,data): """ invite others to the room. toc_chat_invite <room id> <invite message> <buddy 1> [<buddy2>]... """ id,data=string.split(data," ",1) id=int(id) message,data=unquotebeg(data) buddies=string.split(data," ") for b in buddies: room=self.factory.chatroom[id] bud=self.factory.users[b] bud.chatInvite(room,self,message) def toc_chat_accept(self,data): """ accept an invitation. 
toc_chat_accept <room id> """ id=int(data) self.factory.chatroom[id].join(self) def toc_chat_send(self,data): """ send a message to the chat room. toc_chat_send <room id> <message> """ id,message=string.split(data," ",1) id=int(id) message=unquote(message) self.factory.chatroom[id].say(self,message) def toc_chat_whisper(self,data): id,user,message=string.split(data," ",2) id=int(id) room=self.factory.chatroom[id] message=unquote(message) self.factory.users[user].chatWhisper(room,self,message) def toc_chat_leave(self,data): """ leave the room. toc_chat_leave <room id> """ id=int(data) self.factory.chatroom[id].leave(self) def toc_set_config(self,data): """ set the saved config. this gets send when you log in. toc_set_config <config> """ self.saved.config=unquote(data) def toc_get_info(self,data): """ get the user info for a user toc_get_info <username> """ if not self.factory.users.has_key(data): self.sendError(901,data) return self.sendFlap(2,"GOTO_URL:TIC:info/%s"%data) def toc_format_nickname(self,data): """ change the format of your nickname. toc_format_nickname <new format> """ # XXX may not work nick=unquote(data) if normalize(nick)==self.username: self.saved.nick=nick self.sendFlap(2,"ADMIN_NICK_STATUS:0") else: self.sendError(BAD_INPUT) def toc_change_passwd(self,data): orig,data=unquotebeg(data) new=unquote(data) if orig==self.saved.password: self.saved.password=new self.sendFlap(2,"ADMIN_PASSWD_STATUS:0") else: self.sendError(BAD_INPUT) def sendError(self,code,*varargs): """ send an error to the user. listing of error messages is below. """ send="ERROR:%s"%code for v in varargs: send=send+":"+v self.sendFlap(DATA,send) def updateUsers(self): """ Update the users who have us on their buddylist. Called when the user changes anything (idle,away) so people can get updates. 
""" for user in self.factory.users.values(): if self.username in user.buddylist and self.canContact(user): user.buddyUpdate(self) def getStatus(self,user): if self.canContact(user): if self in self.factory.users.values():ol='T' else: ol='F' idle=0 if self.idletime: idle=int((time.time()-self.idletime)/60) return (self.saved.nick,ol,self.saved.evilness,self.signontime,idle,self.userclass) else: return (self.saved.nick,'F',0,0,0,self.userclass) def canContact(self,user): if self.permitmode==PERMITALL: return 1 elif self.permitmode==DENYALL: return 0 elif self.permitmode==PERMITSOME: if user.username in self.permitlist: return 1 else: return 0 elif self.permitmode==DENYSOME: if user.username in self.denylist: return 0 else: return 1 else: assert 0,"bad permitmode %s" % self.permitmode def buddyUpdate(self,user): """ Update the buddy. Called from updateUsers() """ if not self.canContact(user): return status=user.getStatus(self) if not self._laststatus.has_key(user): self._laststatus[user]=() if self._laststatus[user]!=status: send="UPDATE_BUDDY:%s:%s:%s:%s:%s:%s"%status self.sendFlap(DATA,send) self._laststatus[user]=status def hearWhisper(self,user,data,auto=0): """ Called when you get an IM. If auto=1, it's an autoreply from an away message. 
""" if not self.canContact(user): return if auto: auto='T' else: auto='F' send="IM_IN:%s:%s:%s"%(user.saved.nick,auto,data) self.sendFlap(DATA,send) def evilFrom(self,user): if user=="": percent=0.03 else: percent=0.1 self.saved.evilness=self.saved.evilness+int((100-self.saved.evilness)*percent) self.sendFlap(2,"EVILED:%s:%s"%(self.saved.evilness,user)) self.updateUsers() def chatJoin(self,room): self.sendFlap(2,"CHAT_JOIN:%s:%s"%(room.id,room.name)) f="CHAT_UPDATE_BUDDY:%s:T"%room.id for u in room.users: if u!=self: u.chatUserUpdate(room,self) f=f+":"+u.saved.nick self.sendFlap(2,f) def chatInvite(self,room,user,message): if not self.canContact(user): return self.sendFlap(2,"CHAT_INVITE:%s:%s:%s:%s"%(room.name,room.id,user.saved.nick,message)) def chatUserUpdate(self,room,user): if user in room.users: inroom='T' else: inroom='F' self.sendFlap(2,"CHAT_UPDATE_BUDDY:%s:%s:%s"%(room.id,inroom,user.saved.nick)) def chatMessage(self,room,user,message): if not self.canContact(user): return self.sendFlap(2,"CHAT_IN:%s:%s:F:%s"%(room.id,user.saved.nick,message)) def chatWhisper(self,room,user,message): if not self.canContact(user): return self.sendFlap(2,"CHAT_IN:%s:%s:T:%s"%(room.id,user.saved.nick,message)) def chatLeave(self,room): self.sendFlap(2,"CHAT_LEFT:%s"%(room.id)) class Chatroom: def __init__(self,fac,exchange,name,id): self.exchange=exchange self.name=name self.id=id self.factory=fac self.users=[] def join(self,user): if user in self.users: return self.users.append(user) user.chatJoin(self) def leave(self,user): if user not in self.users: raise TOCParseError self.users.remove(user) user.chatLeave(self) for u in self.users: u.chatUserUpdate(self,user) if len(self.users)==0: self.factory.remChatroom(self) def say(self,user,message): for u in self.users: u.chatMessage(self,user,message) class SavedUser: def __init__(self): self.config="" self.nick="" self.password="" self.evilness=0 class TOCFactory(protocol.Factory): def __init__(self): self.users={} 
self.savedusers={} self.chatroom={} self.chatroomid=0 def buildProtocol(self,addr): p=TOC() p.factory=self return p def getChatroom(self,exchange,name): for i in self.chatroom.values(): if normalize(i.name)==normalize(name): return i self.chatroom[self.chatroomid]=Chatroom(self,exchange,name,self.chatroomid) self.chatroomid=self.chatroomid+1 return self.chatroom[self.chatroomid-1] def remChatroom(self,room): id=room.id del self.chatroom[id] MAXARGS={} MAXARGS["CONFIG"]=0 MAXARGS["NICK"]=0 MAXARGS["IM_IN"]=2 MAXARGS["UPDATE_BUDDY"]=5 MAXARGS["ERROR"]=-1 MAXARGS["EVILED"]=1 MAXARGS["CHAT_JOIN"]=1 MAXARGS["CHAT_IN"]=3 MAXARGS["CHAT_UPDATE_BUDDY"]=-1 MAXARGS["CHAT_INVITE"]=3 MAXARGS["CHAT_LEFT"]=0 MAXARGS["ADMIN_NICK_STATUS"]=0 MAXARGS["ADMIN_PASSWD_STATUS"]=0 class TOCClient(protocol.Protocol): def __init__(self,username,password,authhost="login.oscar.aol.com",authport=5190): self.username=normalize(username) # our username self._password=password # our password self._mode="SendNick" # current mode self._ourseqnum=19071 # current sequence number (for sendFlap) self._authhost=authhost # authorization host self._authport=authport # authorization port self._online=0 # are we online? self._buddies=[] # the current buddy list self._privacymode=PERMITALL # current privacy mode self._permitlist=[] # list of users on the permit list self._roomnames={} # the names for each of the rooms we're in self._receivedchatmembers={} # have we gotten who's in our room yet? 
self._denylist=[] self._cookies={} # for file transfers self._buf='' # current data buffer self._awaymessage='' def _debug(self,data): log.msg(data) def sendFlap(self,type,data): if type==DATA: data=data+"\000" length=len(data) s="*" s=s+struct.pack("!BHH",type,self._ourseqnum,length) s=s+data self._ourseqnum=self._ourseqnum+1 if self._ourseqnum>(256*256+256): self._ourseqnum=0 self._debug(data) self.transport.write(s) def isFlap(self): """ tests to see if a flap is actually on the buffer """ if self._buf=='': return 0 if self._buf[0]!="*": return 0 if len(self._buf)<6: return 0 foo,type,seqnum,length=struct.unpack("!BBHH",self._buf[:6]) if type not in range(1,6): return 0 if len(self._buf)<6+length: return 0 return 1 def readFlap(self): if self._buf=='': return None if self._buf[0]!="*": raise TOCParseError if len(self._buf)<6: return None foo,type,seqnum,length=struct.unpack("!BBHH",self._buf[:6]) if len(self._buf)<6+length: return None data=self._buf[6:6+length] self._buf=self._buf[6+length:] if data and data[-1]=="\000": data=data[:-1] return [type,data] def connectionMade(self): self._debug("connection made! 
%s" % self.transport) self.transport.write("FLAPON\r\n\r\n") def connectionLost(self, reason): self._debug("connection lost!") self._online=0 def dataReceived(self,data): self._buf=self._buf+data while self.isFlap(): flap=self.readFlap() func=getattr(self,"mode%s"%self._mode) func(flap) def modeSendNick(self,flap): if flap!=[1,"\000\000\000\001"]: raise TOCParseError s="\000\000\000\001\000\001"+struct.pack("!H",len(self.username))+self.username self.sendFlap(1,s) s="toc_signon %s %s %s %s english \"penguin\""%(self._authhost,\ self._authport,self.username,roast(self._password)) self.sendFlap(2,s) self._mode="Data" def modeData(self,flap): if not flap[1]: return if not ':' in flap[1]: self._debug("bad SNAC:%s"%(flap[1])) return command,rest=string.split(flap[1],":",1) if MAXARGS.has_key(command): maxsplit=MAXARGS[command] else: maxsplit=-1 if maxsplit==-1: l=tuple(string.split(rest,":")) elif maxsplit==0: l=(rest,) else: l=tuple(string.split(rest,":",maxsplit)) self._debug("%s %s"%(command,l)) try: func=getattr(self,"toc%s"%command) self._debug("calling %s"%func) except: self._debug("calling %s"%self.tocUNKNOWN) self.tocUNKNOWN(command,l) return func(l) def tocUNKNOWN(self,command,data): pass def tocSIGN_ON(self,data): if data!=("TOC1.0",): raise TOCParseError self._debug("Whee, signed on!") if self._buddies: self.add_buddy(self._buddies) self._online=1 self.onLine() def tocNICK(self,data): """ Handle a message that looks like:: NICK:<format of nickname> """ self.username=data[0] def tocCONFIG(self,data): """ Handle a message that looks like:: CONFIG:<config> Format of config data: - g: group. 
all users until next g or end of config are in this group - b: buddy - p: person on the permit list - d: person on the deny list - m: permit/deny mode (1: permit all, 2: deny all, 3: permit some, 4: deny some) """ data=data[0] if data and data[0]=="{":data=data[1:-1] lines=string.split(data,"\n") buddylist={} currentgroup="" permit=[] deny=[] mode=1 for l in lines: if l: code,data=l[0],l[2:] if code=='g': # group currentgroup=data buddylist[currentgroup]=[] elif code=='b': buddylist[currentgroup].append(data) elif code=='p': permit.append(data) elif code=='d': deny.append(data) elif code=='m': mode=int(data) self.gotConfig(mode,buddylist,permit,deny) def tocIM_IN(self,data): """ Handle a message that looks like:: IM_IN:<user>:<autoreply T|F>:message """ user=data[0] autoreply=(data[1]=='T') message=data[2] self.hearMessage(user,message,autoreply) def tocUPDATE_BUDDY(self,data): """ Handle a message that looks like:: UPDATE_BUDDY:<username>:<online T|F>:<warning level>:<signon time>:<idle time (minutes)>:<user class> """ data=list(data) online=(data[1]=='T') if len(data[5])==2: data[5]=data[5]+" " away=(data[5][-1]=='U') if data[5][-1]=='U': data[5]=data[5][:-1] self.updateBuddy(data[0],online,int(data[2]),int(data[3]),int(data[4]),data[5],away) def tocERROR(self,data): """ Handle a message that looks like:: ERROR:<error code>:<misc. data> """ code,args=data[0],data[1:] self.hearError(int(code),args) def tocEVILED(self,data): """ Handle a message that looks like:: EVILED:<current warning level>:<user who warned us> """ self.hearWarning(data[0],data[1]) def tocCHAT_JOIN(self,data): """ Handle a message that looks like:: CHAT_JOIN:<room id>:<room name> """ #self.chatJoined(int(data[0]),data[1]) self._roomnames[int(data[0])]=data[1] self._receivedchatmembers[int(data[0])]=0 def tocCHAT_UPDATE_BUDDY(self,data): """ Handle a message that looks like:: CHAT_UPDATE_BUDDY:<room id>:<in room? T/F>:<user 1>:<user 2>... 
""" roomid=int(data[0]) inroom=(data[1]=='T') if self._receivedchatmembers[roomid]: for u in data[2:]: self.chatUpdate(roomid,u,inroom) else: self._receivedchatmembers[roomid]=1 self.chatJoined(roomid,self._roomnames[roomid],list(data[2:])) def tocCHAT_IN(self,data): """ Handle a message that looks like:: CHAT_IN:<room id>:<username>:<whisper T/F>:<message> whisper isn't used """ whisper=(data[2]=='T') if whisper: self.chatHearWhisper(int(data[0]),data[1],data[3]) else: self.chatHearMessage(int(data[0]),data[1],data[3]) def tocCHAT_INVITE(self,data): """ Handle a message that looks like:: CHAT_INVITE:<room name>:<room id>:<username>:<message> """ self.chatInvited(int(data[1]),data[0],data[2],data[3]) def tocCHAT_LEFT(self,data): """ Handle a message that looks like:: CHAT_LEFT:<room id> """ self.chatLeft(int(data[0])) del self._receivedchatmembers[int(data[0])] del self._roomnames[int(data[0])] def tocRVOUS_PROPOSE(self,data): """ Handle a message that looks like:: RVOUS_PROPOSE:<user>:<uuid>:<cookie>:<seq>:<rip>:<pip>:<vip>:<port> [:tlv tag1:tlv value1[:tlv tag2:tlv value2[:...]]] """ user,uid,cookie,seq,rip,pip,vip,port=data[:8] cookie=base64.decodestring(cookie) port=int(port) tlvs={} for i in range(8,len(data),2): key=data[i] value=base64.decodestring(data[i+1]) tlvs[key]=value name=UUIDS[uid] try: func=getattr(self,"toc%s"%name) except: self._debug("no function for UID %s" % uid) return func(user,cookie,seq,pip,vip,port,tlvs) def tocSEND_FILE(self,user,cookie,seq,pip,vip,port,tlvs): if tlvs.has_key('12'): description=tlvs['12'] else: description="" subtype,numfiles,size=struct.unpack("!HHI",tlvs['10001'][:8]) name=tlvs['10001'][8:-4] while name[-1]=='\000': name=name[:-1] self._cookies[cookie]=[user,SEND_FILE_UID,pip,port,{'name':name}] self.rvousProposal("send",cookie,user,vip,port,description=description, name=name,files=numfiles,size=size) def tocGET_FILE(self,user,cookie,seq,pip,vip,port,tlvs): return # XXX add this back in 
#reactor.clientTCP(pip,port,GetFileTransfer(self,cookie,os.path.expanduser("~"))) #self.rvous_accept(user,cookie,GET_FILE_UID) def onLine(self): """ called when we are first online """ pass def gotConfig(self,mode,buddylist,permit,deny): """ called when we get a configuration from the server mode := permit/deny mode buddylist := current buddylist permit := permit list deny := deny list """ pass def hearError(self,code,args): """ called when an error is received code := error code args := misc. arguments (username, etc.) """ pass def hearWarning(self,newamount,username): """ called when we get warned newamount := the current warning level username := the user who warned us, or '' if it's anonymous """ pass def hearMessage(self,username,message,autoreply): """ called when you receive an IM username := the user who the IM is from message := the message autoreply := true if the message is an autoreply from an away message """ pass def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away): """ called when a buddy changes state username := the user whos state changed online := true if the user is online evilness := the users current warning level signontime := the time the user signed on (UNIX epoch) idletime := the time the user has been idle (minutes) away := true if the user is away userclass := the class of the user (generally " O") """ pass def chatJoined(self,roomid,roomname,users): """ we just joined a chat room roomid := the AIM id for the room roomname := the name for the room users := a list of the users already in the room """ pass def chatUpdate(self,roomid,username,inroom): """ a user has joined the room roomid := the AIM id for the room username := the username inroom := true if the user is in the room """ pass def chatHearMessage(self,roomid,username,message): """ a message was sent to the room roomid := the AIM id for the room username := the user who sent the message message := the message """ pass def 
chatHearWhisper(self,roomid,username,message): """ someone whispered to us in a chatroom roomid := the AIM for the room username := the user who whispered to us message := the message """ pass def chatInvited(self,roomid,roomname,username,message): """ we were invited to a chat room roomid := the AIM id for the room roomname := the name of the room username := the user who invited us message := the invite message """ pass def chatLeft(self,roomid): """ we left the room roomid := the AIM id for the room """ pass def rvousProposal(self,type,cookie,user,vip,port,**kw): """ we were asked for a rondevouz type := the type of rondevous. currently, one of ["send"] cookie := the cookie. pass this to rvous_accept() user := the user who asked us vip := their verified_ip port := the port they want us to conenct to kw := misc. args """ pass #self.rvous_accept(cookie) def receiveBytes(self,user,file,chunk,sofar,total): """ we received part of a file from a file transfer file := the name of the file chunk := the chunk of data sofar := how much data we've gotten so far total := the total amount of data """ pass #print user,file,sofar,total def isaway(self): """ return our away status """ return len(self._awaymessage)>0 def set_config(self,mode,buddylist,permit,deny): """ set the server configuration mode := permit mode buddylist := buddy list permit := permit list deny := deny list """ s="m %s\n"%mode for g in buddylist.keys(): s=s+"g %s\n"%g for u in buddylist[g]: s=s+"b %s\n"%u for p in permit: s=s+"p %s\n"%p for d in deny: s=s+"d %s\n"%d #s="{\n"+s+"\n}" self.sendFlap(2,"toc_set_config %s"%quote(s)) def add_buddy(self,buddies): s="" if type(buddies)==type(""): buddies=[buddies] for b in buddies: s=s+" "+normalize(b) self.sendFlap(2,"toc_add_buddy%s"%s) def del_buddy(self,buddies): s="" if type(buddies)==type(""): buddies=[buddies] for b in buddies: s=s+" "+b self.sendFlap(2,"toc_remove_buddy%s"%s) def add_permit(self,users): if type(users)==type(""): users=[users] s="" if 
self._privacymode!=PERMITSOME: self._privacymode=PERMITSOME self._permitlist=[] for u in users: u=normalize(u) if u not in self._permitlist:self._permitlist.append(u) s=s+" "+u if not s: self._privacymode=DENYALL self._permitlist=[] self._denylist=[] self.sendFlap(2,"toc_add_permit"+s) def del_permit(self,users): if type(users)==type(""): users=[users] p=self._permitlist[:] for u in users: u=normalize(u) if u in p: p.remove(u) self.add_permit([]) self.add_permit(p) def add_deny(self,users): if type(users)==type(""): users=[users] s="" if self._privacymode!=DENYSOME: self._privacymode=DENYSOME self._denylist=[] for u in users: u=normalize(u) if u not in self._denylist:self._denylist.append(u) s=s+" "+u if not s: self._privacymode=PERMITALL self._permitlist=[] self._denylist=[] self.sendFlap(2,"toc_add_deny"+s) def del_deny(self,users): if type(users)==type(""): users=[users] d=self._denylist[:] for u in users: u=normalize(u) if u in d: d.remove(u) self.add_deny([]) if d: self.add_deny(d) def signon(self): """ called to finish the setup, and signon to the network """ self.sendFlap(2,"toc_init_done") self.sendFlap(2,"toc_set_caps %s" % (SEND_FILE_UID,)) # GET_FILE_UID) def say(self,user,message,autoreply=0): """ send a message user := the user to send to message := the message autoreply := true if the message is an autoreply (good for away messages) """ if autoreply: a=" auto" else: a='' self.sendFlap(2,"toc_send_im %s %s%s"%(normalize(user),quote(message),a)) def idle(self,idletime=0): """ change idle state idletime := the seconds that the user has been away, or 0 if they're back """ self.sendFlap(2,"toc_set_idle %s" % int(idletime)) def evil(self,user,anon=0): """ warn a user user := the user to warn anon := if true, an anonymous warning """ self.sendFlap(2,"toc_evil %s %s"%(normalize(user), (not anon and "anon") or "norm")) def away(self,message=''): """ change away state message := the message, or '' to come back from awayness """ self._awaymessage=message if 
message: message=' '+quote(message) self.sendFlap(2,"toc_set_away%s"%message) def chat_join(self,exchange,roomname): """ join a chat room exchange := should almost always be 4 roomname := room name """ roomname=string.replace(roomname," ","") self.sendFlap(2,"toc_chat_join %s %s"%(int(exchange),roomname)) def chat_say(self,roomid,message): """ send a message to a chatroom roomid := the AIM id for the room message := the message to send """ self.sendFlap(2,"toc_chat_send %s %s"%(int(roomid),quote(message))) def chat_whisper(self,roomid,user,message): """ whisper to another user in a chatroom roomid := the AIM id for the room user := the user to whisper to message := the message to send """ self.sendFlap(2,"toc_chat_whisper %s %s %s"%(int(roomid),normalize(user),quote(message))) def chat_leave(self,roomid): """ leave a chat room. roomid := the AIM id for the room """ self.sendFlap(2,"toc_chat_leave %s" % int(roomid)) def chat_invite(self,roomid,usernames,message): """ invite a user[s] to the chat room roomid := the AIM id for the room usernames := either a string (one username) or a list (more than one) message := the message to invite them with """ if type(usernames)==type(""): # a string, one username users=usernames else: users="" for u in usernames: users=users+u+" " users=users[:-1] self.sendFlap(2,"toc_chat_invite %s %s %s" % (int(roomid),quote(message),users)) def chat_accept(self,roomid): """ accept an invite to a chat room roomid := the AIM id for the room """ self.sendFlap(2,"toc_chat_accept %s"%int(roomid)) def rvous_accept(self,cookie): user,uuid,pip,port,d=self._cookies[cookie] self.sendFlap(2,"toc_rvous_accept %s %s %s" % (normalize(user), cookie,uuid)) if uuid==SEND_FILE_UID: protocol.ClientCreator(reactor, SendFileTransfer,self,cookie,user,d["name"]).connectTCP(pip,port) def rvous_cancel(self,cookie): user,uuid,pip,port,d=self._cookies[cookie] self.sendFlap(2,"toc_rvous_accept %s %s %s" % (normalize(user), cookie,uuid)) del self._cookies[cookie] class 
SendFileTransfer(protocol.Protocol): header_fmt="!4s2H8s6H10I32s3c69s16s2H64s" def __init__(self,client,cookie,user,filename): self.client=client self.cookie=cookie self.user=user self.filename=filename self.hdr=[0,0,0] self.sofar=0 def dataReceived(self,data): if not self.hdr[2]==0x202: self.hdr=list(struct.unpack(self.header_fmt,data[:256])) self.hdr[2]=0x202 self.hdr[3]=self.cookie self.hdr[4]=0 self.hdr[5]=0 self.transport.write(apply(struct.pack,[self.header_fmt]+self.hdr)) data=data[256:] if self.hdr[6]==1: self.name=self.filename else: self.name=self.filename+self.hdr[-1] while self.name[-1]=="\000": self.name=self.name[:-1] if not data: return self.sofar=self.sofar+len(data) self.client.receiveBytes(self.user,self.name,data,self.sofar,self.hdr[11]) if self.sofar==self.hdr[11]: # end of this file self.hdr[2]=0x204 self.hdr[7]=self.hdr[7]-1 self.hdr[9]=self.hdr[9]-1 self.hdr[19]=DUMMY_CHECKSUM # XXX really calculate this self.hdr[18]=self.hdr[18]+1 self.hdr[21]="\000" self.transport.write(apply(struct.pack,[self.header_fmt]+self.hdr)) self.sofar=0 if self.hdr[7]==0: self.transport.loseConnection() class GetFileTransfer(protocol.Protocol): header_fmt="!4s 2H 8s 6H 10I 32s 3c 69s 16s 2H 64s" def __init__(self,client,cookie,dir): self.client=client self.cookie=cookie self.dir=dir self.buf="" def connectionMade(self): def func(f,path,names): names.sort(lambda x,y:cmp(string.lower(x),string.lower(y))) for n in names: name=os.path.join(path,n) lt=time.localtime(os.path.getmtime(name)) size=os.path.getsize(name) f[1]=f[1]+size f.append("%02d/%02d/%4d %02d:%02d %8d %s" % (lt[1],lt[2],lt[0],lt[3],lt[4],size,name[f[0]:])) f=[len(self.dir)+1,0] os.path.walk(self.dir,func,f) size=f[1] self.listing=string.join(f[2:],"\r\n")+"\r\n" open("\\listing.txt","w").write(self.listing) hdr=["OFT2",256,0x1108,self.cookie,0,0,len(f)-2,len(f)-2,1,1,size, len(self.listing),os.path.getmtime(self.dir), checksum(self.listing),0,0,0,0,0,0,"OFT_Windows ICBMFT V1.1 32", 
"\002",chr(0x1a),chr(0x10),"","",0,0,""] self.transport.write(apply(struct.pack,[self.header_fmt]+hdr)) def dataReceived(self,data): self.buf=self.buf+data while len(self.buf)>=256: hdr=list(struct.unpack(self.header_fmt,self.buf[:256])) self.buf=self.buf[256:] if hdr[2]==0x1209: self.file=StringIO.StringIO(self.listing) self.transport.registerProducer(self,0) elif hdr[2]==0x120b: pass elif hdr[2]==0x120c: # file request file=hdr[-1] for k,v in [["\000",""],["\001",os.sep]]: file=string.replace(file,k,v) self.name=os.path.join(self.dir,file) self.file=open(self.name,'rb') hdr[2]=0x0101 hdr[6]=hdr[7]=1 hdr[10]=hdr[11]=os.path.getsize(self.name) hdr[12]=os.path.getmtime(self.name) hdr[13]=checksum_file(self.file) self.file.seek(0) hdr[18]=hdr[19]=0 hdr[21]=chr(0x20) self.transport.write(apply(struct.pack,[self.header_fmt]+hdr)) log.msg("got file request for %s"%file,hex(hdr[13])) elif hdr[2]==0x0202: log.msg("sending file") self.transport.registerProducer(self,0) elif hdr[2]==0x0204: log.msg("real checksum: %s"%hex(hdr[19])) del self.file elif hdr[2]==0x0205: # resume already=hdr[18] if already: data=self.file.read(already) else: data="" log.msg("restarting at %s"%already) hdr[2]=0x0106 hdr[19]=checksum(data) self.transport.write(apply(struct.pack,[self.header_fmt]+hdr)) elif hdr[2]==0x0207: self.transport.registerProducer(self,0) else: log.msg("don't understand 0x%04x"%hdr[2]) log.msg(hdr) def resumeProducing(self): data=self.file.read(4096) log.msg(len(data)) if not data: self.transport.unregisterProducer() self.transport.write(data) def pauseProducing(self): pass def stopProducing(self): del self.file # UUIDs SEND_FILE_UID = "09461343-4C7F-11D1-8222-444553540000" GET_FILE_UID = "09461348-4C7F-11D1-8222-444553540000" UUIDS={ SEND_FILE_UID:"SEND_FILE", GET_FILE_UID:"GET_FILE" } # ERRORS # general NOT_AVAILABLE=901 CANT_WARN=902 MESSAGES_TOO_FAST=903 # admin BAD_INPUT=911 BAD_ACCOUNT=912 REQUEST_ERROR=913 SERVICE_UNAVAILABLE=914 # chat NO_CHAT_IN=950 # im and info 
SEND_TOO_FAST=960 MISSED_BIG_IM=961 MISSED_FAST_IM=962 # directory DIR_FAILURE=970 TOO_MANY_MATCHES=971 NEED_MORE_QUALIFIERS=972 DIR_UNAVAILABLE=973 NO_EMAIL_LOOKUP=974 KEYWORD_IGNORED=975 NO_KEYWORDS=976 BAD_LANGUAGE=977 BAD_COUNTRY=978 DIR_FAIL_UNKNOWN=979 # authorization BAD_NICKNAME=980 SERVICE_TEMP_UNAVAILABLE=981 WARNING_TOO_HIGH=982 CONNECTING_TOO_QUICK=983 UNKNOWN_SIGNON=989 STD_MESSAGE={} STD_MESSAGE[NOT_AVAILABLE]="%s not currently available" STD_MESSAGE[CANT_WARN]="Warning of %s not currently available" STD_MESSAGE[MESSAGES_TOO_FAST]="A message has been dropped, you are exceeding the server speed limit" STD_MESSAGE[BAD_INPUT]="Error validating input" STD_MESSAGE[BAD_ACCOUNT]="Invalid account" STD_MESSAGE[REQUEST_ERROR]="Error encountered while processing request" STD_MESSAGE[SERVICE_UNAVAILABLE]="Service unavailable" STD_MESSAGE[NO_CHAT_IN]="Chat in %s is unavailable" STD_MESSAGE[SEND_TOO_FAST]="You are sending messages too fast to %s" STD_MESSAGE[MISSED_BIG_IM]="You missed an IM from %s because it was too big" STD_MESSAGE[MISSED_FAST_IM]="You missed an IM from %s because it was sent too fast" # skipping directory for now STD_MESSAGE[BAD_NICKNAME]="Incorrect nickname or password" STD_MESSAGE[SERVICE_TEMP_UNAVAILABLE]="The service is temporarily unavailable" STD_MESSAGE[WARNING_TOO_HIGH]="Your warning level is currently too high to sign on" STD_MESSAGE[CONNECTING_TOO_QUICK]="You have been connecting and disconnecting too frequently. Wait 10 minutes and try again. If you continue to try, you will need to wait even longer." STD_MESSAGE[UNKNOWN_SIGNON]="An unknown signon error has occurred %s"
import datetime

from mongoengine import Document, EmbeddedDocument
from mongoengine import StringField, ListField
from mongoengine import EmbeddedDocumentField, IntField
from django.conf import settings

from cybox.objects.file_object import File
from cybox.objects.artifact_object import Artifact, Base64Encoding, ZlibCompression
from cybox.core import Observable
from cybox.common import UnsignedLong, Hash

from crits.samples.migrate import migrate_sample
from crits.core.crits_mongoengine import CritsBaseAttributes, CritsDocumentFormatter
from crits.core.crits_mongoengine import CritsSourceDocument
from crits.core.fields import CritsDateTimeField, getFileField


class Sample(CritsBaseAttributes, CritsSourceDocument, Document):
    """
    Sample object.

    A binary stored in GridFS together with its derived metadata
    (hashes, size, filetype).  Sharded on ``md5``.
    """

    meta = {
        "collection": settings.COL_SAMPLES,
        "crits_type": 'Sample',
        "latest_schema_version": 4,
        "shard_key": ('md5',),
        "schema_doc": {
            'filename': 'The name of the last file that was uploaded with this'\
                        'MD5',
            'filenames': 'A list of filenames this binary has gone by.',
            'filetype': 'The filetype of the file',
            'mimetype': 'The mimetype of the file',
            'size': 'The size of the file',
            'md5': 'The MD5 of the file',
            'sha1': 'The SHA1 of the file',
            'sha256': 'The SHA256 of the file',
            'ssdeep': 'The ssdeep of the file',
            'campaign': 'List [] of campaigns using this file',
            'source': 'List [] of sources that provided this file',
            'created': 'ISODate of when this file was uploaded',
            'modified': 'ISODate of when the file metadata was last modified',
            'filedata': 'The ObjectId of the file in GridFS'
        },
        "jtable_opts": {
            'details_url': 'crits.samples.views.detail',
            'details_url_key': 'md5',
            'default_sort': "created DESC",
            'searchurl': 'crits.samples.views.samples_listing',
            'fields': ["filename", "size", "filetype", "created",
                       "modified", "campaign", "source", "md5",
                       "id", "status"],
            'jtopts_fields': ["details", "filename", "size", "filetype",
                              "created", "campaign", "source", "md5",
                              "status", "favorite", "id"],
            'hidden_fields': ["md5"],
            'linked_fields': ["filename", "source", "campaign", "filetype"],
            'details_link': 'details',
            'no_sort': ['details', 'id']
        },
    }

    filedata = getFileField(collection_name=settings.COL_SAMPLES)
    filename = StringField(required=True)
    filenames = ListField(StringField())
    filetype = StringField()
    md5 = StringField(required=True)
    mimetype = StringField()
    sha1 = StringField()
    sha256 = StringField()
    size = IntField(default=0)
    ssdeep = StringField()

    def migrate(self):
        """Migrate this document to the latest schema version."""
        migrate_sample(self)

    def add_file_data(self, file_data):
        """
        Store raw bytes as this sample's binary and derive its metadata.

        :param file_data: The raw file contents.
        """
        self._generate_file_metadata(file_data)
        self.filedata = file_data

    def add_file_obj(self, file_obj):
        """
        Store a file-like object's contents as this sample's binary.

        :param file_obj: An open file-like object to read from.
        """
        data = file_obj.read()
        self._generate_file_metadata(data)
        self.filedata = data

    def _generate_file_metadata(self, data):
        """
        Populate filetype, mimetype, size, hashes and ssdeep from raw bytes.

        Metadata derivation is best-effort: libmagic/pydeep failures fall
        back to placeholder values instead of aborting the upload.
        """
        # Local imports: pydeep and magic are optional native-extension
        # libraries; importing here keeps module import cheap.
        import pydeep
        import magic
        from hashlib import md5, sha1, sha256
        try:
            self.filetype = magic.from_buffer(data)
        except Exception:
            self.filetype = "Unavailable"
        try:
            mimetype = magic.from_buffer(data, mime=True)
            if mimetype:
                self.mimetype = mimetype.split(";")[0]
            else:
                self.mimetype = "unknown"
        except Exception:
            self.mimetype = "Unavailable"
        self.size = len(data)
        # this is a shard key. you can't modify it once it's set.
        # MongoEngine will still mark the field as modified even if you set it
        # to the same value.
        if not self.md5:
            self.md5 = md5(data).hexdigest()
        self.sha1 = sha1(data).hexdigest()
        self.sha256 = sha256(data).hexdigest()
        try:
            self.ssdeep = pydeep.hash_bytes(data)
        except Exception:
            self.ssdeep = None

    def is_pe(self):
        """
        Is this a PE file.
        """
        return (self.filedata.grid_id is not None and
                self.filedata.read(2) == "MZ")

    def is_pdf(self):
        """
        Is this a PDF.
        """
        return (self.filedata.grid_id is not None and
                "%PDF-" in self.filedata.read(1024))

    def to_cybox_observable(self, exclude=None, bin_fmt="raw"):
        """
        Convert this sample to a CybOX Observable.

        :param exclude: Attribute names to leave out of the observable.
        :type exclude: list
        :param bin_fmt: Artifact packaging: "raw", "zlib" or "base64".
        :type bin_fmt: str
        :returns: Tuple of (list of Observables, releasability).
        """
        if exclude is None:
            exclude = []
        observables = []
        f = File()
        for attr in ['md5', 'sha1', 'sha256']:
            if attr not in exclude:
                val = getattr(self, attr, None)
                if val:
                    setattr(f, attr, val)
        if self.ssdeep and 'ssdeep' not in exclude:
            f.add_hash(Hash(self.ssdeep, Hash.TYPE_SSDEEP))
        if 'size' not in exclude and 'size_in_bytes' not in exclude:
            f.size_in_bytes = UnsignedLong(self.size)
        if 'filename' not in exclude and 'file_name' not in exclude:
            f.file_name = self.filename
        # create an Artifact object for the binary if it exists
        if 'filedata' not in exclude and bin_fmt:
            data = self.filedata.read()
            if data:  # if sample data available
                a = Artifact(data, Artifact.TYPE_FILE)  # create artifact w/data
                if bin_fmt == "zlib":
                    a.packaging.append(ZlibCompression())
                    a.packaging.append(Base64Encoding())
                elif bin_fmt == "base64":
                    a.packaging.append(Base64Encoding())
                f.add_related(a, "Child_Of")  # relate artifact to file
        if 'filetype' not in exclude and 'file_format' not in exclude:
            # NOTE: this doesn't work because the CybOX File object does not
            # have any support built in for setting the filetype to a
            # CybOX-binding friendly object (e.g., calling .to_dict() on
            # the resulting CybOX object fails on this field.
            f.file_format = self.filetype
        observables.append(Observable(f))
        return (observables, self.releasability)

    @classmethod
    def from_cybox(cls, cybox_obs):
        """
        Convert a Cybox DefinedObject to a MongoEngine Sample object.

        :param cybox_obs: The cybox object to create the Sample from.
        :type cybox_obs: :class:`cybox.core.Observable`
        :returns: :class:`crits.samples.sample.Sample`
        """
        cybox_object = cybox_obs.object_.properties
        if cybox_object.md5:
            db_obj = Sample.objects(md5=cybox_object.md5).first()
            if db_obj:  # if a sample with md5 already exists
                return db_obj  # don't modify, just return
        sample = cls()  # else, start creating new sample record
        sample.filename = str(cybox_object.file_name)
        sample.size = cybox_object.size_in_bytes.value if cybox_object.size_in_bytes else 0
        for hash_ in cybox_object.hashes:
            if hash_.type_.value.upper() in [Hash.TYPE_MD5, Hash.TYPE_SHA1,
                                             Hash.TYPE_SHA256,
                                             Hash.TYPE_SSDEEP]:
                setattr(sample, hash_.type_.value.lower(),
                        str(hash_.simple_hash_value).strip().lower())
        for obj in cybox_object.parent.related_objects:
            # attempt to find data in cybox
            if (isinstance(obj.properties, Artifact) and
                    obj.properties.type_ == Artifact.TYPE_FILE):
                sample.add_file_data(obj.properties.data)
                break
        return sample

    def discover_binary(self):
        """
        Queries GridFS for a matching binary to this sample document.
        """
        from crits.core.mongo_tools import mongo_connector

        fm = mongo_connector("%s.files" % self._meta['collection'])
        objectid = fm.find_one({'md5': self.md5}, {'_id': 1})
        if objectid:
            self.filedata.grid_id = objectid['_id']
            # Force MongoEngine to persist the newly-linked grid_id.
            self.filedata._mark_as_changed()

    def set_filenames(self, filenames):
        """
        Set the Sample filenames to a specified list.

        :param filenames: The filenames to set.
        :type filenames: list
        """
        if isinstance(filenames, list):
            self.filenames = filenames
import warnings

from django.contrib.sites.models import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import smart_str
from fluent_comments import appsettings

from .compat import BASE_APP

if BASE_APP == 'django.contrib.comments':
    from django.contrib.comments.moderation import moderator, CommentModerator
elif BASE_APP == 'django_comments':
    from django_comments.moderation import moderator, CommentModerator
else:
    raise NotImplementedError()

try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin  # Python 2

# Optional dependency (for lacking Python 3 support)
try:
    from akismet import Akismet
except ImportError:
    Akismet = None
    if appsettings.FLUENT_CONTENTS_USE_AKISMET:
        warnings.warn("No `akismet` package has been installed, disabling Akismet checks for django-fluent-comments.", RuntimeWarning)

# Akismet code originally based on django-comments-spamfighter.

__all__ = (
    'FluentCommentsModerator',
    'moderate_model',
    'get_model_moderator',
    'comments_are_open',
    'comments_are_moderated',
)


class FluentCommentsModerator(CommentModerator):
    """
    Moderation policy for fluent-comments.

    Extends the standard Django comment moderator with an optional
    Akismet spam check whose outcome (delete vs. moderate) is
    configured via ``FLUENT_COMMENTS_AKISMET_ACTION``.
    """
    auto_close_field = None
    auto_moderate_field = None
    enable_field = None
    close_after = appsettings.FLUENT_COMMENTS_CLOSE_AFTER_DAYS
    moderate_after = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS
    email_notification = False  # Using signals instead
    akismet_check = appsettings.FLUENT_CONTENTS_USE_AKISMET and Akismet is not None
    akismet_check_action = appsettings.FLUENT_COMMENTS_AKISMET_ACTION

    def allow(self, comment, content_object, request):
        """
        Determine whether a given comment is allowed to be posted on a given object.

        Returns ``True`` if the comment should be allowed, ``False`` otherwise.
        """
        # Parent class check
        if not super(FluentCommentsModerator, self).allow(comment, content_object, request):
            return False

        # Akismet check
        if self.akismet_check and self.akismet_check_action == 'delete':
            if self._akismet_check(comment, content_object, request):
                return False  # Akismet marked the comment as spam.

        return True

    def moderate(self, comment, content_object, request):
        """
        Determine whether a given comment on a given object should be allowed to show up immediately,
        or should be marked non-public and await approval.

        Returns ``True`` if the comment should be moderated (marked non-public),
        ``False`` otherwise.
        """
        # Parent class check
        if super(FluentCommentsModerator, self).moderate(comment, content_object, request):
            return True

        # Akismet check
        if self.akismet_check and self.akismet_check_action == 'moderate':
            # Return True if akismet marks this comment as spam and we want to moderate it.
            if self._akismet_check(comment, content_object, request):
                return True

        return False

    def _akismet_check(self, comment, content_object, request):
        """
        Connects to Akismet and returns True if Akismet marks this comment as spam.
        Otherwise returns False.

        :raises ImproperlyConfigured: when ``AKISMET_API_KEY`` is not set.
        """
        # Get Akismet data
        AKISMET_API_KEY = appsettings.AKISMET_API_KEY
        if not AKISMET_API_KEY:
            raise ImproperlyConfigured('You must set AKISMET_API_KEY to use comment moderation with Akismet.')

        current_domain = get_current_site(request).domain
        # Fixed: use a conditional expression instead of the fragile
        # `cond and a or b` idiom.
        auto_blog_url = '{0}://{1}/'.format('https' if request.is_secure() else 'http', current_domain)
        blog_url = appsettings.AKISMET_BLOG_URL or auto_blog_url

        akismet_api = Akismet(
            key=AKISMET_API_KEY,
            blog_url=blog_url
        )

        if akismet_api.verify_key():
            akismet_data = self._get_akismet_data(blog_url, comment, content_object, request)
            if akismet_api.comment_check(smart_str(comment.comment), data=akismet_data, build_data=True):
                return True

        return False

    def _get_akismet_data(self, blog_url, comment, content_object, request):
        """Build the payload dict for an Akismet ``comment-check`` call."""
        # Field documentation:
        # http://akismet.com/development/api/#comment-check
        akismet_data = {
            # Comment info
            'permalink': urljoin(blog_url, content_object.get_absolute_url()),
            'comment_type': 'comment',  # comment, trackback, pingback, see http://blog.akismet.com/2012/06/19/pro-tip-tell-us-your-comment_type/
            'comment_author': getattr(comment, 'name', ''),
            'comment_author_email': getattr(comment, 'email', ''),
            'comment_author_url': getattr(comment, 'url', ''),

            # Request info
            'referrer': request.META.get('HTTP_REFERER', ''),
            'user_agent': request.META.get('HTTP_USER_AGENT', ''),
            'user_ip': comment.ip_address,

            # Server info
            'SERVER_ADDR': request.META.get('SERVER_ADDR', ''),
            'SERVER_ADMIN': request.META.get('SERVER_ADMIN', ''),
            'SERVER_NAME': request.META.get('SERVER_NAME', ''),
            'SERVER_PORT': request.META.get('SERVER_PORT', ''),
            'SERVER_SIGNATURE': request.META.get('SERVER_SIGNATURE', ''),
            'SERVER_SOFTWARE': request.META.get('SERVER_SOFTWARE', ''),
            'HTTP_ACCEPT': request.META.get('HTTP_ACCEPT', ''),
        }

        # Allow testing, see:
        # http://blog.akismet.com/2012/07/20/pro-tip-testing-testing/
        if appsettings.AKISMET_IS_TEST:
            akismet_data['is_test'] = '1'

        return akismet_data


def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
    """
    Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.

    :param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
    :param publication_date_field: The field name of a :class:`~django.db.models.DateTimeField` in the parent model which stores the publication date.
    :type publication_date_field: str
    :param enable_comments_field: The field name of a :class:`~django.db.models.BooleanField` in the parent model which stores the whether comments are enabled.
    :type enable_comments_field: str
    """
    attrs = {
        'auto_close_field': publication_date_field,
        'auto_moderate_field': publication_date_field,
        'enable_field': enable_comments_field,
    }
    # Generate a per-model subclass of FluentCommentsModerator on the fly.
    ModerationClass = type(ParentModel.__name__ + 'Moderator', (FluentCommentsModerator,), attrs)
    moderator.register(ParentModel, ModerationClass)


def get_model_moderator(model):
    """
    Return the moderator class that is registered with a content object.
    If there is no associated moderator with a class, None is returned.

    :param model: The Django model registered with :func:`moderate_model`
    :type model: :class:`~django.db.models.Model`
    :return: The moderator class which holds the moderation policies.
    :rtype: :class:`~django_comments.moderation.CommentModerator`
    """
    try:
        return moderator._registry[model]
    except KeyError:
        return None


def comments_are_open(content_object):
    """
    Return whether comments are still open for a given target object.
    """
    # Renamed local (was `moderator`) so it no longer shadows the imported
    # `moderator` registry module object.
    model_moderator = get_model_moderator(content_object.__class__)
    if model_moderator is None:
        return True

    # Check the 'enable_field', 'auto_close_field' and 'close_after',
    # by reusing the basic Django policies.
    return CommentModerator.allow(model_moderator, None, content_object, None)


def comments_are_moderated(content_object):
    """
    Return whether comments are moderated for a given target object.
    """
    model_moderator = get_model_moderator(content_object.__class__)
    if model_moderator is None:
        return False

    # Check the 'auto_moderate_field', 'moderate_after',
    # by reusing the basic Django policies.
    return CommentModerator.moderate(model_moderator, None, content_object, None)
#!/usr/bin/env python
# Minimal Python 2 development HTTP server for ImpactJS games and the
# Weltmeister level editor.  It emulates the three PHP API endpoints the
# editor expects (save/browse/glob) and serves static files for everything
# else.
import BaseHTTPServer
import cgi
import glob
import json
import mimetypes
import os
import os.path
import urlparse

# Various config settings for the python server
SETTINGS = {
    'port': 8080,
    'logging': False,
    'api-save': '/lib/weltmeister/api/save.php',
    'api-browse': '/lib/weltmeister/api/browse.php',
    'api-glob': '/lib/weltmeister/api/glob.php',
    'image-types': ['.png', '.jpg', '.gif', '.jpeg'],
    'mimetypes': {
        'ogg': 'audio/ogg',
        'mp3': 'audio/mp3'
    }
}

# Override port if we are on a Heroku server
if os.environ.get('PORT'):
    SETTINGS['port'] = int(os.environ.get('PORT'))

# Get the current directory (always with a trailing slash)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if BASE_DIR[-1] != '/':
    BASE_DIR += '/'

# Blank favicon - prevents silly 404s from occuring if no favicon is supplied
FAVICON_GIF = 'GIF89a\x01\x00\x01\x00\xf0\x00\x00\xff\xff\xff\x00\x00\x00!\xff\x0bXMP DataXMP\x02?x\x00!\xf9\x04\x05\x00\x00\x00\x00,\x00\x00\x00\x00\x01\x00\x01\x00@\x02\x02D\x01\x00;'


class HTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Request handler implementing the Weltmeister API plus static files."""

    def send_json(self, obj, code=200, headers=None):
        'Serialize obj and send it as an application/json response.'
        if not headers:
            headers = {}
        headers['Content-Type'] = 'application/json'
        self.send_response(json.dumps(obj), code, headers)

    def send_response(self, mesg, code=200, headers=None):
        'Wraps sending a response down'
        # NOTE(review): this overrides BaseHTTPRequestHandler.send_response
        # with a different signature (body instead of message) — intentional
        # here, but surprising to readers of the base class.
        if not headers:
            headers = {}
        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'text/html'
        BaseHTTPServer.BaseHTTPRequestHandler.send_response(self, code)
        self.send_header('Content-Length', len(mesg))
        if headers:
            for k, v in headers.iteritems():
                self.send_header(k, v)
        self.end_headers()
        self.wfile.write(mesg)

    def log_request(self, *args, **kwargs):
        'Suppress per-request logging unless enabled in SETTINGS.'
        if SETTINGS['logging']:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, *args, **kwargs)

    def init_request(self):
        # Split the request path into the file path and the query string.
        parts = self.path.split('?', 1)
        self.post_params = {}
        if len(parts) == 1:
            self.file_path = parts[0]
            self.query_params = {}
        else:
            self.file_path = parts[0]
            self.query_params = urlparse.parse_qs(parts[1])

    def do_GET(self):
        self.init_request()
        self.route_request('GET')

    def do_POST(self):
        self.init_request()
        # From http://stackoverflow.com/questions/4233218/python-basehttprequesthandler-post-variables
        ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
        if ctype == 'multipart/form-data':
            self.post_params = cgi.parse_multipart(self.rfile, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            length = int(self.headers.getheader('content-length'))
            self.post_params = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
        self.route_request('POST')

    def route_request(self, method='GET'):
        # Dispatch: API endpoints first, then static files for GET only.
        if self.file_path == SETTINGS['api-save']:
            self.save()
        elif self.file_path == SETTINGS['api-browse']:
            self.browse()
        elif self.file_path == SETTINGS['api-glob']:
            self.glob()
        elif method == 'GET':
            self.serve_file()
        else:
            self.barf()

    def save(self):
        # Persist a level file posted by the editor; only .js files allowed.
        resp = {'error': 0}
        if 'path' in self.post_params and 'data' in self.post_params:
            path = self.post_params['path'][0]
            # '..' is stripped to keep writes inside BASE_DIR.
            path = os.path.join(BASE_DIR, path.replace('..', ''))
            data = self.post_params['data'][0]
            if path.endswith('.js'):
                try:
                    # NOTE(review): file handle not closed explicitly; a
                    # with-block would be safer than relying on refcounting.
                    open(path, 'w').write(data)
                except:
                    resp['error'] = 2
                    # NOTE(review): '%d' with a string path would raise
                    # TypeError here — should be '%s'.
                    resp['msg'] = 'Couldn\'t write to file %d' % path
            else:
                resp['error'] = 3
                resp['msg'] = 'File must have a .js suffix'
        else:
            resp['error'] = 1
            resp['msg'] = 'No Data or Path specified'
        return self.send_json(resp)

    def browse(self):
        # Get the directory to scan
        dir = ''
        if 'dir' in self.query_params:
            dir = self.query_params['dir'][0].replace('..', '')
        # NOTE(review): if the 'dir' parameter is absent, dir is '' and
        # dir[-1] raises IndexError — confirm the editor always sends it.
        if dir[-1] != '/':
            dir += '/'

        # Get the dir and files
        dirs = [os.path.join(dir, d) for d in os.listdir(os.path.join(BASE_DIR, dir))
                if os.path.isdir(os.path.join(dir, d))]
        files = glob.glob(dir + '*.*')

        # Filter on file types
        if 'type' in self.query_params:
            types = self.query_params['type']
            if 'images' in types:
                files = [f for f in files
                         if os.path.splitext(f)[1] in SETTINGS['image-types']]
            elif 'scripts' in types:
                files = [f for f in files
                         if os.path.splitext(f)[1] == '.js']

        # Normalise Windows path separators for the browser.
        if os.name == 'nt':
            files = [f.replace('\\', '/') for f in files]
            dirs = [d.replace('\\', '/') for d in dirs]

        response = {
            'files': files,
            'dirs': dirs,
            'parent': False if dir == './' else os.path.dirname(os.path.dirname(dir))
        }
        return self.send_json(response)

    def glob(self):
        # Expand the editor's glob patterns; '..' stripped for safety.
        globs = self.query_params['glob[]']
        files = []
        for g in globs:
            g = g.replace('..', '')
            more = glob.glob(g)
            files.extend(more)
        if os.name == 'nt':
            files = [f.replace('\\', '/') for f in files]
        return self.send_json(files)

    def guess_type(self, path):
        # Use the mimetypes registry, then the custom fallback table.
        type, _ = mimetypes.guess_type(path)
        if not type:
            ext = path.split('.')[-1]
            if ext in SETTINGS['mimetypes'].keys():
                type = SETTINGS['mimetypes'][ext]
        # Winblows hack
        # NOTE(review): type can still be None here for unknown extensions,
        # making type.startswith raise — confirm callers tolerate that.
        if os.name == "nt" and type.startswith("image"):
            type = type.replace("x-", "")
        return type

    def serve_file(self):
        path = self.file_path
        # '/' -> game page, '/editor' -> Weltmeister.
        if path == '/':
            path = 'index.html'
        elif path == '/editor':
            path = 'weltmeister.html'

        # Remove the leading forward slash
        if path[0] == '/':
            path = path[1:]

        # Security, remove the ..
        path = path.replace('..', '')

        # Determine the fullpath
        path = os.path.join(BASE_DIR, path)

        try:
            data = open(path, 'rb').read()
            type = self.guess_type(path)
            self.send_response(data, 200, headers={'Content-Type': type})
        except:
            # Any failure (missing file, read error) -> 404, except the
            # favicon, which gets the built-in blank GIF.
            if '/favicon.ico' in path:
                self.send_response(FAVICON_GIF, 200, headers={'Content-Type': 'image/gif'})
            else:
                self.send_response('', 404)

    def barf(self):
        # Unsupported method/endpoint combination.
        self.send_response('barf', 405)


def main():
    'Start the development server and serve forever.'
    addr = ('', SETTINGS['port'])
    server = BaseHTTPServer.HTTPServer(addr, HTTPHandler)
    print 'Running ImpactJS Server\nGame: http://localhost:%d\nEditor: http://localhost:%d/editor' % (addr[1], addr[1])
    server.serve_forever()

if __name__ == '__main__':
    main()
import time
import unittest

import sqlalchemy as _sqla
import sqlalchemy.orm as _sqla_orm

from .. import models


class BaseTestCase(unittest.TestCase):
    """Shared fixture: in-memory SQLite DB, a session, and default Ent data."""

    def setUp(self):
        self.engine = _sqla.create_engine('sqlite://')
        models.utils.Base.metadata.create_all(self.engine)
        self.Session = _sqla_orm.sessionmaker(bind=self.engine)
        self.session = self.Session()
        self.ent_type = 'some_ent_type'
        self.props = {
            'str_prop': 'str',
            'int_prop': 1,
            'bool_prop': True,
            'mapping_prop': {'some': {'nested': 'prop'}},
            'sequence_prop': ['some', 'sequence']
        }
        self.tags = {'tag_%s' % i for i in range(3)}

    def generate_ent(self, **kwargs):
        # Build an Ent with the default fixture values; kwargs override them.
        return models.Ent(**{
            'ent_type': self.ent_type,
            'props': self.props,
            'tags': self.tags,
            **kwargs
        })


class CreateEntTestCase(BaseTestCase):
    """Creating an Ent populates timestamps, key, props and tags."""

    def setUp(self):
        super().setUp()
        self.ent = self.generate_ent()
        self.session.add(self.ent)
        self.session.commit()

    def test_has_timestamps(self):
        self.assertTrue(self.ent.created is not None)
        self.assertTrue(self.ent.modified is not None)

    def test_default_key_startswith_ent_ent_type(self):
        self.assertTrue(self.ent.key.startswith('ent:' + self.ent_type))

    def test_has_props(self):
        self.assertTrue(self.ent.props, self.props)

    def test_has_tags(self):
        self.assertTrue(self.ent.tags, self.tags)


class QueryEntTestCase(BaseTestCase):
    """Filtering Ents by created time, props and tags."""

    def test_filter_created(self):
        ents = []
        for i in range(3):
            ent = self.generate_ent()
            self.session.add(ent)
            self.session.commit()
            ents.append(ent)
            # Sleep so consecutive Ents get strictly increasing timestamps.
            time.sleep(1e-3)
        newer_than_ent_0 = (self.session.query(models.Ent)
                            .filter(models.Ent.created > ents[0].created)
                            .all())
        self.assertEqual(newer_than_ent_0, ents[1:])
        older_than_ent_2 = (self.session.query(models.Ent)
                            .filter(models.Ent.created < ents[2].created)
                            .all())
        self.assertEqual(older_than_ent_2, ents[:2])

    def test_filter_props(self):
        ents = []
        for i in range(3):
            ent = self.generate_ent(
                props={'int_prop': i, 'str_prop': 'str_%s' % i}
            )
            self.session.add(ent)
            self.session.commit()
            ents.append(ent)
        ents_w_int_eq_1 = (
            self.session.query(models.Ent)
            .filter(models.Ent.props_set.any(key='int_prop', value=1))
            .all()
        )
        self.assertEqual(ents_w_int_eq_1, [ents[1]])
        ents_w_int_gt_1 = (
            self.session.query(models.Ent)
            .filter(
                models.Ent.props_set.any(
                    (models.Ent.Prop.key == 'int_prop')
                    & (models.Ent.Prop.value > 1)
                )
            )
            .all()
        )
        self.assertEqual(ents_w_int_gt_1, ents[2:])

    def test_filter_tags(self):
        ents = [
            self.generate_ent(tags={'tag_1', 'tag_2'}),
            self.generate_ent(tags={'tag_2', 'tag_3'}),
            self.generate_ent(tags={'tag_1', 'tag_3'}),
        ]
        self.session.add_all(ents)
        self.session.commit()
        ents_w_tag_1 = (
            self.session.query(models.Ent)
            .filter(models.Ent.tags_set.any(name='tag_1'))
            .all()
        )
        self.assertEqual(set(ents_w_tag_1), set([ents[0], ents[2]]))
        ents_w_tag_2 = (
            self.session.query(models.Ent)
            .filter(models.Ent.tags_set.any(name='tag_2'))
            .all()
        )
        self.assertEqual(set(ents_w_tag_2), set([ents[0], ents[1]]))
        ents_w_nonexistent_tag = (
            self.session.query(models.Ent)
            .filter(models.Ent.tags_set.any(name='nonexistent_tag'))
            .all()
        )
        self.assertEqual(set(ents_w_nonexistent_tag), set())


class LineageTestCase(BaseTestCase):
    """Parent/child/ancestor relationships across two three-generation families."""

    def setUp(self):
        super().setUp()
        self.families = self._create_families()

    def _create_families(self):
        families = {}
        for i in range(2):
            family_key = ('family_%s' % i)
            families[family_key] = self._create_family(family_key=family_key)
        return families

    def _create_family(self, family_key=None):
        # 4 grandparents -> 2 pairs -> 2 parents -> 3 children.
        common_props = {'family_key': family_key}
        grandparents = [
            self.generate_ent(
                key=('%s:grandparent_%s' % (family_key, i)),
                props={**common_props, 'generation': 'grandparents'}
            )
            for i in range(4)
        ]
        grandparent_pairs = [
            [grandparents[0], grandparents[1]],
            [grandparents[2], grandparents[3]]
        ]
        parents = []
        for i, grandparent_pair in enumerate(grandparent_pairs):
            parents.append(
                self.generate_ent(
                    key=('%s:parent_%s' % (family_key, i)),
                    props={**common_props, 'generation': 'parents'},
                    parents=grandparent_pair,
                    ancestors=grandparent_pair
                )
            )
        children = [
            self.generate_ent(
                key=('%s:child_%s' % (family_key, i)),
                props={**common_props, 'generation': 'children'},
                parents=parents,
                ancestors=(grandparents + parents)
            )
            for i in range(3)
        ]
        self.session.add_all(grandparents + parents + children)
        self.session.commit()
        family = {
            'grandparents': grandparents,
            'grandparent_pairs': grandparent_pairs,
            'parents': parents,
            'children': children
        }
        return family

    def test_parents(self):
        for family in self.families.values():
            for child in family['children']:
                self.assertEqual(child.parents, family['parents'])
            for i, parent in enumerate(family['parents']):
                self.assertEqual(
                    parent.parents,
                    family['grandparent_pairs'][i]
                )

    def test_children(self):
        for family in self.families.values():
            for parent in family['parents']:
                self.assertEqual(
                    set(parent.children),
                    set(family['children'])
                )
            for i, gp_pair in enumerate(family['grandparent_pairs']):
                for grandparent in gp_pair:
                    self.assertEqual(
                        set(grandparent.children),
                        set([family['parents'][i]])
                    )

    def test_ancestors(self):
        for family in self.families.values():
            for child in family['children']:
                self.assertEqual(
                    set(child.ancestors),
                    set(family['grandparents'] + family['parents'])
                )

    def test_descendants(self):
        for family in self.families.values():
            for grandparent in family['grandparents']:
                self.assertEqual(
                    set(grandparent.descendants),
                    set(grandparent.children + family['children'])
                )

    def test_query_on_parents(self):
        for family in self.families.values():
            children_of_grandparent_pair_0 = (
                self.session.query(models.Ent)
                .join(models.Ent.parents, aliased=True, from_joinpoint=True)
                .filter(
                    models.Ent.key.in_([
                        grandparent.key
                        for grandparent in family['grandparent_pairs'][0]
                    ])
                )
                .reset_joinpoint()
                .all()
            )
            self.assertEqual(children_of_grandparent_pair_0, [family['parents'][0]])

    def test_query_on_ancestors(self):
        for family_key, family in self.families.items():
            # Children of this family, found via a props filter plus an
            # aliased join on the ancestors relationship.
            descendants = (
                self.session.query(models.Ent)
                .filter(
                    models.Ent.props_set.any(
                        key='generation', value='children'
                    )
                )
                .join(models.Ent.ancestors, aliased=True, from_joinpoint=True)
                .filter(
                    models.Ent.props_set.any(
                        key='family_key', value=family_key
                    )
                )
                .reset_joinpoint()
                .all()
            )
            self.assertEqual(set(descendants), set(family['children']))
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to distributed training.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribute as distribute_lib


# TODO(priyag, sourabhbajaj): Refactor this file to address code duplication.


def fit_loop(
    model,
    iterator,
    epochs=100,
    verbose=1,
    callbacks=None,
    val_iterator=None,
    initial_epoch=0,
    steps_per_epoch=None,
    validation_steps=None):
  """Fit loop for training with DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      iterator: Iterator for input data.
      epochs: Number of times to iterate over the data
      verbose: Integer, Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      val_iterator: Iterator for validation data.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: in case of invalid arguments.
  """
  current_strategy = model._distribution_strategy

  # TPU has its own dedicated codepath below.
  # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
  if current_strategy.__class__.__name__ == 'TPUStrategy':
    return _experimental_fit_loop(
        model, iterator, epochs, verbose, callbacks, initial_epoch,
        steps_per_epoch)

  # Replicate the model onto each tower once, lazily.
  if not model._grouped_model:
    clone_model_on_towers(model, current_strategy, make_callback_model=True)

  def _per_device_train_function(model):
    # Build (or fetch) the per-replica train function and expose its pieces
    # so they can be re-assembled into one cross-replica function.
    model._make_train_function()
    return (model.train_function.inputs,
            model.train_function.outputs,
            model.train_function.updates_op,
            model.train_function.session_kwargs)

  inputs, targets = _get_input_from_iterator(iterator, model)
  with current_strategy.scope():
    # Create train ops on each of the devices when we call
    # `_per_device_train_function`.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_tower(
         _per_device_train_function, model._grouped_model)
    # Unwrap all the per device values returned from `call_for_each_tower`.
    # Unwrapping per device values gives you a list of values that can be
    # used to construct a new train function that is composed of update ops on
    # all the devices over which the model is distributed.
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs,
         grouped_updates, grouped_session_args, with_loss_tensor=True)

    # Dataset inputs and targets are also per devices values that need to be
    # unwrapped.
    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)
    dataset_targets = distributed_training_utils.flatten_perdevice_values(
        current_strategy, targets)

  # Create a train function that is composed of all the parameters above.
  distributed_train_function = K.Function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_train_function',
      **all_session_args)

  # We need to set sample_weights to None since there are sample weight
  # placeholders that are created with default values.
  sample_weights = [None for _ in range(len(model.outputs) *
                                        current_strategy.num_towers)]
  # The trailing [1] is the learning-phase flag (training mode).
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + dataset_targets + sample_weights + [1]
  else:
    ins = dataset_inputs + dataset_targets

  do_validation = False
  if validation_steps:
    do_validation = True

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  distributed_model = current_strategy.unwrap(model._grouped_model)[0]
  distributed_training_utils.set_weights(
      current_strategy, distributed_model, orig_model_weights)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=do_validation,
      val_inputs=None,
      val_targets=None,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      verbose=verbose)
  out_labels = model.metrics_names or []
  callbacks.on_train_begin()

  # This loop only supports step-based training (dataset iterators).
  assert steps_per_epoch is not None

  for epoch in range(initial_epoch, epochs):
    # Reset stateful metrics
    for m in model.stateful_metric_functions:
      m.reset_states()
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    for step_index in range(steps_per_epoch):
      batch_logs = {'batch': step_index, 'size': 1}
      callbacks.on_batch_begin(step_index, batch_logs)
      try:
        outs = distributed_train_function(ins)
      except errors.OutOfRangeError:
        # NOTE(review): operator-precedence bug below — `%` binds tighter
        # than `*`, so the string is formatted with steps_per_epoch and then
        # repeated `epochs` times; should be `% (steps_per_epoch * epochs)`.
        logging.warning('Your dataset iterator ran out of data; '
                        'interrupting training. Make sure that your dataset '
                        'can generate at least `steps_per_epoch * epochs` '
                        'batches (in this case, %d batches).' %
                        steps_per_epoch * epochs)
        break
      if not isinstance(outs, list):
        outs = [outs]
      # Combine the per-tower metric values into single aggregate numbers.
      outs = _aggregate_metrics_across_towers(current_strategy.num_towers,
                                              out_labels,
                                              model.stateful_metric_names,
                                              outs)
      for l, o in zip(out_labels, outs):
        batch_logs[l] = o
      callbacks.on_batch_end(step_index, batch_logs)
      if callbacks.model.stop_training:
        break
    if do_validation:
      val_outs = test_loop(
          model,
          val_iterator,
          steps=validation_steps,
          verbose=0)
      if not isinstance(val_outs, list):
        val_outs = [val_outs]
      # Same labels assumed.
      for l, o in zip(out_labels, val_outs):
        epoch_logs['val_' + l] = o

    callbacks.on_epoch_end(epoch, epoch_logs)
    if callbacks.model.stop_training:
      break
  callbacks.on_train_end()

  # Copy the weights back from the replicated model to the original model.
updated_weights = current_strategy.unwrap( model._grouped_model)[0].get_weights() model.set_weights(updated_weights) return model.history def _experimental_fit_loop( model, iterator, epochs=100, verbose=1, callbacks=None, initial_epoch=0, steps_per_epoch=None): """Fit loop for training with TPU DistributionStrategy. Arguments: model: Keras Model instance. iterator: Iterator that returns inputs and targets epochs: Number of times to iterate over the data verbose: Integer, Verbosity mode, 0, 1 or 2 callbacks: List of callbacks to be called during training initial_epoch: Epoch at which to start training (useful for resuming a previous training run) steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. Returns: Returns `None`. Raises: ValueError: in case of invalid arguments. """ current_strategy = model._distribution_strategy K.get_session().run(current_strategy.initialize()) def _per_device_train_function(model): model._make_train_function() return (model.train_function.inputs, model.train_function.outputs, model.train_function.updates_op, model.train_function.session_kwargs) # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here. K.set_learning_phase(1) def step_fn(ctx, inputs, targets): """Clones the model and calls make_train_function.""" # TODO(priyag, sourabhbajaj): The model gets cloned every time # fit/test/predict is called. We should look into caching this keyed on # input shapes. 
clone_model_on_towers( model, current_strategy, make_callback_model=True, inputs=inputs, targets=targets) (grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) = current_strategy.call_for_each_tower( _per_device_train_function, model._grouped_model) (all_inputs, all_outputs, all_updates, all_session_args) = distributed_training_utils.unwrap_values( current_strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) combined_fn = K.Function( all_inputs, all_outputs, updates=all_updates, name='distributed_train_function', **all_session_args) out_labels = model.metrics_names or [] for label, output in zip(out_labels, combined_fn.outputs): if label == 'loss': aggregation = distribute_lib.get_loss_reduction() else: # We aggregate all other metrics using mean for now. This is temporary # workaround until new metrics are in place. aggregation = variable_scope.VariableAggregation.MEAN ctx.set_last_step_output(label, output, aggregation) # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn: # feed_dict, session kwargs, run options, run_metadata for now. These should # be handled appropriately return combined_fn.updates_op # Add initial dummy values for loss and other metric tensors. 
initial_loop_values = {} initial_loop_values['loss'] = constant_op.constant(1e7) for name, tensor in zip(model.metrics_names[1:], model.metrics_tensors): initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype) if steps_per_epoch is None: raise ValueError('steps_per_epoch should be specified in the fit call.') steps_per_run_var = K.variable( value=min(steps_per_epoch, current_strategy.steps_per_run), dtype='int32', name='steps_per_run_var') with current_strategy.scope(): ctx = current_strategy.run_steps_on_dataset( step_fn, iterator, iterations=steps_per_run_var, initial_loop_values=initial_loop_values) train_op = ctx.run_op output_tensors = ctx.last_step_outputs # Copy the weights from the original model to each of the replicated models. orig_model_weights = model.get_weights() with current_strategy.scope(): distributed_model = current_strategy.unwrap(model._grouped_model)[0] distributed_training_utils.set_weights( current_strategy, distributed_model, orig_model_weights) callbacks = cbks.configure_callbacks( callbacks, model, do_validation=False, val_inputs=None, val_targets=None, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose) # TODO(priyag, sourabhbajaj): Add callbacks support for per step callback # TODO(priyag, sourabhbajaj): Add validation. # Calculate the steps each time on the device. 
steps_to_run = [current_strategy.steps_per_run] * ( steps_per_epoch // current_strategy.steps_per_run) if steps_per_epoch % current_strategy.steps_per_run: steps_to_run.append(steps_per_epoch % current_strategy.steps_per_run) callbacks.on_train_begin() for epoch in range(initial_epoch, epochs): callbacks.on_epoch_begin(epoch) epoch_logs = {} step_index = 0 prev_step_count = None for step_count in steps_to_run: batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count} callbacks.on_batch_begin(step_index, batch_logs) if prev_step_count is None or step_count != prev_step_count: steps_per_run_var.load(step_count, K.get_session()) prev_step_count = step_count try: _, outputs = K.get_session().run([train_op, output_tensors]) except errors.OutOfRangeError: logging.warning('Your dataset iterator ran out of data; ' 'interrupting training. Make sure that your dataset ' 'can generate at least `steps_per_epoch * epochs` ' 'batches (in this case, %d batches).' % steps_per_epoch * epochs) break batch_logs.update(outputs) callbacks.on_batch_end(step_index, batch_logs) step_index = step_index + step_count if callbacks.model.stop_training: break callbacks.on_epoch_end(epoch, epoch_logs) if callbacks.model.stop_training: break callbacks.on_train_end() # Copy the weights back from the replicated model to the original model. with current_strategy.scope(): updated_weights = current_strategy.unwrap( model._grouped_model)[0].get_weights() model.set_weights(updated_weights) K.get_session().run(current_strategy.finalize()) return model.history def test_loop(model, iterator, verbose=0, steps=None): """Test loop for evaluating with DistributionStrategy. Arguments: model: Keras Model instance. iterator: Iterator for input data. verbose: Integer, Verbosity mode 0 or 1. steps: Total number of steps (batches of samples) before declaring predictions finished. Ignored with the default value of `None`. 
Returns: Scalar loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the outputs. """ current_strategy = model._distribution_strategy # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged. if current_strategy.__class__.__name__ == 'TPUStrategy': return _experimental_test_loop(model, iterator, verbose, steps) if not model._grouped_model: clone_model_on_towers(model, current_strategy) def _per_device_test_function(model): model._make_test_function() return (model.test_function.inputs, model.test_function.outputs, model.test_function.updates_op, model.test_function.session_kwargs) inputs, targets = _get_input_from_iterator(iterator, model) with current_strategy.scope(): (grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) = current_strategy.call_for_each_tower( _per_device_test_function, model._grouped_model) (all_inputs, all_outputs, all_updates, all_session_args) = distributed_training_utils.unwrap_values( current_strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args, with_loss_tensor=True) dataset_inputs = distributed_training_utils.flatten_perdevice_values( current_strategy, inputs) dataset_targets = distributed_training_utils.flatten_perdevice_values( current_strategy, targets) distributed_test_function = K.Function( all_inputs, all_outputs, updates=all_updates, name='distributed_test_function', **all_session_args) # We need to set sample_weights to None since there are sample weight # placeholders that are created with default values. 
sample_weights = [None for _ in range(len(model.outputs) * current_strategy.num_towers)] if model.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = dataset_inputs + dataset_targets + sample_weights + [0] else: ins = dataset_inputs + dataset_targets for m in model.stateful_metric_functions: m.reset_states() stateful_metric_indices = [ i for i, name in enumerate(model.metrics_names) if str(name) in model.stateful_metric_names ] outs = [] if verbose == 1: progbar = Progbar(target=steps) # Copy the weights from the original model to each of the replicated models. orig_model_weights = model.get_weights() distributed_model = current_strategy.unwrap(model._grouped_model)[0] distributed_training_utils.set_weights( current_strategy, distributed_model, orig_model_weights) assert steps is not None for step in range(steps): batch_outs = distributed_test_function(ins) batch_outs = _aggregate_metrics_across_towers( current_strategy.num_towers, model.metrics_names, model.stateful_metric_names, batch_outs) if isinstance(batch_outs, list): if step == 0: outs = [0.] * len(batch_outs) for i, batch_out in enumerate(batch_outs): if i in stateful_metric_indices: outs[i] = batch_out else: outs[i] += batch_out else: if step == 0: outs.append(0.) outs[0] += batch_outs if verbose >= 1: progbar.update(step + 1) for i in range(len(outs)): if i not in stateful_metric_indices: outs[i] /= steps if len(outs) == 1: return outs[0] return outs def _experimental_test_loop(model, iterator, verbose=0, steps=None): """Test loop for evaluating with TPU DistributionStrategy. Arguments: model: Keras Model instance. iterator: Iterator for input data. verbose: Integer, Verbosity mode 0 or 1. steps: Total number of steps (batches of samples) before declaring predictions finished. Ignored with the default value of `None`. Returns: Scalar loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). 
The attribute `model.metrics_names` will give you the display labels for the outputs. """ current_strategy = model._distribution_strategy K.get_session().run(current_strategy.initialize()) def _per_device_test_function(model): model._make_test_function() return (model.test_function.inputs, model.test_function.outputs, model.test_function.updates_op, model.test_function.session_kwargs) # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here. K.set_learning_phase(0) def step_fn(ctx, inputs, targets): """Clones the model and calls make_test_function.""" # TODO(priyag, sourabhbajaj): The model gets cloned every time # fit/test/predict is called. We should look into caching this keyed on # input shapes. clone_model_on_towers( model, current_strategy, make_callback_model=False, inputs=inputs, targets=targets) (grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) = current_strategy.call_for_each_tower( _per_device_test_function, model._grouped_model) (all_inputs, all_outputs, all_updates, all_session_args) = distributed_training_utils.unwrap_values( current_strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) combined_fn = K.Function( all_inputs, all_outputs, updates=all_updates, name='distributed_test_function', **all_session_args) for label, output in zip(model.metrics_names, combined_fn.outputs): if label == 'loss': aggregation = distribute_lib.get_loss_reduction() else: # We aggregate all other metrics using mean for now. This is temporary # workaround until new metrics are in place. aggregation = variable_scope.VariableAggregation.MEAN ctx.set_last_step_output(label, output, aggregation) return combined_fn.updates_op # Add initial dummy values for loss and other metric tensors. 
initial_loop_values = {} initial_loop_values['loss'] = constant_op.constant(1e7) for name, tensor in zip(model.metrics_names[1:], model.metrics_tensors): initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype) with current_strategy.scope(): # TODO(priyag): Use steps_per_run when we use new metrics as they will # allow handling metric computation at each step using variables. ctx = current_strategy.run_steps_on_dataset( step_fn, iterator, iterations=1, initial_loop_values=initial_loop_values) test_op = ctx.run_op output_tensors = ctx.last_step_outputs if verbose == 1: progbar = Progbar(target=steps) # Copy the weights from the original model to each of the replicated models. orig_model_weights = model.get_weights() with current_strategy.scope(): distributed_model = current_strategy.unwrap(model._grouped_model)[0] distributed_training_utils.set_weights( current_strategy, distributed_model, orig_model_weights) assert steps is not None outs = [0.] * len(model.metrics_names) for step in range(steps): _, batch_outs = K.get_session().run([test_op, output_tensors]) for i, label in enumerate(model.metrics_names): outs[i] += batch_outs[label] if verbose >= 1: progbar.update(step + 1) for i in range(len(outs)): outs[i] /= (steps) K.get_session().run(current_strategy.finalize()) if len(outs) == 1: return outs[0] return outs def predict_loop(model, iterator, verbose=0, steps=None): """Predict loop for predicting with DistributionStrategy. Arguments: model: Keras Model instance. iterator: Iterator for input data. verbose: Integer, Verbosity mode 0 or 1. steps: Total number of steps (batches of samples) before declaring `_predict_loop` finished. Ignored with the default value of `None`. Returns: Array of predictions (if the model has a single output) or list of arrays of predictions (if the model has multiple outputs). """ current_strategy = model._distribution_strategy # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged. 
if current_strategy.__class__.__name__ == 'TPUStrategy': return _experimental_predict_loop(model, iterator, verbose, steps) if not model._grouped_model: clone_model_on_towers(model, current_strategy) def _per_device_predict_function(model): model._make_predict_function() return (model.predict_function.inputs, model.predict_function.outputs, model.predict_function.updates_op, model.predict_function.session_kwargs) inputs, _ = _get_input_from_iterator(iterator, model) with current_strategy.scope(): (grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) = current_strategy.call_for_each_tower( _per_device_predict_function, model._grouped_model) (all_inputs, all_outputs, all_updates, all_session_args) = distributed_training_utils.unwrap_values( current_strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) dataset_inputs = distributed_training_utils.flatten_perdevice_values( current_strategy, inputs) distributed_predict_function = K.Function( all_inputs, all_outputs, updates=all_updates, name='distributed_predict_function', **all_session_args) if model.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = dataset_inputs + [0] else: ins = dataset_inputs if verbose == 1: progbar = Progbar(target=steps) # Copy the weights from the original model to each of the replicated models. orig_model_weights = model.get_weights() distributed_model = current_strategy.unwrap(model._grouped_model)[0] distributed_training_utils.set_weights( current_strategy, distributed_model, orig_model_weights) if steps is not None: # Since we do not know how many samples we will see, we cannot # pre-allocate the returned Numpy arrays. Instead, we store one array per # batch seen and concatenate them upon returning. 
unconcatenated_outs = [] for step in range(steps): batch_outs = distributed_predict_function(ins) if not isinstance(batch_outs, list): batch_outs = [batch_outs] if step == 0: for _ in batch_outs: unconcatenated_outs.append([]) # TODO(anjalisridhar): Should combine the outputs from multiple towers # correctly here. for i, batch_out in enumerate(batch_outs): unconcatenated_outs[i].append(batch_out) if verbose >= 1: progbar.update(step + 1) if len(unconcatenated_outs) == 1: return np.concatenate(unconcatenated_outs[0], axis=0) return [ np.concatenate(unconcatenated_outs[i], axis=0) for i in range(len(unconcatenated_outs)) ] def _experimental_predict_loop(model, iterator, verbose=0, steps=None): """Predict loop for predicting with TPU DistributionStrategy. Arguments: model: Keras Model instance. iterator: Iterator for input data. verbose: Integer, Verbosity mode 0 or 1. steps: Total number of steps (batches of samples) before declaring `_predict_loop` finished. Ignored with the default value of `None`. Returns: Array of predictions (if the model has a single output) or list of arrays of predictions (if the model has multiple outputs). """ current_strategy = model._distribution_strategy K.get_session().run(current_strategy.initialize()) # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here. K.set_learning_phase(0) def _per_device_predict_function(model): model._make_predict_function() return (model.predict_function.inputs, model.predict_function.outputs, model.predict_function.updates_op, model.predict_function.session_kwargs) def step_fn(ctx, inputs, targets): """Clones the model and calls make_predict_function.""" # TODO(anjalisridhar): Support predict input correctly as it will not # contain targets, only inputs. del targets # TODO(priyag, sourabhbajaj): The model gets cloned every time # fit/test/predict is called. We should look into caching this keyed on # input shapes. 
clone_model_on_towers( model, current_strategy, make_callback_model=False, inputs=inputs) (grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) = current_strategy.call_for_each_tower( _per_device_predict_function, model._grouped_model) (all_inputs, all_outputs, all_updates, all_session_args) = distributed_training_utils.unwrap_values( current_strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) combined_fn = K.Function( all_inputs, all_outputs, updates=all_updates, name='distributed_predict_function', **all_session_args) for label, output in zip(model.output_names, combined_fn.outputs): ctx.set_last_step_output(label, output) return combined_fn.updates_op # Add initial dummy values for outputs. initial_loop_values = {} batch_dimension = distributed_training_utils.get_batch_dimension(iterator) for name, tensor in zip(model.output_names, model.outputs): # TODO(priyag): This is a workaround as we do not know the batch dimension # of the model's output at this point. shape = tensor_shape.TensorShape(tensor.shape.dims) shape.dims = [batch_dimension] + shape.dims[1:] initial_loop_values[name] = array_ops.zeros(shape, tensor.dtype) with current_strategy.scope(): # TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed. ctx = current_strategy.run_steps_on_dataset( step_fn, iterator, iterations=1, initial_loop_values=initial_loop_values) predict_op = ctx.run_op output_tensors = ctx.last_step_outputs if verbose == 1: progbar = Progbar(target=steps) # Copy the weights from the original model to each of the replicated models. orig_model_weights = model.get_weights() with current_strategy.scope(): distributed_model = current_strategy.unwrap(model._grouped_model)[0] distributed_training_utils.set_weights( current_strategy, distributed_model, orig_model_weights) assert steps is not None # Since we do not know how many samples we will see, we cannot pre-allocate # the returned Numpy arrays. 
Instead, we store one array per batch seen # and concatenate them upon returning. unconcatenated_outs = [[] for _ in model.outputs] for step in range(steps): _, batch_outs = K.get_session().run([predict_op, output_tensors]) # TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy. for i, label in enumerate(model.output_names): unconcatenated_outs[i].extend(batch_outs[label]) if verbose >= 1: progbar.update(step + 1) K.get_session().run(current_strategy.finalize()) if len(unconcatenated_outs) == 1: return np.concatenate(unconcatenated_outs[0], axis=0) return [ np.concatenate(unconcatenated_outs[i], axis=0) for i in range(len(unconcatenated_outs)) ] def _clone_and_build_model(model, inputs=None, targets=None): """Clone and build the given keras_model.""" # We need to set the import here since we run into a circular dependency # error. from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top cloned_model = models.clone_model(model, input_tensors=inputs) # Compile and build model. if isinstance(model.optimizer, optimizers.TFOptimizer): optimizer = model.optimizer else: optimizer_config = model.optimizer.get_config() optimizer = model.optimizer.__class__.from_config(optimizer_config) # TODO(priyag): Is there a cleaner way to do this? The API doc suggests a # single tensor should be OK but it throws an error in that case. 
if (targets is not None and not isinstance(targets, list) and not isinstance(targets, dict)): targets = [targets] cloned_model.compile( optimizer, model.loss, metrics=metrics_module.clone_metrics(model.metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics(model.weighted_metrics), target_tensors=targets) return cloned_model def clone_model_on_towers( model, strategy, make_callback_model=False, inputs=None, targets=None): """Create a cloned model on each tower.""" with strategy.scope(): model._grouped_model = strategy.call_for_each_tower( _clone_and_build_model, model, inputs, targets) if make_callback_model: model._make_callback_model() def _aggregate_metrics_across_towers(num_devices, out_labels, stateful_metric_names, outs): """Aggregates stateless metrics values across towers. When using `MirroredStrategy`, the number of towers is equal to the number of devices over which training is distributed. This may not always be the case. Args: num_devices: Number of devices over which the model is being distributed. out_labels: The list of metric names passed to `compile`. stateful_metric_names: List of stateful metric names on the model. outs: The output from all the towers. Returns: The average value of each metric across the towers. """ # TODO(anjalisridhar): Temporary workaround for aggregating metrics # across towers. Replace with the new metrics module eventually. merged_output = [] # The first output is the total loss. merged_output.append(outs[0]) current_index = 1 # Each label in `out_labels` corresponds to one set of metrics. The # number of metric values corresponds to the number of devices. We # currently take the mean of the values. for metric_name in out_labels[1:]: if metric_name in stateful_metric_names: # For stateful metrics, we get one aggregated result value. 
      # Stateful metrics arrive as a single aggregated value (see the
      # comment above): pass it through unchanged.
      merged_output.append(outs[current_index])
      current_index += 1
    else:
      # Stateless metrics produce one value per device; report their mean.
      m = np.mean(outs[current_index:current_index + num_devices])
      merged_output.append(m)
      current_index += num_devices
  return merged_output


def _get_input_from_iterator(iterator, model):
  """Get elements from the iterator and verify the input shape and type."""
  next_element = iterator.get_next()

  # The iterator may yield an (inputs, targets) tuple or inputs alone
  # (in which case targets is None).
  if isinstance(next_element, tuple):
    x, y = next_element
  else:
    x = next_element
    y = None

  # Validate that all the elements in x and y are of the same type and shape.
  # We can then pass the first element of x and y to `_standardize_weights`
  # below and be confident of the output.
  x_values, y_values = distributed_training_utils.\
      validate_distributed_dataset_inputs(model._distribution_strategy, x, y)
  # TODO(sourabhbajaj): Add support for sample weights in distribution
  # strategy.
  model._standardize_weights(x_values, y_values)
  return x, y
# -*- coding: utf-8 -*- import os import io import json import shutil import six from six.moves import zip_longest import zipfile from .. import base from girder.constants import AccessType from girder.models.assetstore import Assetstore from girder.models.folder import Folder from girder.models.item import Item from girder.models.token import Token from girder.models.user import User def setUpModule(): base.startServer() def tearDownModule(): base.stopServer() class ItemTestCase(base.TestCase): def setUp(self): base.TestCase.setUp(self) # Create a set of users so we can have some folders. self.users = [User().createUser( 'usr%s' % num, 'passwd', 'tst', 'usr', 'u%s@u.com' % num) for num in [0, 1]] folders = Folder().childFolders(self.users[0], 'user', user=self.users[0]) for folder in folders: if folder['name'] == 'Public': self.publicFolder = folder else: self.privateFolder = folder self.assetstore = Assetstore().getCurrent() root = self.assetstore['root'] # Clean out the test assetstore on disk shutil.rmtree(root) # First clean out the temp directory tmpdir = os.path.join(root, 'temp') if os.path.isdir(tmpdir): for tempname in os.listdir(tmpdir): os.remove(os.path.join(tmpdir, tempname)) def _createItem(self, parentId, name, description, user): params = { 'name': name, 'description': description, 'folderId': parentId } resp = self.request(path='/item', method='POST', params=params, user=user) self.assertStatusOk(resp) assert 'meta' in resp.json return resp.json def _testUploadFileToItem(self, item, name, user, contents): """ Uploads a non-empty file to the server. 
""" # Initialize the upload resp = self.request( path='/file', method='POST', user=user, params={ 'parentType': 'item', 'parentId': item['_id'], 'name': name, 'size': len(contents) }) self.assertStatusOk(resp) uploadId = resp.json['_id'] # Send the first chunk resp = self.request( path='/file/chunk', method='POST', body=contents, user=user, params={ 'uploadId': uploadId }, type='application/octet-stream') self.assertStatusOk(resp) def _testDownloadSingleFileItem(self, item, user, contents): """ Downloads a single-file item from the server :param item: The item to download. :type item: dict :param contents: The expected contents. :type contents: str """ resp = self.request(path='/item/%s/download' % item['_id'], method='GET', user=user, isJson=False) self.assertStatusOk(resp) self.assertEqual(contents, self.getBody(resp)) self.assertEqual(resp.headers['Content-Disposition'], 'attachment; filename="file_1"') # Test downloading the item with contentDisposition=inline. params = {'contentDisposition': 'inline'} resp = self.request(path='/item/%s/download' % item['_id'], method='GET', user=user, isJson=False, params=params) self.assertStatusOk(resp) self.assertEqual(contents, self.getBody(resp)) self.assertEqual(resp.headers['Content-Disposition'], 'inline; filename="file_1"') # Test downloading with an offset resp = self.request(path='/item/%s/download' % item['_id'], method='GET', user=user, isJson=False, params={'offset': 1}) self.assertStatus(resp, 206) self.assertEqual(contents[1:], self.getBody(resp)) def _testDownloadMultiFileItem(self, item, user, contents, format=None): params = None if format: params = {'format': format} resp = self.request(path='/item/%s/download' % item['_id'], method='GET', user=user, isJson=False, params=params) self.assertStatusOk(resp) zipFile = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)), 'r') prefix = os.path.split(zipFile.namelist()[0])[0] expectedZip = {} for name in contents: expectedZip[os.path.join(prefix, name)] = 
contents[name] self.assertHasKeys(expectedZip, zipFile.namelist()) self.assertHasKeys(zipFile.namelist(), expectedZip) for name in zipFile.namelist(): expected = expectedZip[name] if not isinstance(expected, six.binary_type): expected = expected.encode('utf8') self.assertEqual(expected, zipFile.read(name)) def testLegacyItems(self): folder = Folder().createFolder( parent=self.users[0], parentType='user', creator=self.users[0], name='New Folder') item = Item().createItem( name='LegacyItem', creator=self.users[0], folder=folder) del item['meta'] item = Item().save(item) assert 'meta' not in item item = Item().load(item['_id'], user=self.users[0]) assert 'meta' in item def testItemDownloadAndChildren(self): curItem = self._createItem(self.publicFolder['_id'], 'test_for_download', 'fake description', self.users[0]) self._testUploadFileToItem(curItem, 'file_1', self.users[0], 'foobar') self._testDownloadSingleFileItem(curItem, self.users[0], 'foobar') self._testDownloadMultiFileItem(curItem, self.users[0], {'file_1': 'foobar'}, format='zip') self._testUploadFileToItem(curItem, 'file_2', self.users[0], 'foobz') resp = self.request(path='/item/%s/files' % curItem['_id'], method='GET', user=self.users[0]) self.assertStatusOk(resp) self.assertEqual(resp.json[0]['name'], 'file_1') self.assertEqual(resp.json[1]['name'], 'file_2') self.assertEqual(resp.json[0]['size'], 6) self.assertEqual(resp.json[1]['size'], 5) self._testDownloadMultiFileItem(curItem, self.users[0], {'file_1': 'foobar', 'file_2': 'foobz'}) def testItemCrud(self): """ Test Create, Read, Update, and Delete of items. 
""" self.ensureRequiredParams( path='/item', method='POST', required=('folderId',), user=self.users[1]) # Attempt to create an item without write permission, should fail params = { 'name': ' ', 'description': ' a description ', 'folderId': self.publicFolder['_id'] } resp = self.request(path='/item', method='POST', params=params, user=self.users[1]) self.assertStatus(resp, 403) # Shouldn't be allowed to have an empty name resp = self.request(path='/item', method='POST', params=params, user=self.users[0]) self.assertValidationError(resp, 'name') # Actually create the item in user 0's private folder params['name'] = ' my item name' params['folderId'] = self.privateFolder['_id'] resp = self.request(path='/item', method='POST', params=params, user=self.users[0]) self.assertStatusOk(resp) item = resp.json self.assertEqual(item['name'], params['name'].strip()) self.assertEqual(item['description'], params['description'].strip()) # User 1 should not be able to see the item via find by folderId params = { 'folderId': self.privateFolder['_id'] } resp = self.request(path='/item', method='GET', user=self.users[1], params=params) self.assertStatus(resp, 403) # Or by just requesting the item itself by ID resp = self.request(path='/item/%s' % str(item['_id']), method='GET', user=self.users[1]) self.assertStatus(resp, 403) # User 0 should be able to see the item resp = self.request(path='/item/%s' % str(item['_id']), method='GET', user=self.users[0]) self.assertStatusOk(resp) self.assertEqual(resp.json['_id'], item['_id']) self.assertEqual(resp.json['_modelType'], 'item') # Also from the children call resp = self.request(path='/item', method='GET', user=self.users[0], params=params) self.assertStatusOk(resp) self.assertEqual(resp.json[0]['_id'], item['_id']) # Test finding the item using a text string with and without a folderId params['text'] = 'my item name' resp = self.request(path='/item', method='GET', user=self.users[0], params=params) self.assertStatusOk(resp) 
# NOTE(review): this span begins inside a test method whose `def` line is
# above this window; the statements below continue that method's item
# find/update/move/delete assertions. Formatting reconstructed; code
# tokens unchanged.
        self.assertEqual(resp.json[0]['_id'], item['_id'])
        del params['folderId']
        resp = self.request(path='/item', method='GET', user=self.users[0],
                            params=params)
        self.assertStatusOk(resp)
        self.assertEqual(resp.json[0]['_id'], item['_id'])

        # A limit should work
        params['limit'] = 1
        resp = self.request(path='/item', method='GET', user=self.users[0],
                            params=params)
        self.assertStatusOk(resp)
        self.assertEqual(resp.json[0]['_id'], item['_id'])

        # An offset should give us nothing
        params['offset'] = 1
        resp = self.request(path='/item', method='GET', user=self.users[0],
                            params=params)
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), 0)

        # Finding should fail with no parameters
        resp = self.request(path='/item', method='GET', user=self.users[0],
                            params={})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], 'Invalid search mode.')

        # Test update of the item
        params = {
            'name': 'changed name',
            'description': 'new description'
        }
        resp = self.request(path='/item/%s' % item['_id'], method='PUT',
                            params=params, user=self.users[0])
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['name'], params['name'])
        self.assertEqual(resp.json['description'], params['description'])

        # Test moving an item to the public folder
        item = Item().load(item['_id'], force=True)
        self.assertFalse(Item().hasAccess(item))
        resp = self.request(path='/item/%s' % item['_id'], method='PUT',
                            user=self.users[0], params={
                                'folderId': self.publicFolder['_id']})
        self.assertStatusOk(resp)
        item = Item().load(resp.json['_id'], force=True)
        self.assertTrue(Item().hasAccess(item))

        # Move should fail if we don't have write permission on the
        # destination folder
        self.publicFolder = Folder().setUserAccess(
            self.publicFolder, self.users[1], AccessType.WRITE, save=True)
        resp = self.request(path='/item/%s' % item['_id'], method='PUT',
                            user=self.users[1], params={
                                'folderId': self.privateFolder['_id']})
        self.assertStatus(resp, 403)
        self.assertTrue(resp.json['message'].startswith(
            'Write access denied for folder'))

        # Try to update/PUT without an id
        resp = self.request(path='/item/', method='PUT', params=params,
                            user=self.users[0])
        self.assertStatus(resp, 400)

        # Try a bad endpoint (should 400)
        resp = self.request(path='/item/%s/blurgh' % item['_id'],
                            method='GET', user=self.users[1])
        self.assertStatus(resp, 400)

        # Try delete with no ID (should 400)
        resp = self.request(path='/item/', method='DELETE',
                            user=self.users[1])
        self.assertStatus(resp, 400)

        # User 1 should not be able to delete the item with read access
        self.publicFolder = Folder().setUserAccess(
            self.publicFolder, self.users[1], AccessType.READ, save=True)
        resp = self.request(path='/item/%s' % str(item['_id']),
                            method='DELETE', user=self.users[1])
        self.assertStatus(resp, 403)

        # User 1 should be able to delete the item with write access
        self.publicFolder = Folder().setUserAccess(
            self.publicFolder, self.users[1], AccessType.WRITE, save=True)
        resp = self.request(path='/item/%s' % str(item['_id']),
                            method='DELETE', user=self.users[1])
        self.assertStatusOk(resp)

        # Verify that the item is deleted
        item = Item().load(item['_id'])
        self.assertEqual(item, None)

    def testItemMetadataDirect(self):
        """
        Test that metadata can be set directly on item creation/update via
        the ``metadata`` request parameter, and that invalid JSON there is
        rejected with a 400.
        """
        params = {
            'name': 'item with metadata via POST',
            'description': ' a description ',
            'folderId': self.privateFolder['_id'],
            'metadata': 'not JSON'
        }
        # Non-JSON metadata on POST should be rejected outright
        resp = self.request(
            path='/item', method='POST', params=params, user=self.users[0])
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Parameter metadata must be valid JSON.')

        # Add some metadata
        metadata = {
            'foo': 'bar',
            'test': 2
        }
        params['metadata'] = json.dumps(metadata)
        resp = self.request(
            path='/item', method='POST', params=params, user=self.users[0])
        self.assertStatusOk(resp)
        item = resp.json
        self.assertEqual(item['meta']['foo'], metadata['foo'])
        self.assertEqual(item['meta']['test'], metadata['test'])

        # A None value in the PUT metadata deletes that key; other keys are
        # updated or added
        metadata = {
            'foo': None,
            'test': 3,
            'bar': 'baz'
        }
        resp = self.request(
            path='/item/{_id}'.format(**item), method='PUT',
            user=self.users[0], params={'metadata': json.dumps(metadata)}
        )
        self.assertStatusOk(resp)
        item = resp.json
        self.assertNotHasKeys(item['meta'], ['foo'])
        self.assertEqual(item['meta']['test'], metadata['test'])
        self.assertEqual(item['meta']['bar'], metadata['bar'])

    def testItemMetadataCrud(self):
        """
        Test CRUD of metadata.
        """
        # Create an item
        params = {
            'name': 'item with metadata',
            'description': ' a description ',
            'folderId': self.privateFolder['_id']
        }
        resp = self.request(path='/item', method='POST', params=params,
                            user=self.users[0])
        self.assertStatusOk(resp)
        item = resp.json

        # Try to delete metadata from an item that doesn't have any set on it
        # yet.
        resp = self.request(path='/item/%s/metadata' % (item['_id']),
                            method='DELETE', user=self.users[0],
                            body=json.dumps(['foobar']),
                            type='application/json')
        item = resp.json
        self.assertStatusOk(resp)
        self.assertEqual(item['meta'], {})

        # Add some metadata
        metadata = {
            'foo': 'bar',
            'test': 2
        }
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=json.dumps(metadata),
                            type='application/json')
        item = resp.json
        self.assertEqual(item['meta']['foo'], metadata['foo'])
        self.assertEqual(item['meta']['test'], metadata['test'])

        # Test invalid JSON constants
        body = '{"key": {"foo": Infinity}}'
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=body, type='application/json')
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Error: "Infinity" is not valid JSON.')

        # Edit and remove metadata (a None value removes the key)
        metadata['test'] = None
        metadata['foo'] = 'baz'
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=json.dumps(metadata),
                            type='application/json')
        item = resp.json
        self.assertEqual(item['meta']['foo'], metadata['foo'])
        self.assertNotHasKeys(item['meta'], ['test'])

        # Test insertion of null values (allowNull keeps None as a value
        # instead of treating it as a deletion)
        metadata['nullVal'] = None
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=json.dumps(metadata),
                            params={'allowNull': True},
                            type='application/json')
        item = resp.json
        self.assertEqual(item['meta']['nullVal'], None)

        # Adding an unrelated key should not affect existing keys
        del metadata['nullVal']
        metadata['other'] = 'macguffin'
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=json.dumps(metadata),
                            type='application/json')
        item = resp.json
        self.assertEqual(item['meta']['other'], metadata['other'])
        self.assertEqual(item['meta']['nullVal'], None)

        # Test metadata deletion
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='DELETE', user=self.users[0],
                            body=json.dumps(['other']),
                            type='application/json')
        item = resp.json
        self.assertNotHasKeys(item['meta'], ['other'])

        # Error when deletion field names contain a period.
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='DELETE', user=self.users[0],
                            body=json.dumps(['foo', 'foo.bar']),
                            type='application/json')
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Invalid key foo.bar: keys must not contain the "." character.')

        # Error when deletion field names begin with a dollar-sign.
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='DELETE', user=self.users[0],
                            body=json.dumps(['foo', '$bar']),
                            type='application/json')
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Invalid key $bar: keys must not start with the "$" character.')

        # Make sure metadata cannot be added with invalid JSON
        metadata = {
            'test': 'allowed'
        }
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=json.dumps(metadata).replace('"', "'"),
                            type='application/json')
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'],
                         'Invalid JSON passed in request body.')

        # Make sure metadata cannot be added if there is a period in the key
        # name
        metadata = {
            'foo.bar': 'notallowed'
        }
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=json.dumps(metadata),
                            type='application/json')
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Invalid key foo.bar: keys must not contain the "." character.')

        # Make sure metadata cannot be added if the key begins with a
        # dollar sign
        metadata = {
            '$foobar': 'alsonotallowed'
        }
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=json.dumps(metadata),
                            type='application/json')
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Invalid key $foobar: keys must not start with the "$" character.')

        # Make sure metadata cannot be added with a blank key
        metadata = {
            '': 'stillnotallowed'
        }
        resp = self.request(path='/item/%s/metadata' % item['_id'],
                            method='PUT', user=self.users[0],
                            body=json.dumps(metadata),
                            type='application/json')
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Key names must not be empty.')

    def testItemFiltering(self):
        """
        Test filtering private metadata from items.
        """
        # Create an item
        params = {
            'name': 'item with metadata',
            'description': ' a description ',
            'folderId': self.privateFolder['_id']
        }
        resp = self.request(path='/item', method='POST', params=params,
                            user=self.users[0])
        self.assertStatusOk(resp)

        # get the item object from the database
        item = Item().load(resp.json['_id'], force=True)

        # set a private property
        item['private'] = 'very secret metadata'
        item = Item().save(item)

        # get the item from the rest api
        resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
                            user=self.users[0])
        self.assertStatusOk(resp)

        # assert that the private data is not included
        self.assertNotHasKeys(resp.json, ['private'])

    def testPathToRoot(self):
        """
        Test that GET /item/:id/rootpath returns the ancestor chain
        (owning user, then each folder from root down to the item's parent).
        """
        firstChildName = 'firstChild'
        firstChildDesc = 'firstDesc'
        secondChildName = 'secondChild'
        secondChildDesc = 'secondDesc'

        firstChild = Folder().createFolder(
            self.publicFolder, firstChildName, firstChildDesc,
            creator=self.users[0])
        secondChild = Folder().createFolder(
            firstChild, secondChildName, secondChildDesc,
            creator=self.users[0])
        baseItem = Item().createItem('blah', self.users[0], secondChild,
                                     'foo')

        resp = self.request(path='/item/%s/rootpath' % baseItem['_id'],
                            method='GET')
        self.assertStatusOk(resp)
        pathToRoot = resp.json

        self.assertEqual(pathToRoot[0]['type'], 'user')
        self.assertEqual(pathToRoot[0]['object']['login'],
                         self.users[0]['login'])
        self.assertEqual(pathToRoot[1]['type'], 'folder')
        self.assertEqual(pathToRoot[1]['object']['name'],
                         self.publicFolder['name'])
        self.assertEqual(pathToRoot[2]['type'], 'folder')
        self.assertEqual(pathToRoot[2]['object']['name'],
                         firstChild['name'])
        self.assertEqual(pathToRoot[3]['type'], 'folder')
        self.assertEqual(pathToRoot[3]['object']['name'],
                         secondChild['name'])

    def testLazyFieldComputation(self):
        """
        Demonstrate that an item that is saved in the database without
        derived fields (like lowerName or baseParentId) get those values
        computed at load() time.
        """
        item = Item().createItem('My Item Name', creator=self.users[0],
                                 folder=self.publicFolder)
        self.assertEqual(item['lowerName'], 'my item name')
        self.assertEqual(item['baseParentId'], self.users[0]['_id'])

        # Force the item to be saved without lowerName and baseParentType
        # fields
        del item['lowerName']
        del item['baseParentType']
        item = Item().save(item, validate=False)

        item = Item().find({'_id': item['_id']})[0]
        self.assertNotHasKeys(item, ('lowerName', 'baseParentType'))

        # Now ensure that calling load() actually populates those fields and
        # saves the results persistently
        Item().load(item['_id'], force=True)
        item = Item().find({'_id': item['_id']})[0]
        self.assertHasKeys(item, ('lowerName', 'baseParentType'))
        self.assertEqual(item['lowerName'], 'my item name')
        self.assertEqual(item['baseParentType'], 'user')
        self.assertEqual(item['baseParentId'], self.users[0]['_id'])

        # Also test that this works for a duplicate item, such that the
        # automatically renamed item still has the correct lowerName, and a
        # None description is changed to an empty string.
        item = Item().createItem(
            'My Item Name', creator=self.users[0], folder=self.publicFolder,
            description=None)
        # test if non-strings are coerced
        self.assertEqual(item['description'], '')
        item['description'] = 1
        item = Item().save(item)
        item = Item().findOne({'_id': item['_id']})
        self.assertEqual(item['description'], '1')

        # test if just missing lowerName is corrected.
        self.assertEqual(item['lowerName'], 'my item name (1)')
        del item['lowerName']
        item = Item().save(item, validate=False)
        item = Item().findOne({'_id': item['_id']})
        self.assertNotHasKeys(item, ('lowerName', ))
        Item().load(item['_id'], force=True)
        item = Item().findOne({'_id': item['_id']})
        self.assertHasKeys(item, ('lowerName', ))
        self.assertEqual(item['lowerName'], 'my item name (1)')

    def testParentsToRoot(self):
        """
        Demonstrate that forcing parentsToRoot will cause it to skip the
        filtering process.
        """
        item = Item().createItem('My Item Name', creator=self.users[0],
                                 folder=self.publicFolder)

        # force=True skips access filtering, so no _accessLevel annotation
        parents = Item().parentsToRoot(item, force=True)
        for parent in parents:
            self.assertNotIn('_accessLevel', parent['object'])

        parents = Item().parentsToRoot(item)
        for parent in parents:
            self.assertIn('_accessLevel', parent['object'])

    def testItemCopy(self):
        """
        Test POST /item/:id/copy: metadata, regular files, and link files
        must all be duplicated, and the copy must record provenance via
        ``copyOfItem``.
        """
        origItem = self._createItem(self.publicFolder['_id'],
                                    'test_for_copy', 'fake description',
                                    self.users[0])

        # Add metadata and files, since we want to make sure those get copied
        metadata = {
            'foo': 'value1',
            'test': 2
        }
        resp = self.request(
            path='/item/%s/metadata' % origItem['_id'], method='PUT',
            user=self.users[0], body=json.dumps(metadata),
            type='application/json')
        self.assertStatusOk(resp)

        self._testUploadFileToItem(origItem, 'file_1', self.users[0],
                                   'foobar')
        self._testUploadFileToItem(origItem, 'file_2', self.users[0],
                                   'foobz')

        # Also upload a link
        params = {
            'parentType': 'item',
            'parentId': origItem['_id'],
            'name': 'link_file',
            'linkUrl': 'http://www.google.com'
        }
        resp = self.request(path='/file', method='POST', user=self.users[0],
                            params=params)
        self.assertStatusOk(resp)

        # Copy to a new item. It will be in the same folder, but we want a
        # different name.
        params = {
            'name': 'copied_item'
        }
        resp = self.request(path='/item/%s/copy' % origItem['_id'],
                            method='POST', user=self.users[0], params=params)
        self.assertStatusOk(resp)

        # Make sure size was returned correctly
        self.assertEqual(resp.json['size'], 11)

        # Now ask for the new item explicitly and check its metadata
        self.request(path='/item/%s' % resp.json['_id'],
                     user=self.users[0], type='application/json')
        self.assertStatusOk(resp)
        newItem = resp.json
        self.assertEqual(newItem['name'], 'copied_item')
        self.assertEqual(newItem['meta']['foo'], metadata['foo'])
        self.assertEqual(newItem['meta']['test'], metadata['test'])

        # Check if we can download the files from the new item
        resp = self.request(path='/item/%s/files' % newItem['_id'],
                            method='GET', user=self.users[0])
        self.assertStatusOk(resp)
        newFiles = resp.json
        self.assertEqual(newFiles[0]['name'], 'file_1')
        self.assertEqual(newFiles[1]['name'], 'file_2')
        self.assertEqual(newFiles[2]['name'], 'link_file')
        self.assertEqual(newFiles[0]['size'], 6)
        self.assertEqual(newFiles[1]['size'], 5)
        self._testDownloadMultiFileItem(
            newItem, self.users[0],
            {'file_1': 'foobar', 'file_2': 'foobz',
             'link_file': 'http://www.google.com'})

        # Check to make sure the original item is still present
        resp = self.request(path='/item', method='GET', user=self.users[0],
                            params={'folderId': self.publicFolder['_id'],
                                    'text': 'test_for_copy'})
        self.assertStatusOk(resp)
        self.assertEqual(origItem['_id'], resp.json[0]['_id'])

        # Check to make sure the new item is still present
        resp = self.request(path='/item', method='GET', user=self.users[0],
                            params={'folderId': self.publicFolder['_id'],
                                    'text': 'copied_item'})
        self.assertStatusOk(resp)
        self.assertEqual(newItem['_id'], resp.json[0]['_id'])

        # Check that the provenance tag correctly points back
        # to the original item
        self.assertEqual(newItem['copyOfItem'], origItem['_id'])

        # Check if we can download the files from the old item and that they
        # are distinct from the files in the original item
        resp = self.request(path='/item/%s/files' % origItem['_id'],
                            method='GET', user=self.users[0])
        self.assertStatusOk(resp)
        origFiles = resp.json
        self._testDownloadMultiFileItem(
            origItem, self.users[0],
            {'file_1': 'foobar', 'file_2': 'foobz',
             'link_file': 'http://www.google.com'})
        for origFile, newFile in zip_longest(origFiles, newFiles):
            self.assertNotEqual(origFile['_id'], newFile['_id'])

    def testCookieAuth(self):
        """
        We make sure a cookie is sufficient for authentication for the item
        download endpoint. Also, while we're at it, we make sure it's not
        sufficient for other endpoints.
        """
        item = self._createItem(self.privateFolder['_id'],
                                'cookie_auth_download', '', self.users[0])
        self._testUploadFileToItem(item, 'file', self.users[0], 'foo')
        token = Token().createToken(self.users[0])
        cookie = 'girderToken=%s' % token['_id']

        # We should be able to download a private item using a cookie token
        resp = self.request(path='/item/%s/download' % item['_id'],
                            isJson=False, cookie=cookie)
        self.assertStatusOk(resp)
        self.assertEqual(self.getBody(resp), 'foo')

        # We should not be able to call GET /item/:id with a cookie token
        resp = self.request(path='/item/%s' % item['_id'], cookie=cookie)
        self.assertStatus(resp, 401)

        # Make sure the cookie has to be a valid token
        resp = self.request(path='/item/%s/download' % item['_id'],
                            cookie='girderToken=invalid_token')
        self.assertStatus(resp, 401)

    def testReuseExisting(self):
        """
        Test that createItem with reuseExisting=True returns the existing
        same-named item instead of creating an auto-renamed duplicate.
        """
        item1 = Item().createItem('to be reused', creator=self.users[0],
                                  folder=self.publicFolder)
        item2 = Item().createItem('to be reused', creator=self.users[0],
                                  folder=self.publicFolder)
        item3 = Item().createItem(
            'to be reused', creator=self.users[0],
            folder=self.publicFolder, reuseExisting=True)

        self.assertNotEqual(item1['_id'], item2['_id'])
        self.assertEqual(item1['_id'], item3['_id'])
        self.assertEqual(item2['name'], 'to be reused (1)')
        self.assertEqual(item3['name'], 'to be reused')

    def testUpdateDuplicatedName(self):
        """
        Test that saving an item whose name was forced (validate=False) to
        collide with a sibling does not rename either item on later saves.
        """
        item1 = Item().createItem('foo', creator=self.users[0],
                                  folder=self.publicFolder)
        item2 = Item().createItem('bar', creator=self.users[0],
                                  folder=self.publicFolder)
        # Bypass validation to create a deliberate name collision
        item2['name'] = 'foo'
        Item().save(item2, validate=False)
        self.assertEqual(item2['name'], 'foo')
        # Re-saving the original item must not trigger auto-renaming
        item1['size'] = 3
        Item().save(item1)
        self.assertEqual(item1['name'], 'foo')
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base CCDPROC functions

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import numbers
import numpy as np

from astropy.extern import six
from astropy.units.quantity import Quantity
from astropy import units as u
from astropy.modeling import fitting
from astropy import stats
from astropy.nddata import StdDevUncertainty

from scipy import ndimage

from .ccddata import CCDData
from .utils.slices import slice_from_string
from .log_meta import log_to_metadata

__all__ = ['background_deviation_box', 'background_deviation_filter',
           'cosmicray_median', 'cosmicray_lacosmic', 'create_deviation',
           'flat_correct', 'gain_correct', 'rebin', 'sigma_func',
           'subtract_bias', 'subtract_dark', 'subtract_overscan',
           'transform_image', 'trim_image', 'Keyword']

# The dictionary below is used to translate actual function names to names
# that are FITS compliant, i.e. 8 characters or less.
_short_names = {
    'background_deviation_box': 'bakdevbx',
    'background_deviation_filter': 'bakdfilt',
    'cosmicray_median': 'crmedian',
    'create_deviation': 'creatvar',
    'flat_correct': 'flatcor',
    'gain_correct': 'gaincor',
    'subtract_bias': 'subbias',
    'subtract_dark': 'subdark',
    'subtract_overscan': 'suboscan',
    'trim_image': 'trimim',
    'transform_image': 'tranim',
}


@log_to_metadata
def create_deviation(ccd_data, gain=None, readnoise=None):
    """
    Create a uncertainty frame. The function will update the uncertainty
    plane which gives the standard deviation for the data. Gain is used in
    this function only to scale the data in constructing the deviation;
    the data is not scaled.

    Parameters
    ----------
    ccd_data : `~ccdproc.CCDData`
        Data whose deviation will be calculated.

    gain : `~astropy.units.Quantity`, optional
        Gain of the CCD; necessary only if ``ccd_data`` and ``readnoise``
        are not in the same units. In that case, the units of ``gain``
        should be those that convert ``ccd_data.data`` to the same units as
        ``readnoise``.

    readnoise : `~astropy.units.Quantity`
        Read noise per pixel.

    {log}

    Raises
    ------
    UnitsError
        Raised if ``readnoise`` units are not equal to product of ``gain``
        and ``ccd_data`` units.

    Returns
    -------
    ccd : `~ccdproc.CCDData`
        CCDData object with uncertainty created; uncertainty is in the
        same units as the data in the parameter ``ccd_data``.
    """
    if gain is not None and not isinstance(gain, Quantity):
        raise TypeError('gain must be a astropy.units.Quantity')

    if readnoise is None:
        raise ValueError('Must provide a readnoise.')

    if not isinstance(readnoise, Quantity):
        raise TypeError('readnoise must be a astropy.units.Quantity')

    if gain is None:
        gain = 1.0 * u.dimensionless_unscaled

    if gain.unit * ccd_data.unit != readnoise.unit:
        raise u.UnitsError("Units of data, gain and readnoise do not match")

    # Need to convert Quantity to plain number because NDData data is not
    # a Quantity. All unit checking should happen prior to this point.
    gain_value = float(gain / gain.unit)
    readnoise_value = float(readnoise / readnoise.unit)

    # Poisson term (gain * data, in electrons) plus read-noise term, then
    # take the square root to get the standard deviation
    var = (gain_value * ccd_data.data + readnoise_value ** 2) ** 0.5
    ccd = ccd_data.copy()
    # ensure uncertainty and image data have same unit
    var /= gain_value
    ccd.uncertainty = StdDevUncertainty(var)
    return ccd


@log_to_metadata
def subtract_overscan(ccd, overscan=None, overscan_axis=1, fits_section=None,
                      median=False, model=None):
    """
    Subtract the overscan region from an image.

    Parameters
    ----------
    ccd : `~ccdproc.CCDData`
        Data to have overscan frame corrected

    overscan : `~ccdproc.CCDData`
        Slice from ``ccd`` that contains the overscan. Must provide either
        this argument or ``fits_section``, but not both.

    overscan_axis : 0 or 1, optional
        Axis along which overscan should combined with mean or median. Axis
        numbering follows the *python* convention for ordering, so 0 is the
        first axis and 1 is the second axis.

    fits_section : str
        Region of ``ccd`` from which the overscan is extracted, using the
        FITS conventions for index order and index start. See Notes and
        Examples below. Must provide either this argument or ``overscan``,
        but not both.

    median : bool, optional
        If true, takes the median of each line. Otherwise, uses the mean

    model : `~astropy.modeling.Model`, optional
        Model to fit to the data. If None, returns the values calculated
        by the median or the mean.

    {log}

    Raises
    ------
    TypeError
        A TypeError is raised if either ``ccd`` or ``overscan`` are not the
        correct objects.

    Returns
    -------
    ccd : `~ccdproc.CCDData`
        CCDData object with overscan subtracted

    Notes
    -----
    The format of the ``fits_section`` string follow the rules for slices
    that are consistent with the FITS standard (v3) and IRAF usage of
    keywords like TRIMSEC and BIASSEC. Its indexes are one-based, instead
    of the python-standard zero-based, and the first index is the one that
    increases most rapidly as you move through the array in memory order,
    opposite the python ordering.

    The 'fits_section' argument is provided as a convenience for those who
    are processing files that contain TRIMSEC and BIASSEC. The preferred,
    more pythonic, way of specifying the overscan is to do it by indexing
    the data array directly with the ``overscan`` argument.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy import units as u
    >>> arr1 = CCDData(np.ones([100, 100]), unit=u.adu)

    The statement below uses all rows of columns 90 through 99 as the
    overscan.

    >>> no_scan = subtract_overscan(arr1, overscan=arr1[:, 90:100])
    >>> assert (no_scan.data == 0).all()

    This statement does the same as the above, but with a FITS-style
    section.

    >>> no_scan = subtract_overscan(arr1, fits_section='[91:100, :]')
    >>> assert (no_scan.data == 0).all()

    Spaces are stripped out of the ``fits_section`` string.
    """
    if not (isinstance(ccd, CCDData) or isinstance(ccd, np.ndarray)):
        raise TypeError('ccddata is not a CCDData or ndarray object')

    # Exactly one of overscan / fits_section must be supplied
    if ((overscan is not None and fits_section is not None) or
            (overscan is None and fits_section is None)):
        raise TypeError('Specify either overscan or fits_section, but not both')

    if (overscan is not None) and (not isinstance(overscan, CCDData)):
        raise TypeError('overscan is not a CCDData object')

    if (fits_section is not None) and not isinstance(fits_section,
                                                     six.string_types):
        raise TypeError('overscan is not a string')

    if fits_section is not None:
        overscan = ccd[slice_from_string(fits_section, fits_convention=True)]

    # Collapse the overscan region along the requested axis
    if median:
        oscan = np.median(overscan.data, axis=overscan_axis)
    else:
        oscan = np.mean(overscan.data, axis=overscan_axis)

    if model is not None:
        # Fit a model to the collapsed overscan and evaluate it, smoothing
        # out pixel-to-pixel noise
        of = fitting.LinearLSQFitter()
        yarr = np.arange(len(oscan))
        oscan = of(model, yarr, oscan)
        oscan = oscan(yarr)
        # Reshape to a column/row vector so the subtraction broadcasts
        if overscan_axis == 1:
            oscan = np.reshape(oscan, (oscan.size, 1))
        else:
            oscan = np.reshape(oscan, (1, oscan.size))
    else:
        if overscan_axis == 1:
            oscan = np.reshape(oscan, oscan.shape + (1,))
        else:
            oscan = np.reshape(oscan, (1,) + oscan.shape)

    subtracted = ccd.copy()

    # subtract the overscan
    subtracted.data = ccd.data - oscan

    return subtracted


@log_to_metadata
def trim_image(ccd, fits_section=None):
    """
    Trim the image to the dimensions indicated.

    Parameters
    ----------
    ccd : `~ccdproc.CCDData`
        CCD image to be trimmed, sliced if desired.

    fits_section : str
        Region of ``ccd`` from which the overscan is extracted; see
        `~ccdproc.subtract_overscan` for details.

    {log}

    Returns
    -------
    trimmed_ccd : `~ccdproc.CCDData`
        Trimmed image.

    Examples
    --------
    Given an array that is 100x100,

    >>> import numpy as np
    >>> from astropy import units as u
    >>> arr1 = CCDData(np.ones([100, 100]), unit=u.adu)

    the syntax for trimming this to keep all of the first index but only
    the first 90 rows of the second index is

    >>> trimmed = trim_image(arr1[:, :90])
    >>> trimmed.shape
    (100, 90)
    >>> trimmed.data[0, 0] = 2
    >>> arr1.data[0, 0]
    1.0

    This both trims *and makes a copy* of the image.

    Indexing the image directly does *not* do the same thing, quite:

    >>> not_really_trimmed = arr1[:, :90]
    >>> not_really_trimmed.data[0, 0] = 2
    >>> arr1.data[0, 0]
    2.0

    In this case, ``not_really_trimmed`` is a view of the underlying array
    ``arr1``, not a copy.
    """
    if fits_section is not None and not isinstance(fits_section,
                                                   six.string_types):
        raise TypeError("fits_section must be a string.")
    trimmed = ccd.copy()
    if fits_section:
        python_slice = slice_from_string(fits_section, fits_convention=True)
        trimmed.data = trimmed.data[python_slice]
        # Keep the mask and uncertainty planes aligned with the data
        if trimmed.mask is not None:
            trimmed.mask = trimmed.mask[python_slice]
        if trimmed.uncertainty is not None:
            trimmed.uncertainty.array = \
                trimmed.uncertainty.array[python_slice]
    return trimmed


@log_to_metadata
def subtract_bias(ccd, master):
    """
    Subtract master bias from image.

    Parameters
    ----------
    ccd : `~ccdproc.CCDData`
        Image from which bias will be subtracted

    master : `~ccdproc.CCDData`
        Master image to be subtracted from ``ccd``

    {log}

    Returns
    -------
    result : `~ccdproc.CCDData`
        CCDData object with bias subtracted
    """
    result = ccd.subtract(master)
    # subtract() builds fresh metadata; restore the science frame's header
    result.meta = ccd.meta.copy()
    return result


@log_to_metadata
def subtract_dark(ccd, master, dark_exposure=None, data_exposure=None,
                  exposure_time=None, exposure_unit=None, scale=False):
    """
    Subtract dark current from an image.

    Parameters
    ----------
    ccd : `~ccdproc.CCDData`
        Image from which dark will be subtracted

    master : `~ccdproc.CCDData`
        Dark image

    dark_exposure : `~astropy.units.Quantity`
        Exposure time of the dark image; if specified, must also provided
        ``data_exposure``.

    data_exposure : `~astropy.units.Quantity`
        Exposure time of the science image; if specified, must also
        provided ``dark_exposure``.

    exposure_time : str or `~ccdproc.Keyword`
        Name of key in image metadata that contains exposure time.

    exposure_unit : `~astropy.units.Unit`
        Unit of the exposure time if the value in the meta data does not
        include a unit.

    {log}

    Returns
    -------
    result : `~ccdproc.CCDData`
        Dark-subtracted image
    """
    if not (isinstance(ccd, CCDData) and isinstance(master, CCDData)):
        raise TypeError("ccd and master must both be CCDData objects")

    # Exposure times may come either as explicit quantities or via a
    # metadata keyword -- not both
    if (data_exposure is not None and
            dark_exposure is not None and
            exposure_time is not None):
        raise TypeError("Specify either exposure_time or "
                        "(dark_exposure and data_exposure), not both.")

    if data_exposure is None and dark_exposure is None:
        if exposure_time is None:
            raise TypeError("Must specify either exposure_time or both "
                            "dark_exposure and data_exposure.")
        if isinstance(exposure_time, Keyword):
            data_exposure = exposure_time.value_from(ccd.header)
            dark_exposure = exposure_time.value_from(master.header)
        else:
            data_exposure = ccd.header[exposure_time]
            dark_exposure = master.header[exposure_time]

    if not (isinstance(dark_exposure, Quantity) and
            isinstance(data_exposure, Quantity)):
        if exposure_time:
            # Header values are plain numbers; attach the supplied unit
            try:
                data_exposure *= exposure_unit
                dark_exposure *= exposure_unit
            except TypeError:
                raise TypeError("Must provide unit for exposure time")
        else:
            raise TypeError("exposure times must be astropy.units.Quantity "
                            "objects")

    if scale:
        master_scaled = master.copy()
        # data_exposure and dark_exposure are both quantities,
        # so we can just have subtract do the scaling
        master_scaled = master_scaled.multiply(data_exposure /
                                               dark_exposure)
        result = ccd.subtract(master_scaled)
    else:
        result = ccd.subtract(master)

    # Preserve the science frame's metadata on the result
    result.meta = ccd.meta.copy()
    return result


@log_to_metadata
def gain_correct(ccd, gain, gain_unit=None):
    """Correct the gain in the image.

    Parameters
    ----------
    ccd : `~ccdproc.CCDData`
        Data to have gain corrected

    gain : `~astropy.units.Quantity` or `~ccdproc.Keyword`
        gain value for the image expressed in electrons per adu

    gain_unit : `~astropy.units.Unit`, optional
        Unit for the ``gain``; used only if ``gain`` itself does not
        provide units.

    {log}

    Returns
    -------
    result : `~ccdproc.CCDData`
        CCDData object with gain corrected
    """
    # Resolve the gain to a (possibly unit-bearing) multiplier
    if isinstance(gain, Keyword):
        gain_value = gain.value_from(ccd.header)
    elif isinstance(gain, numbers.Number) and gain_unit is not None:
        gain_value = gain * u.Unit(gain_unit)
    else:
        gain_value = gain

    result = ccd.multiply(gain_value)
    return result


@log_to_metadata
def flat_correct(ccd, flat, min_value=None):
    """Correct the image for flat fielding.

    The flat field image is normalized by its mean before flat correcting.

    Parameters
    ----------
    ccd : `~ccdproc.CCDData`
        Data to be flatfield corrected

    flat : `~ccdproc.CCDData`
        Flatfield to apply to the data

    min_value : None or float
        Minimum value for flat field. The value can either be None and no
        minimum value is applied to the flat or specified by a float which
        will replace all values in the flat by the min_value.

    {log}

    Returns
    -------
    ccd : `~ccdproc.CCDData`
        CCDData object with flat corrected
    """
    # Use the min_value to replace any values in the flat
    use_flat = flat
    if min_value is not None:
        flat_min = flat.copy()
        flat_min.data[flat_min.data < min_value] = min_value
        use_flat = flat_min

    # divide through the flat
    flat_corrected = ccd.divide(use_flat)

    # multiply by the mean of the flat
    flat_corrected = flat_corrected.multiply(use_flat.data.mean() *
                                             use_flat.unit)
    flat_corrected.meta = ccd.meta.copy()
    return flat_corrected


@log_to_metadata
def transform_image(ccd, transform_func, **kwargs):
    """Transform the image

    Using the function specified by transform_func, the transform will
    be applied to data, uncertainty, and mask in ccd.

    Parameters
    ----------
    ccd : `~ccdproc.CCDData`
        Data to be flatfield corrected

    transform_func : function
        Function to be used to transform the data

    kwargs: dict
        Dictionary of arguments to be used by the transform_func.

    {log}

    Returns
    -------
    ccd : `~ccdproc.CCDData`
        A transformed CCDData object

    Notes
    -----
    At this time, transform will be applied to the uncertainy data but it
    will only transform the data. This will not properly handle
    uncertainties that arise due to correlation between the pixels.

    These should only be geometric transformations of the images. Other
    methods should be used if the units of ccd need to be changed.

    Examples
    --------
    Given an array that is 100x100,

    >>> import numpy as np
    >>> from astropy import units as u
    >>> arr1 = CCDData(np.ones([100, 100]), unit=u.adu)

    the syntax for transforming the array using
    scipy.ndimage.interpolation.shift

    >>> from scipy.ndimage.interpolation import shift
    >>> from ccdproc import transform_image
    >>> transformed = transform_image(arr1, shift, shift=(5.5, 8.1))
    """
    # check that it is a ccddata object
    if not (isinstance(ccd, CCDData)):
        raise TypeError('ccd is not a CCDData')

    # check that transform is a callable function
    if not hasattr(transform_func, '__call__'):
        raise TypeError('transform is not a function')

    # make a copy of the object
    nccd = ccd.copy()

    # transform the image plane
    nccd.data = transform_func(nccd.data, **kwargs)

    # transform the uncertainty plane if it exists
    if nccd.uncertainty is not None:
        nccd.uncertainty.array = transform_func(nccd.uncertainty.array,
                                                **kwargs)

    # transform the mask plane; re-threshold because interpolation may
    # produce fractional values
    if nccd.mask is not None:
        mask = transform_func(nccd.mask, **kwargs)
        nccd.mask = (mask > 0)

    return nccd


def sigma_func(arr):
    """
    Robust method for calculating the deviation of an array. ``sigma_func``
    uses the median absolute deviation to determine the standard deviation.

    Parameters
    ----------
    arr : `~ccdproc.CCDData` or `~numpy.ndarray`
        Array whose deviation is to be calculated.

    Returns
    -------
    float
        standard deviation of array
    """
    # 1.4826 converts MAD to an estimate of the Gaussian standard deviation
    return 1.4826 * stats.median_absolute_deviation(arr)


def setbox(x, y, mbox, xmax, ymax):
    """Create a box of length mbox around a position x,y. If the box will
    be out of [0,len] then reset the edges of the box to be within the
    boundaries

    Parameters
    ----------
    x : int
        Central x-position of box

    y : int
        Central y-position of box

    mbox : int
        Width of box

    xmax : int
        Maximum x value

    ymax : int
        Maximum y value

    Returns
    -------
    x1 : int
        Lower x corner of box

    x2 : int
        Upper x corner of box

    y1 : int
        Lower y corner of box

    y2 : int
        Upper y corner of box
    """
    # Half-width, at least one pixel
    mbox = max(int(0.5 * mbox), 1)
    y1 = max(0, y - mbox)
    y2 = min(y + mbox + 1, ymax - 1)
    x1 = max(0, x - mbox)
    x2 = min(x + mbox + 1, xmax - 1)

    return x1, x2, y1, y2


def background_deviation_box(data, bbox):
    """
    Determine the background deviation with a box size of bbox. The
    algorithm steps through the image and calculates the deviation within
    each box. It returns an array with the pixels in each box filled with
    the deviation value.

    Parameters
    ----------
    data : `~numpy.ndarray` or `~numpy.ma.MaskedArray`
        Data to measure background deviation

    bbox : int
        Box size for calculating background deviation

    Raises
    ------
    ValueError
        A value error is raised if bbox is less than 1

    Returns
    -------
    background : `~numpy.ndarray` or `~numpy.ma.MaskedArray`
        An array with the measured background deviation in each pixel
    """
    # Check to make sure the background box is an appropriate size
    # If it is too small, then insufficient statistics are generated
    if bbox < 1:
        raise ValueError('bbox must be greater than 1')

    # make the background image
    barr = data * 0.0 + data.std()
    ylen, xlen = data.shape
    # Step box-center positions across the image on a bbox-spaced grid
    for i in range(int(0.5 * bbox), xlen, bbox):
        for j in range(int(0.5 * bbox), ylen, bbox):
            x1, x2, y1, y2 = setbox(i, j, bbox, xlen, ylen)
            barr[y1:y2, x1:x2] = sigma_func(data[y1:y2, x1:x2])
    return barr


def background_deviation_filter(data, bbox):
    """
    Determine the background deviation for each pixel from a box with size
    of bbox.

    Parameters
    ----------
    data : `~numpy.ndarray`
        Data to measure background deviation

    bbox : int
        Box size for calculating background deviation

    Raises
    ------
    ValueError
        A value error is raised if bbox is less than 1

    Returns
    -------
    background : `~numpy.ndarray` or `~numpy.ma.MaskedArray`
        An array with the measured background deviation in each pixel
    """
    # Check to make sure the background box is an appropriate size
    if bbox < 1:
        raise ValueError('bbox must be greater than 1')

    return ndimage.generic_filter(data, sigma_func, size=(bbox, bbox))


def rebin(ccd, newshape):
    """
    Rebin an array to have a new shape.

    Parameters
    ----------
    data : `~ccdproc.CCDData` or `~numpy.ndarray`
        Data to rebin

    newshape : tuple
        Tuple containing the new shape for the array

    Returns
    -------
    output : `~ccdproc.CCDData` or `~numpy.ndarray`
        An array with the new shape. It will have the same type as the
        input object.

    Raises
    ------
    TypeError
        A type error is raised if data is not an `~numpy.ndarray` or
        `~ccdproc.CCDData`

    ValueError
        A value error is raised if the dimenisions of new shape is not
        equal to data

    Notes
    -----
    This is based on the scipy cookbook for rebinning:
    http://wiki.scipy.org/Cookbook/Rebinning

    If rebinning a CCDData object to a smaller shape, the masking and
    uncertainty are not handled correctly.

    Examples
    --------
    Given an array that is 100x100,

    >>> import numpy as np
    >>> from astropy import units as u
    >>> arr1 = CCDData(np.ones([10, 10]), unit=u.adu)

    the syntax for rebinning an array to a shape of (20,20) is

    >>> rebinned = rebin(arr1, (20,20))
    """
    #check to see that is in a nddata type
    if isinstance(ccd, np.ndarray):

        #check to see that the two arrays are going to be the same length
        if len(ccd.shape) != len(newshape):
            raise ValueError('newshape does not have the same dimensions as ccd')

        # Fractional slice steps: mgrid generates the resampling index grid
        # (true division is active via the __future__ import above)
        slices = [slice(0, old, old/new) for old, new in
                  zip(ccd.shape, newshape)]
        coordinates = np.mgrid[slices]
        indices = coordinates.astype('i')
        return ccd[tuple(indices)]

    elif isinstance(ccd, CCDData):
        #check to see that the two arrays are going to be the same length
        if len(ccd.shape) != len(newshape):
            raise ValueError('newshape does not have the same dimensions as ccd')

        nccd = ccd.copy()
        #rebin the data plane
        nccd.data = rebin(nccd.data, newshape)

        #rebin the uncertainty plane
        if nccd.uncertainty is not None:
            nccd.uncertainty.array = rebin(nccd.uncertainty.array,
                                           newshape)

        #rebin the mask plane
        if nccd.mask is not None:
            nccd.mask = rebin(nccd.mask, newshape)

        return nccd
    else:
        raise TypeError('ccd is not an ndarray or a CCDData object')


def _blkavg(data, newshape):
    """
    Block average an array such that it has the new shape

    Parameters
    ----------
    data : `~numpy.ndarray` or `~numpy.ma.MaskedArray`
        Data to average

    newshape : tuple
        Tuple containing the new shape for the array

    Returns
    -------
    output : `~numpy.ndarray` or `~numpy.ma.MaskedArray`
        An array with the new shape and the average of the pixels

    Raises
    ------
    TypeError
        A type error is raised if data is not an `numpy.ndarray`

    ValueError
        A value error is raised if the dimensions of new shape is not equal
        to data

    Notes
    -----
    This is based on the scipy cookbook for rebinning:
    http://wiki.scipy.org/Cookbook/Rebinning
    """
    #check to see that is in a nddata type
    if not isinstance(data, np.ndarray):
        # NOTE(review): this definition continues beyond the visible chunk;
        # the string literal below is truncated at the chunk boundary and
        # is preserved as-is.
        raise TypeError('data is not a ndarray
object') #check to see that the two arrays are going to be the same length if len(data.shape) != len(newshape): raise ValueError('newshape does not have the same dimensions as data') shape = data.shape lenShape = len(shape) factor = np.asarray(shape)/np.asarray(newshape) evList = ['data.reshape('] + \ ['newshape[%d],factor[%d],' % (i, i) for i in range(lenShape)] + \ [')'] + ['.mean(%d)' % (i + 1) for i in range(lenShape)] return eval(''.join(evList)) def cosmicray_lacosmic(ccd, error_image=None, thresh=5, fthresh=5, gthresh=1.5, b_factor=2, mbox=5, min_limit=0.01, gbox=0, rbox=0, f_conv=None): """ Identify cosmic rays through the lacosmic technique. The lacosmic technique identifies cosmic rays by identifying pixels based on a variation of the Laplacian edge detection. The algorithm is an implementation of the code describe in van Dokkum (2001) [1]_. Parameters ---------- ccd: `~ccdproc.CCDData` or `~numpy.ndarray` Data to have cosmic ray cleaned error_image : `~numpy.ndarray` Error level in the image. It should have the same shape as data as data. This is the same as the noise array in lacosmic.cl thresh : float Threshold for detecting cosmic rays. This is the same as sigmaclip in lacosmic.cl fthresh : float Threshold for differentiating compact sources from cosmic rays. This is the same as objlim in lacosmic.cl gthresh : float Threshold for checking for surrounding cosmic rays from source. This is the same as sigclip*sigfrac from lacosmic.cl b_factor : int Factor for block replication mbox : int Median box for detecting cosmic rays min_limit: float Minimum value for all pixels so as to avoid division by zero errors gbox : int Box size to grow cosmic rays. If zero, no growing will be done. rbox : int Median box for calculating replacement values. If zero, no pixels will be replaced. f_conv: `~numpy.ndarray`, optional Convolution kernal for detecting edges. The default kernel is ``np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])``. 
{log} Notes ----- Implementation of the cosmic ray identification L.A.Cosmic: http://www.astro.yale.edu/dokkum/lacosmic/ Returns ------- nccd : `~ccdproc.CCDData` or `~numpy.ndarray` An object of the same type as ccd is returned. If it is a `~ccdproc.CCDData`, the mask attribute will also be updated with areas identified with cosmic rays masked. nccd : `~numpy.ndarray` If an `~numpy.ndarray` is provided as ccd, a boolean ndarray with the cosmic rays identified will also be returned. References ---------- .. [1] van Dokkum, P; 2001, "Cosmic-Ray Rejection by Laplacian Edge Detection". The Publications of the Astronomical Society of the Pacific, Volume 113, Issue 789, pp. 1420-1427. doi: 10.1086/323894 Examples -------- 1. Given an numpy.ndarray object, the syntax for running cosmicrar_lacosmic would be: >>> newdata, mask = cosmicray_lacosmic(data, error_image=error_image, ... thresh=5, mbox=11, rbox=11, ... gbox=5) # doctest: +SKIP where the error is an array that is the same shape as data but includes the pixel error. This would return a data array, newdata, with the bad pixels replaced by the local median from a box of 11 pixels; and it would return a mask indicating the bad pixels. 2. Given an `~ccdproc.CCDData` object with an uncertainty frame, the syntax for running cosmicrar_lacosmic would be: >>> newccd = cosmicray_lacosmic(ccd, thresh=5, mbox=11, ... rbox=11, gbox=5) # doctest: +SKIP The newccd object will have bad pixels in its data array replace and the mask of the object will be created if it did not previously exist or be updated with the detected cosmic rays. 
""" if f_conv is None: f_conv = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]]) if isinstance(ccd, np.ndarray): data = ccd if not isinstance(error_image, np.ndarray): raise TypeError('error_image is not a ndarray object') if data.shape != error_image.shape: raise ValueError('error_image is not the same shape as data') #set up a copy of the array and original shape shape = data.shape #rebin the data newshape = (b_factor*shape[0], b_factor*shape[1]) ldata = rebin(data, newshape) #convolve with f_conv ldata = ndimage.filters.convolve(ldata, f_conv) ldata[ldata <= 0] = 0 #return to the original binning ldata = _blkavg(ldata, shape) #median the noise image med_noise = ndimage.median_filter(error_image, size=(mbox, mbox)) #create S/N image sndata = ldata / med_noise / b_factor #remove extended objects mdata = ndimage.median_filter(sndata, size=(mbox, mbox)) sndata = sndata - mdata #select objects masks = (sndata > thresh) #remove compact bright sources fdata = ndimage.median_filter(data, size=(mbox-2, mbox-2)) fdata = fdata - ndimage.median_filter(data, size=(mbox+2, mbox+2)) fdata = fdata / med_noise # set a minimum value for all pixels so no divide by zero problems fdata[fdata < min_limit] = min_limit fdata = sndata * masks / fdata #make the list of cosmic rays crarr = masks * (fdata > fthresh) #check any of the neighboring pixels gdata = sndata * ndimage.filters.maximum_filter(crarr, size=(3, 3)) crarr = crarr * (gdata > gthresh) # grow the pixels if gbox > 0: crarr = ndimage.maximum_filter(crarr, gbox) #replace bad pixels in the image ndata = data.copy() if rbox > 0: maskdata = np.ma.masked_array(data, crarr) mdata = ndimage.median_filter(maskdata, rbox) ndata[crarr == 1] = mdata[crarr == 1] return ndata, crarr elif isinstance(ccd, CCDData): #set up the error image if error_image is None and ccd.uncertainty is not None: error_image = ccd.uncertainty.array if ccd.data.shape != error_image.shape: raise ValueError('error_image is not the same shape as data') data, crarr 
= cosmicray_lacosmic(ccd.data, error_image=error_image, thresh=thresh, fthresh=fthresh, gthresh=gthresh, b_factor=b_factor, mbox=mbox, min_limit=min_limit, gbox=gbox, rbox=rbox, f_conv=f_conv) #create the new ccd data object nccd = ccd.copy() nccd.data = data if nccd.mask is None: nccd.mask = crarr else: nccd.mask = nccd.mask + crarr return nccd else: raise TypeError('ccddata is not a CCDData or ndarray object') def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): """ Identify cosmic rays through median technique. The median technique identifies cosmic rays by identifying pixels by subtracting a median image from the initial data array. Parameters ---------- ccd : `~ccdproc.CCDData` or numpy.ndarray or numpy.MaskedArary Data to have cosmic ray cleaned thresh : float Threshold for detecting cosmic rays error_image : None, float, or `~numpy.ndarray` Error level. If None, the task will use the standard deviation of the data. If an ndarray, it should have the same shape as data. mbox : int Median box for detecting cosmic rays gbox : int Box size to grow cosmic rays. If zero, no growing will be done. rbox : int Median box for calculating replacement values. If zero, no pixels will be replaced. {log} Notes ----- Similar implementation to crmedian in iraf.imred.crutil.crmedian Returns ------- nccd : `~ccdproc.CCDData` or `~numpy.ndarray` An object of the same type as ccd is returned. If it is a `~ccdproc.CCDData`, the mask attribute will also be updated with areas identified with cosmic rays masked. nccd : `~numpy.ndarray` If an `~numpy.ndarray` is provided as ccd, a boolean ndarray with the cosmic rays identified will also be returned. Examples -------- 1) Given an numpy.ndarray object, the syntax for running cosmicray_median would be: >>> newdata, mask = cosmicray_median(data, error_image=error, ... thresh=5, mbox=11, ... rbox=11, gbox=5) # doctest: +SKIP where error is an array that is the same shape as data but includes the pixel error. 
This would return a data array, newdata, with the bad pixels replaced by the local median from a box of 11 pixels; and it would return a mask indicating the bad pixels. 2) Given an `~ccdproc.CCDData` object with an uncertainty frame, the syntax for running cosmicray_median would be: >>> newccd = cosmicray_median(ccd, thresh=5, mbox=11, ... rbox=11, gbox=5) # doctest: +SKIP The newccd object will have bad pixels in its data array replace and the mask of the object will be created if it did not previously exist or be updated with the detected cosmic rays. """ if isinstance(ccd, np.ndarray): data = ccd if error_image is None: error_image = data.std() else: if not isinstance(error_image, (float, np.ndarray)): raise TypeError('error_image is not a float or ndarray') # create the median image marr = ndimage.median_filter(data, size=(mbox, mbox)) # Only look at the data array if isinstance(data, np.ma.MaskedArray): data = data.data # Find the residual image rarr = (data - marr) / error_image # identify all sources crarr = (rarr > thresh) # grow the pixels if gbox > 0: crarr = ndimage.maximum_filter(crarr, gbox) #replace bad pixels in the image ndata = data.copy() if rbox > 0: data = np.ma.masked_array(data, (crarr == 1)) mdata = ndimage.median_filter(data, rbox) ndata[crarr == 1] = mdata[crarr == 1] return ndata, crarr elif isinstance(ccd, CCDData): #set up the error image if error_image is None and ccd.uncertainty is not None: error_image = ccd.uncertainty.array if ccd.data.shape != error_image.shape: raise ValueError('error_image is not the same shape as data') data, crarr = cosmicray_median(ccd.data, error_image=error_image, thresh=thresh, mbox=mbox, gbox=gbox, rbox=rbox) #create the new ccd data object nccd = ccd.copy() nccd.data = data if nccd.mask is None: nccd.mask = crarr else: nccd.mask = nccd.mask + crarr return nccd else: raise TypeError('ccd is not an ndarray or a CCDData object') class Keyword(object): """ """ def __init__(self, name, unit=None, value=None): 
self._name = name self._unit = unit self.value = value @property def name(self): return self._name @property def unit(self): return self._unit @property def value(self): return self._value @value.setter def value(self, value): if value is None: self._value = value elif isinstance(value, Quantity): self._unit = value.unit self._value = value elif isinstance(value, six.string_types): if self.unit is not None: raise ValueError("Keyword with a unit cannot have a " "string value.") else: self._value = value else: if self.unit is None: raise ValueError("No unit provided. Set value with " "an astropy.units.Quantity") self._value = value * self.unit def value_from(self, header): """ Set value of keyword from FITS header Parameters ---------- header : `~astropy.io.fits.Header` FITS header containing a value for this keyword """ value_from_header = header[self.name] self.value = value_from_header return self.value
# -*- coding:utf-8 -*-
__author__ = 'chenjun'
import theano
import theano.tensor as T
from rnn import GRU, ReLU
from optimizer import Optimizer
from utils.util import *


class Encoder(object):
    """seq2seq encoder."""
    def __init__(self, rng, embedding, hidden_size, num_layers=1):
        """
        model init.
        :param rng: np random with seed.
        :param embedding: encoder embedding
        :param hidden_size: hidden_size for gru.
        :param num_layers: num of layers.
        """
        self.embedding = embedding
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.gru_layer = GRU(rng, hidden_size, hidden_size)
        self.params = []
        self.params += self.gru_layer.params

    def __call__(self, inputs, mask, h=None):
        """
        encoder using gru layer.
        :param inputs: inputs word indices. (batch_size, max_length).
        :param mask: mask for input.
        :param h: final state
        :return: (outputs, final state)
        """
        out = self.embedding[inputs.flatten()].reshape((inputs.shape[0],
                                                        inputs.shape[1],
                                                        self.hidden_size))
        for i in xrange(self.num_layers):
            out, h = self.gru_layer(out, mask, h)
        return out, h


class Decoder(object):
    """seq2seq decoder."""
    def __init__(self, rng, embedding, vocab_size, hidden_size, max_length,
                 num_layers=1):
        """
        model init.
        :param rng: np random with seed.
        :param embedding: decoder embedding
        :param vocab_size: target vocab_size
        :param hidden_size: hidden size for gru layer
        :param max_length: sequence max length
        :param num_layers: num of layers
        """
        self.embedding = embedding
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.max_length = max_length
        self.gru_layer = GRU(rng, hidden_size, hidden_size)
        self.linear = theano.shared(value=(rng.randn(hidden_size, vocab_size) * 0.1).astype(theano.config.floatX),
                                    name="linear",
                                    borrow=True)
        self.params = [self.linear]
        self.params += self.gru_layer.params

    def __call__(self, inputs, mask, h):
        """
        decoder using gru layer.
        :param inputs: input
        :param mask: mask
        :param h: final state
        :return: (unscaled scores b*1*vocab_size, state b*hidden_size)
        """
        output = self.embedding[inputs.flatten()].reshape((-1, 1, self.hidden_size))  # batch*1*hidden_size
        for i in xrange(self.num_layers):
            output = ReLU(output)
            output, h = self.gru_layer(output, mask, h)
        output = T.tensordot(output, self.linear, axes=[2, 0])  # b*1*hidden_size hidden_size*vocab_size
        return output, h  # b*1*vocab_size(unscaled), b*hidden_size


class AttentionDecoder(object):
    """seq2seq decoder with soft-attention."""
    def __init__(self, rng, embedding, vocab_size, hidden_size, max_length,
                 num_layers=1):
        """
        model init.
        :param rng: np random with seed.
        :param embedding: decoder embedding
        :param vocab_size: target vocab_size
        :param hidden_size: hidden size for gru layer
        :param max_length: sequence max length
        :param num_layers: num of layers
        """
        self.embedding = embedding
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.max_length = max_length
        self.gru_layer = GRU(rng, hidden_size, hidden_size)
        # params
        self.attn_W = theano.shared(value=(rng.randn(hidden_size*2, max_length) * 0.1).astype(theano.config.floatX),
                                    name="attn_W",
                                    borrow=True)
        # BUG FIX: this shared variable was previously also named "attn_W",
        # colliding with the weight above; it is the attention bias.
        self.attn_b = theano.shared(value=np.zeros(shape=(max_length, ), dtype=theano.config.floatX),
                                    name="attn_b",
                                    borrow=True)
        self.attn_combine_W = theano.shared(value=(rng.randn(hidden_size*2, hidden_size) * 0.1).astype(theano.config.floatX),
                                            name="attn_c_w",
                                            borrow=True)
        self.attn_combine_b = theano.shared(value=np.zeros(shape=(hidden_size, ), dtype=theano.config.floatX),
                                            name="attn_c_b",
                                            borrow=True)
        self.linear = theano.shared(value=(rng.randn(hidden_size, vocab_size) * 0.1).astype(theano.config.floatX),
                                    name="linear",
                                    borrow=True)
        self.params = [self.attn_W, self.attn_b, self.attn_combine_W,
                       self.attn_combine_b, self.linear]
        self.params += self.gru_layer.params

    def linear_func(self, x, w, b):
        """
        linear function, (x * W + b).
        :param x: input
        :param w: w param
        :param b: bias
        :return: x*W + b
        """
        linear_out = T.dot(x, w) + b
        return linear_out

    def __call__(self, inputs, mask, h, encoder_outputs):
        """
        decoder using gru layer with attention over encoder outputs.
        :param inputs: input word indices, (batch_size, 1)
        :param mask: mask for inputs, (batch_size, 1)
        :param h: final state, (batch_size, hidden_size)
        :param encoder_outputs: output of encoder, (batch_size, max_length, hidden_size)
        :return: (unscaled scores, state, attention weights)
        """
        embedded = self.embedding[inputs.flatten()].reshape((-1, self.hidden_size))  # batch*hidden_size
        attn_weights = T.nnet.softmax(self.linear_func(T.concatenate([embedded, h], 1),
                                                       self.attn_W,
                                                       self.attn_b))  # batch*(hidden_size*2)-> batch * max_length
        attn_weights = attn_weights.reshape((-1, 1, self.max_length))
        attn_applied = T.batched_dot(attn_weights, encoder_outputs)  # batch*1*max_length * batch*max_length*hidden_size -> batch*1*hidden_size
        output = T.concatenate([embedded, attn_applied[:, 0, :]], 1)  # b*(hidden_size*2)
        output = self.linear_func(output, self.attn_combine_W, self.attn_combine_b)  # b*hidden_size
        output = output.reshape((-1, 1, self.hidden_size))
        for i in xrange(self.num_layers):
            output = ReLU(output)
            output, h = self.gru_layer(output, mask, h)
        output = T.tensordot(output, self.linear, axes=[2, 0])
        return output, h, attn_weights  # b*1*vocab_size(unscaled), b*hidden_size, b*max_length


class Seq2SeqTranslate(object):
    """seq2seq model for translate."""
    def __init__(self, rng, vocab_size_src, vocab_size_tar, hidden_size,
                 max_length, num_layers=1, learning_rate=0.1):
        """
        seq2seq model init.
        :param rng: np random with seed
        :param vocab_size_src: source vocab size
        :param vocab_size_tar: target vocab size
        :param hidden_size: hidden size for gru layer
        :param max_length: sequence max length
        :param num_layers: num of layers
        :param learning_rate: learning rate for updates
        """
        # embedding
        self.src_embedding = theano.shared(
            name="src_embedding",
            value=(rng.randn(vocab_size_src, hidden_size) * 0.1).astype(
                theano.config.floatX),
            borrow=True
        )
        self.tar_embedding = theano.shared(
            name="tar_embedding",
            value=(rng.randn(vocab_size_tar, hidden_size) * 0.1).astype(
                theano.config.floatX),
            borrow=True
        )
        # encoder & decoder & params
        self.encoder = Encoder(rng, self.src_embedding, hidden_size, num_layers)
        self.decoder = AttentionDecoder(rng, self.tar_embedding, vocab_size_tar, hidden_size, max_length, num_layers)
        self.opt = Optimizer()
        self.params = [self.src_embedding, self.tar_embedding]
        self.params += self.encoder.params
        self.params += self.decoder.params
        # place_holder
        self.encoder_inputs = T.lmatrix(name='encoder_input')
        self.encoder_mask = T.lmatrix(name='encoder_mask')
        self.decoder_inputs = T.lmatrix(name='decoder_input')  # SOS + target = teacher force
        self.decoder_mask = T.lmatrix(name='decoder_mask')
        self.decoder_target = T.lmatrix(name='decoder_target')
        # seq2seq train
        encoder_outputs, encoder_h = self.encoder(self.encoder_inputs, self.encoder_mask)
        decoder_outputs = []
        decoder_h = encoder_h
        for i in xrange(max_length):
            word_indices = self.decoder_inputs[:, i].reshape((-1, 1))  # teacher force
            mask = self.decoder_mask[:, i].reshape((-1, 1))
            decoder_out, decoder_h, attn_w = self.decoder(word_indices, mask, decoder_h, encoder_outputs)
            decoder_outputs.append(decoder_out)
        decoder_outputs = T.concatenate(decoder_outputs, 1)  # batch_size * max_length * hidden_size
        softmax_outputs, _ = theano.scan(fn=lambda x: T.nnet.softmax(x), sequences=decoder_outputs)
        batch_cost, _ = theano.scan(fn=self.NLLLoss, sequences=[softmax_outputs, self.decoder_target, self.decoder_mask])
        cost = batch_cost.sum() / self.decoder_mask.sum()
        updates = self.opt.RMSprop(self.params, cost, learning_rate)
        self.train_model = theano.function(
            inputs=[self.encoder_inputs, self.encoder_mask, self.decoder_inputs, self.decoder_mask, self.decoder_target],
            outputs=cost,
            updates=updates
        )
        # seq2seq generate
        generate_outputs = []
        decoder_h = encoder_h
        word_indices = self.decoder_inputs[:, 0].reshape((-1, 1))  # SOS=1
        mask = word_indices  # 1
        for i in xrange(max_length):
            decoder_out, decoder_h, attn_w = self.decoder(word_indices, mask, decoder_h, encoder_outputs)
            # predict
            softmax_out = T.nnet.softmax(decoder_out[:, 0, :])
            word_indices = T.cast(T.argmax(softmax_out, -1), dtype="int64")
            generate_outputs.append(word_indices)
        generate_outputs = T.concatenate(generate_outputs, 0)
        self.generate_model = theano.function(
            inputs=[self.encoder_inputs, self.encoder_mask, self.decoder_inputs],
            outputs=generate_outputs
        )

    def NLLLoss(self, pred, y, m):
        """
        negative log likelihood loss.
        :param pred: predict label
        :param y: true label
        :param m: mask
        :return: masked NLL for one time step
        """
        return - (m * T.log(pred)[T.arange(y.shape[0]), y])

    def train(self, encoder_inputs, encoder_mask, decoder_inputs,
              decoder_mask, decoder_target):
        """
        model train.
        :param encoder_inputs:
        :param encoder_mask:
        :param decoder_inputs:
        :param decoder_mask:
        :param decoder_target:
        :return: training cost
        """
        return self.train_model(encoder_inputs, encoder_mask, decoder_inputs,
                                decoder_mask, decoder_target)

    def generate(self, encoder_inputs, encoder_mask):
        """
        model generate.
        :param encoder_inputs:
        :param encoder_mask:
        :return: generated word indices
        """
        decoder_inputs = np.asarray([[SOS_token]], dtype="int64")
        if encoder_inputs.ndim == 1:
            encoder_inputs = encoder_inputs.reshape((1, -1))
            encoder_mask = encoder_mask.reshape((1, -1))
        rez = self.generate_model(encoder_inputs, encoder_mask, decoder_inputs)
        return rez


class Seq2SeqChatBot(object):
    """seq2seq model for chatbot (shared source/target vocabulary)."""
    def __init__(self, rng, vocab_size, hidden_size, max_length,
                 num_layers=1, learning_rate=0.1):
        """
        seq2seq model init.
        :param rng: np random with seed
        :param vocab_size: vocab size
        :param hidden_size: hidden size for gru layer
        :param max_length: sequence max length
        :param num_layers: num of layers
        :param learning_rate: learning rate for updates
        """
        # embedding (shared between encoder and decoder)
        self.embedding = theano.shared(
            name="embedding",
            value=(rng.randn(vocab_size, hidden_size) * 0.1).astype(
                theano.config.floatX),
            borrow=True
        )
        # encoder & decoder & params
        self.encoder = Encoder(rng, self.embedding, hidden_size, num_layers)
        self.decoder = AttentionDecoder(rng, self.embedding, vocab_size, hidden_size, max_length, num_layers)
        self.opt = Optimizer()
        self.params = [self.embedding]
        self.params += self.encoder.params
        self.params += self.decoder.params
        # place_holder
        self.encoder_inputs = T.lmatrix(name='encoder_input')
        self.encoder_mask = T.lmatrix(name='encoder_mask')
        self.decoder_inputs = T.lmatrix(name='decoder_input')  # SOS + target = teacher force
        self.decoder_mask = T.lmatrix(name='decoder_mask')
        self.decoder_target = T.lmatrix(name='decoder_target')
        # seq2seq train
        encoder_outputs, encoder_h = self.encoder(self.encoder_inputs, self.encoder_mask)
        decoder_outputs = []
        decoder_h = encoder_h
        for i in xrange(max_length):
            word_indices = self.decoder_inputs[:, i].reshape((-1, 1))  # teacher force
            mask = self.decoder_mask[:, i].reshape((-1, 1))
            decoder_out, decoder_h, attn_w = self.decoder(word_indices, mask, decoder_h, encoder_outputs)
            decoder_outputs.append(decoder_out)
        decoder_outputs = T.concatenate(decoder_outputs, 1)  # batch_size * max_length * hidden_size
        softmax_outputs, _ = theano.scan(fn=lambda x: T.nnet.softmax(x), sequences=decoder_outputs)
        batch_cost, _ = theano.scan(fn=self.NLLLoss, sequences=[softmax_outputs, self.decoder_target, self.decoder_mask])
        cost = batch_cost.sum() / self.decoder_mask.sum()
        updates = self.opt.RMSprop(self.params, cost, learning_rate)
        self.train_model = theano.function(
            inputs=[self.encoder_inputs, self.encoder_mask, self.decoder_inputs, self.decoder_mask, self.decoder_target],
            outputs=cost,
            updates=updates
        )
        # seq2seq generate
        generate_outputs = []
        decoder_h = encoder_h
        word_indices = self.decoder_inputs[:, 0].reshape((-1, 1))  # SOS=1
        mask = word_indices  # 1
        for i in xrange(max_length):
            decoder_out, decoder_h, attn_w = self.decoder(word_indices, mask, decoder_h, encoder_outputs)
            # predict
            softmax_out = T.nnet.softmax(decoder_out[:, 0, :])
            word_indices = T.cast(T.argmax(softmax_out, -1), dtype="int64")
            generate_outputs.append(word_indices)
        generate_outputs = T.concatenate(generate_outputs, 0)
        self.generate_model = theano.function(
            inputs=[self.encoder_inputs, self.encoder_mask, self.decoder_inputs],
            outputs=generate_outputs
        )

    def NLLLoss(self, pred, y, m):
        """
        negative log likelihood loss.
        :param pred: predict label
        :param y: true label
        :param m: mask
        :return: masked NLL for one time step
        """
        return - (m * T.log(pred)[T.arange(y.shape[0]), y])

    def train(self, encoder_inputs, encoder_mask, decoder_inputs,
              decoder_mask, decoder_target):
        """
        model train.
        :param encoder_inputs:
        :param encoder_mask:
        :param decoder_inputs:
        :param decoder_mask:
        :param decoder_target:
        :return: training cost
        """
        return self.train_model(encoder_inputs, encoder_mask, decoder_inputs,
                                decoder_mask, decoder_target)

    def generate(self, encoder_inputs, encoder_mask):
        """
        model generate.
        :param encoder_inputs:
        :param encoder_mask:
        :return: generated word indices
        """
        decoder_inputs = np.asarray([[SOS_token]], dtype="int64")
        if encoder_inputs.ndim == 1:
            encoder_inputs = encoder_inputs.reshape((1, -1))
            encoder_mask = encoder_mask.reshape((1, -1))
        rez = self.generate_model(encoder_inputs, encoder_mask, decoder_inputs)
        return rez
__author__ = 'Neil Butcher'

from PyQt4 import QtCore
from PyQt4.QtCore import pyqtSignal, QObject


class UndefinedRoleError(Exception):
    """Raised when a role cannot be resolved from a code or description."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # BUG FIX: this previously returned a tuple
        # ('This is an invalid role ', value), which is not a valid
        # __str__ result and raised a TypeError when the error was printed.
        return 'This is an invalid role %s' % (self.value,)


def role(role_code_desc):
    """Resolve a Role from a Role instance, a role code, or a description.

    Returns None when nothing matches.
    """
    if role_code_desc in GlobalRoleList.roles:
        return role_code_desc
    try:
        return GlobalRoleList.role_from_code(role_code_desc)
    except UndefinedRoleError:
        try:
            return GlobalRoleList.role_from_desc(role_code_desc)
        except UndefinedRoleError:
            return None


def role_from_code(code):
    """Resolve a Role by its code via the global role list."""
    return GlobalRoleList.role_from_code(code)


def role_from_desc(desc):
    """Resolve a Role by its description via the global role list."""
    return GlobalRoleList.role_from_desc(desc)


def roles_from_codes(codes):
    """Resolve a whitespace-separated string of codes into a list of roles."""
    # list comprehension keeps list semantics on Python 3 as well
    # (``map`` would return an iterator there)
    return [role_from_code(code) for code in codes.split()]


class Role(QObject):
    """A named role with a short code, a priority and compatibilities."""

    compatibilitiesChanged = pyqtSignal()
    descriptionChanged = pyqtSignal(str)
    priorityChanged = pyqtSignal(int)

    def strip(self):
        # lets a Role be used interchangeably with a code string
        return self.code.strip()

    def __str__(self):
        return self.description

    def equals(self, a_role):
        """True if a_role is this role, or carries the same code."""
        if self == a_role:
            return True
        if self.code == a_role.strip():
            return True
        return self.code == a_role.code.strip()

    def __init__(self, desc, code, pri, parent=None):
        super(Role, self).__init__(parent)
        self._description = desc
        self.code = code
        self._priority = pri
        self.compatibilities = RoleList('', self)
        self.compatibilities.rolesChanged.connect(self._compatibilities_changed)

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, value):
        self._description = value
        self.descriptionChanged.emit(value)

    @property
    def priority(self):
        return self._priority

    @priority.setter
    def priority(self, value):
        self._priority = value
        self.priorityChanged.emit(value)

    @QtCore.pyqtSlot()
    def _compatibilities_changed(self):
        # re-broadcast changes of the internal RoleList
        self.compatibilitiesChanged.emit()

    def compatible_with(self, role2):
        return self.compatibilities.includes(role2)


class GlobalRoleListClass(QObject):
    """The registry of every known role; use the GlobalRoleList singleton."""

    roles = []
    roleAdded = pyqtSignal(str)
    roleRemoved = pyqtSignal(str)
    rolesCleared = pyqtSignal()
    rolesChanged = pyqtSignal()

    def clear(self):
        self.rolesCleared.emit()
        self.roles = []
        self.rolesChanged.emit()

    def add_role(self, new_role):
        self.roles.append(new_role)
        new_role.descriptionChanged.connect(self._emit_roles_changed)
        self.roleAdded.emit(new_role.code)
        self.rolesChanged.emit()

    def remove_role(self, removed_role):
        removed_role.descriptionChanged.disconnect(self._emit_roles_changed)
        self.roles.remove(removed_role)
        self.roleRemoved.emit(removed_role.code)
        self.rolesChanged.emit()

    def _emit_roles_changed(self):
        self.rolesChanged.emit()

    def role_from_code(self, code):
        """Return the role whose code matches exactly; raise if unknown."""
        stripped_code = code.strip()
        # list comprehension instead of filter(): works with len()/indexing
        # on both Python 2 and 3
        possibilities = [a for a in self.roles if a.code == stripped_code]
        if len(possibilities) == 0:
            raise UndefinedRoleError(code)
        else:
            return possibilities[0]

    def role_from_desc(self, desc):
        """Return the role whose description matches exactly; raise if unknown."""
        desc = desc.strip()
        possibilities = [a for a in self.roles if a.description == desc]
        if len(possibilities) == 0:
            raise UndefinedRoleError(desc)
        else:
            return possibilities[0]

    def new_code(self):
        """Return the smallest numeric string not yet used as a role code."""
        for i in range(len(self.roles) + 1):
            a = str(i)
            try:
                self.role_from_code(a)
            except UndefinedRoleError:
                return a


GlobalRoleList = GlobalRoleListClass()


class RoleList(QObject):
    """A mutable subset of the global roles, kept in sync with the registry."""

    rolesChanged = pyqtSignal()

    def __init__(self, args='', parent=None):
        super(RoleList, self).__init__(parent)
        self.roles = set()
        self.populate_from_codes(args)
        GlobalRoleList.rolesCleared.connect(self.none)
        GlobalRoleList.roleRemoved.connect(self.remove_code)
        GlobalRoleList.roleAdded.connect(self.rolesChanged)
        GlobalRoleList.rolesChanged.connect(self.rolesChanged)

    def all(self):
        self.roles = set()
        self.roles.update(GlobalRoleList.roles)
        self.rolesChanged.emit()

    @QtCore.pyqtSlot()
    def none(self):
        self.roles = set()
        self.rolesChanged.emit()

    def role_from_code(self, code):
        # NOTE(review): membership test is ``a.code in code`` (substring),
        # not equality -- preserved as-is; confirm against callers.
        possibilities = [a for a in self.roles if a.code in code]
        if len(possibilities) == 0:
            raise UndefinedRoleError(code)
        else:
            return possibilities.pop()

    def role_from_desc(self, desc):
        # NOTE(review): substring membership, as in role_from_code.
        possibilities = [a for a in self.roles if a.description in desc]
        if len(possibilities) == 0:
            raise UndefinedRoleError(desc)
        else:
            return possibilities.pop()

    def includes(self, role_or_code):
        """True if the given role (or a code/description for one) is listed."""
        if role_or_code in GlobalRoleList.roles:
            return role_or_code in self.roles
        try:
            self.role_from_code(role_or_code)
        except UndefinedRoleError:
            try:
                self.role_from_desc(role_or_code)
            except UndefinedRoleError:
                return False
            return True
        return True

    def add(self, a_role):
        if a_role not in self.roles:
            self.roles.add(a_role)
            self.rolesChanged.emit()

    def remove(self, a_role):
        if a_role in self.roles:
            self.roles.remove(a_role)
            self.rolesChanged.emit()

    @QtCore.pyqtSlot(str)
    def add_code(self, code):
        for a_role in GlobalRoleList.roles:
            if a_role.code == code.strip():
                self.roles.add(a_role)
                self.rolesChanged.emit()

    @QtCore.pyqtSlot(str)
    def remove_code(self, code):
        try:
            r = self.role_from_code(code)
        except UndefinedRoleError:
            return self
        self.remove(r)

    def populate_from_codes(self, array_of_strings):
        """Reset the list from a whitespace-separated string of codes."""
        self.roles = set()
        codes = array_of_strings.split()
        for code in codes:
            self.add_code(code)
        self.rolesChanged.emit()

    def list_of_codes(self):
        result = ''
        for a_role in self.roles:
            result = ' '.join([result, a_role.code])
        return result

    def number_of_roles(self):
        return len(self.roles)

    def _clean(self):
        # BUG FIX: keep ``roles`` a set -- previously this stored the raw
        # filter() result, so later ``add()`` calls would fail.
        self.roles = set(a_role for a_role in GlobalRoleList.roles
                         if self.includes(a_role))
        return self


GlobalRoleList.clear()
import sys import sysconfig import os import re import errno import shlex import shutil import subprocess import itertools import concurrent.futures from site import USER_SITE from glob import iglob from collections import namedtuple, deque from xml.sax.saxutils import escape from distutils import version import pkg_resources try: import docutils.core except ImportError: docutils = None from PyQt4.QtGui import ( QWidget, QDialog, QLabel, QLineEdit, QTreeView, QHeaderView, QTextBrowser, QTextOption, QDialogButtonBox, QProgressDialog, QVBoxLayout, QPalette, QStandardItemModel, QStandardItem, QSortFilterProxyModel, QItemSelectionModel, QStyle, QStyledItemDelegate, QStyleOptionViewItemV4, QApplication, QHBoxLayout ) from PyQt4.QtCore import ( Qt, QObject, QMetaObject, QEvent, QSize, QTimer, QThread, Q_ARG ) from PyQt4.QtCore import pyqtSignal as Signal, pyqtSlot as Slot from ..gui.utils import message_warning, message_information, \ message_critical as message_error from ..help.manager import get_dist_meta, trim OFFICIAL_ADDONS = [ "Orange-Bioinformatics", "Orange3-DataFusion", "Orange3-Prototypes", "Orange3-Text", "Orange3-Network", "Orange3-Associate", ] Installable = namedtuple( "Installable", ["name", "version", "summary", "description", "package_url", "release_urls"] ) ReleaseUrl = namedtuple( "ReleaseUrl", ["filename", "url", "size", "python_version", "package_type" ] ) Available = namedtuple( "Available", ["installable"] ) Installed = namedtuple( "Installed", ["installable", "local"] ) def is_updatable(item): if isinstance(item, Available): return False elif item.installable is None: return False else: inst, dist = item try: v1 = version.StrictVersion(dist.version) v2 = version.StrictVersion(inst.version) except ValueError: pass else: return v1 < v2 return (version.LooseVersion(dist.version) < version.LooseVersion(inst.version)) class TristateCheckItemDelegate(QStyledItemDelegate): """ A QStyledItemDelegate which properly toggles Qt.ItemIsTristate check state 
transitions on user interaction. """ def editorEvent(self, event, model, option, index): flags = model.flags(index) if not flags & Qt.ItemIsUserCheckable or \ not option.state & QStyle.State_Enabled or \ not flags & Qt.ItemIsEnabled: return False checkstate = model.data(index, Qt.CheckStateRole) if checkstate is None: return False widget = option.widget style = widget.style() if widget else QApplication.style() if event.type() in {QEvent.MouseButtonPress, QEvent.MouseButtonRelease, QEvent.MouseButtonDblClick}: pos = event.pos() opt = QStyleOptionViewItemV4(option) self.initStyleOption(opt, index) rect = style.subElementRect( QStyle.SE_ItemViewItemCheckIndicator, opt, widget) if event.button() != Qt.LeftButton or not rect.contains(pos): return False if event.type() in {QEvent.MouseButtonPress, QEvent.MouseButtonDblClick}: return True elif event.type() == QEvent.KeyPress: if event.key() != Qt.Key_Space and event.key() != Qt.Key_Select: return False else: return False if model.flags(index) & Qt.ItemIsTristate: checkstate = (checkstate + 1) % 3 else: checkstate = \ Qt.Unchecked if checkstate == Qt.Checked else Qt.Checked return model.setData(index, checkstate, Qt.CheckStateRole) class AddonManagerWidget(QWidget): statechanged = Signal() def __init__(self, parent=None, **kwargs): super(AddonManagerWidget, self).__init__(parent, **kwargs) self.setLayout(QVBoxLayout()) self.__header = QLabel( wordWrap=True, textFormat=Qt.RichText ) self.__search = QLineEdit( placeholderText=self.tr("Filter") ) self.layout().addWidget(self.__search) self.__view = view = QTreeView( rootIsDecorated=False, editTriggers=QTreeView.NoEditTriggers, selectionMode=QTreeView.SingleSelection, alternatingRowColors=True ) self.__view.setItemDelegateForColumn(0, TristateCheckItemDelegate()) self.layout().addWidget(view) self.__model = model = QStandardItemModel() model.setHorizontalHeaderLabels(["", "Name", "Version", "Action"]) model.dataChanged.connect(self.__data_changed) proxy = 
QSortFilterProxyModel( filterKeyColumn=1, filterCaseSensitivity=Qt.CaseInsensitive ) proxy.setSourceModel(model) self.__search.textChanged.connect(proxy.setFilterFixedString) view.setModel(proxy) view.selectionModel().selectionChanged.connect( self.__update_details ) header = self.__view.header() header.setResizeMode(0, QHeaderView.Fixed) header.setResizeMode(2, QHeaderView.ResizeToContents) self.__details = QTextBrowser( frameShape=QTextBrowser.NoFrame, readOnly=True, lineWrapMode=QTextBrowser.WidgetWidth, openExternalLinks=True, ) self.__details.setWordWrapMode(QTextOption.WordWrap) palette = QPalette(self.palette()) palette.setColor(QPalette.Base, Qt.transparent) self.__details.setPalette(palette) self.layout().addWidget(self.__details) def set_items(self, items): self.__items = items model = self.__model model.clear() model.setHorizontalHeaderLabels(["", "Name", "Version", "Action"]) for item in items: if isinstance(item, Installed): installed = True ins, dist = item name = dist.project_name summary = get_dist_meta(dist).get("Summary", "") version = ins.version if ins is not None else dist.version else: installed = False (ins,) = item dist = None name = ins.name summary = ins.summary version = ins.version updatable = is_updatable(item) item1 = QStandardItem() item1.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | (Qt.ItemIsTristate if updatable else 0)) if installed and updatable: item1.setCheckState(Qt.PartiallyChecked) elif installed: item1.setCheckState(Qt.Checked) else: item1.setCheckState(Qt.Unchecked) item2 = QStandardItem(name) item2.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) item2.setToolTip(summary) item2.setData(item, Qt.UserRole) if updatable: version = "{} < {}".format(dist.version, ins.version) item3 = QStandardItem(version) item3.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) item4 = QStandardItem() item4.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) model.appendRow([item1, item2, item3, item4]) 
self.__view.resizeColumnToContents(0) self.__view.setColumnWidth( 1, max(150, self.__view.sizeHintForColumn(1))) self.__view.setColumnWidth( 2, max(150, self.__view.sizeHintForColumn(2))) if self.__items: self.__view.selectionModel().select( self.__view.model().index(0, 0), QItemSelectionModel.Select | QItemSelectionModel.Rows ) def item_state(self): steps = [] for i, item in enumerate(self.__items): modelitem = self.__model.item(i, 0) state = modelitem.checkState() if modelitem.flags() & Qt.ItemIsTristate and state == Qt.Checked: steps.append((Upgrade, item)) elif isinstance(item, Available) and state == Qt.Checked: steps.append((Install, item)) elif isinstance(item, Installed) and state == Qt.Unchecked: steps.append((Uninstall, item)) return steps def __selected_row(self): indices = self.__view.selectedIndexes() if indices: proxy = self.__view.model() indices = [proxy.mapToSource(index) for index in indices] return indices[0].row() else: return -1 def __data_changed(self, topleft, bottomright): rows = range(topleft.row(), bottomright.row() + 1) proxy = self.__view.model() map_to_source = proxy.mapToSource for i in rows: sourceind = map_to_source(proxy.index(i, 0)) modelitem = self.__model.itemFromIndex(sourceind) actionitem = self.__model.item(modelitem.row(), 3) item = self.__items[modelitem.row()] state = modelitem.checkState() flags = modelitem.flags() if flags & Qt.ItemIsTristate and state == Qt.Checked: actionitem.setText("Update") elif isinstance(item, Available) and state == Qt.Checked: actionitem.setText("Install") elif isinstance(item, Installed) and state == Qt.Unchecked: actionitem.setText("Uninstall") else: actionitem.setText("") self.statechanged.emit() def __update_details(self): index = self.__selected_row() if index == -1: self.__details.setText("") else: item = self.__model.item(index, 1) item = item.data(Qt.UserRole) assert isinstance(item, (Installed, Available)) # if isinstance(item, Available): # self.__installed_label.setText("") # 
self.__available_label.setText(str(item.available.version)) # elif item.installable is not None: # self.__installed_label.setText(str(item.local.version)) # self.__available_label.setText(str(item.available.version)) # else: # self.__installed_label.setText(str(item.local.version)) # self.__available_label.setText("") text = self._detailed_text(item) self.__details.setText(text) def _detailed_text(self, item): if isinstance(item, Installed): remote, dist = item if remote is None: description = get_dist_meta(dist).get("Description") description = description else: description = remote.description else: description = item[0].description if docutils is not None: try: html = docutils.core.publish_string( trim(description), writer_name="html", settings_overrides={ "output-encoding": "utf-8", # "embed-stylesheet": False, # "stylesheet": [], # "stylesheet_path": [] } ).decode("utf-8") except docutils.utils.SystemMessage: html = "<pre>{}<pre>".format(escape(description)) except Exception: html = "<pre>{}<pre>".format(escape(description)) else: html = "<pre>{}<pre>".format(escape(description)) return html def sizeHint(self): return QSize(480, 420) def method_queued(method, sig, conntype=Qt.QueuedConnection): name = method.__name__ obj = method.__self__ assert isinstance(obj, QObject) def call(*args): args = [Q_ARG(atype, arg) for atype, arg in zip(sig, args)] return QMetaObject.invokeMethod(obj, name, conntype, *args) return call class AddonManagerDialog(QDialog): _packages = None def __init__(self, parent=None, **kwargs): super().__init__(parent, **kwargs) self.setLayout(QVBoxLayout()) self.layout().setContentsMargins(0, 0, 0, 0) self.addonwidget = AddonManagerWidget() self.layout().addWidget(self.addonwidget) info_bar = QWidget() info_layout = QHBoxLayout() info_bar.setLayout(info_layout) self.layout().addWidget(info_bar) buttons = QDialogButtonBox( orientation=Qt.Horizontal, standardButtons=QDialogButtonBox.Ok | QDialogButtonBox.Cancel ) 
buttons.accepted.connect(self.__accepted) buttons.rejected.connect(self.reject) self.layout().addWidget(buttons) # No system access => install into user site-packages self.user_install = not os.access(sysconfig.get_path("purelib"), os.W_OK) self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) if AddonManagerDialog._packages is None: self._f_pypi_addons = self._executor.submit(list_pypi_addons) else: self._f_pypi_addons = concurrent.futures.Future() self._f_pypi_addons.set_result(AddonManagerDialog._packages) self._f_pypi_addons.add_done_callback( method_queued(self._set_packages, (object,)) ) self.__progress = QProgressDialog( self, Qt.Sheet, minimum=0, maximum=0, labelText=self.tr("Retrieving package list"), sizeGripEnabled=False, windowTitle="Progress" ) self.__progress.rejected.connect(self.reject) self.__thread = None self.__installer = None @Slot(object) def _set_packages(self, f): if self.__progress.isVisible(): self.__progress.close() try: packages = f.result() except (IOError, OSError) as err: message_warning( "Could not retrieve package list", title="Error", informative_text=str(err), parent=self ) packages = [] except Exception: raise else: AddonManagerDialog._packages = packages installed = list_installed_addons() dists = {dist.project_name: dist for dist in installed} packages = {pkg.name: pkg for pkg in packages} # For every pypi available distribution not listed by # list_installed_addons, check if it is actually already # installed. ws = pkg_resources.WorkingSet() for pkg_name in set(packages.keys()).difference(set(dists.keys())): try: d = ws.find(pkg_resources.Requirement.parse(pkg_name)) except pkg_resources.VersionConflict: pass except ValueError: # Requirements.parse error ? 
pass else: if d is not None: dists[d.project_name] = d project_names = unique( itertools.chain(packages.keys(), dists.keys()) ) items = [] for name in project_names: if name in dists and name in packages: item = Installed(packages[name], dists[name]) elif name in dists: item = Installed(None, dists[name]) elif name in packages: item = Available(packages[name]) else: assert False items.append(item) self.addonwidget.set_items(items) def showEvent(self, event): super().showEvent(event) if not self._f_pypi_addons.done(): QTimer.singleShot(0, self.__progress.show) def done(self, retcode): super().done(retcode) self._f_pypi_addons.cancel() self._executor.shutdown(wait=False) if self.__thread is not None: self.__thread.quit() self.__thread.wait(1000) def closeEvent(self, event): super().closeEvent(event) self._f_pypi_addons.cancel() self._executor.shutdown(wait=False) if self.__thread is not None: self.__thread.quit() self.__thread.wait(1000) def __accepted(self): steps = self.addonwidget.item_state() if steps: # Move all uninstall steps to the front steps = sorted( steps, key=lambda step: 0 if step[0] == Uninstall else 1 ) self.__installer = Installer(steps=steps, user_install=self.user_install) self.__thread = QThread(self) self.__thread.start() self.__installer.moveToThread(self.__thread) self.__installer.finished.connect(self.__on_installer_finished) self.__installer.error.connect(self.__on_installer_error) self.__installer.installStatusChanged.connect( self.__progress.setLabelText) self.__progress.show() self.__progress.setLabelText("Installing") self.__installer.start() else: self.accept() def __on_installer_error(self, command, pkg, retcode, output): message_error( "An error occurred while running a subprocess", title="Error", informative_text="{} exited with non zero status.".format(command), details="".join(output), parent=self ) self.reject() def __on_installer_finished(self): message = ( ("Changes successfully applied in <i>{}</i>.<br>".format( USER_SITE) if 
self.user_install else '') + "Please restart Orange for changes to take effect.") message_information(message, parent=self) self.accept() def list_pypi_addons(): """ List add-ons available on pypi. """ from ..config import ADDON_PYPI_SEARCH_SPEC import xmlrpc.client pypi = xmlrpc.client.ServerProxy("http://pypi.python.org/pypi") addons = pypi.search(ADDON_PYPI_SEARCH_SPEC) for addon in OFFICIAL_ADDONS: if not any(a for a in addons if a['name'] == addon): versions = pypi.package_releases(addon) if versions: addons.append({"name": addon, "version": max(versions)}) multicall = xmlrpc.client.MultiCall(pypi) for addon in addons: name, version = addon["name"], addon["version"] multicall.release_data(name, version) multicall.release_urls(name, version) results = list(multicall()) release_data = results[::2] release_urls = results[1::2] packages = [] for release, urls in zip(release_data, release_urls): if release and urls: # ignore releases without actual source/wheel/egg files, # or with empty metadata (deleted from PyPi?). 
urls = [ReleaseUrl(url["filename"], url["url"], url["size"], url["python_version"], url["packagetype"]) for url in urls] packages.append( Installable(release["name"], release["version"], release["summary"], release["description"], release["package_url"], urls) ) return packages def list_installed_addons(): from ..config import ADDON_ENTRY workingset = pkg_resources.WorkingSet(sys.path) return [ep.dist for ep in workingset.iter_entry_points(ADDON_ENTRY)] def unique(iterable): seen = set() def observed(el): observed = el in seen seen.add(el) return observed return (el for el in iterable if not observed(el)) Install, Upgrade, Uninstall = 1, 2, 3 class Installer(QObject): installStatusChanged = Signal(str) started = Signal() finished = Signal() error = Signal(str, object, int, list) def __init__(self, parent=None, steps=[], user_install=False): QObject.__init__(self, parent) self.__interupt = False self.__queue = deque(steps) self.__user_install = user_install def start(self): QTimer.singleShot(0, self._next) def interupt(self): self.__interupt = True def setStatusMessage(self, message): self.__statusMessage = message self.installStatusChanged.emit(message) @Slot() def _next(self): def fmt_cmd(cmd): return "Command failed: python " + " ".join(map(shlex.quote, cmd)) command, pkg = self.__queue.popleft() if command == Install: inst = pkg.installable self.setStatusMessage("Installing {}".format(inst.name)) cmd = (["-m", "pip", "install"] + (["--user"] if self.__user_install else []) + [inst.name]) process = python_process(cmd, bufsize=-1, universal_newlines=True) retcode, output = self.__subprocessrun(process) if retcode != 0: self.error.emit(fmt_cmd(cmd), pkg, retcode, output) return elif command == Upgrade: inst = pkg.installable self.setStatusMessage("Upgrading {}".format(inst.name)) cmd = (["-m", "pip", "install", "--upgrade", "--no-deps"] + (["--user"] if self.__user_install else []) + [inst.name]) process = python_process(cmd, bufsize=-1, universal_newlines=True) 
retcode, output = self.__subprocessrun(process) if retcode != 0: self.error.emit(fmt_cmd(cmd), pkg, retcode, output) return # Why is this here twice?? cmd = (["-m", "pip", "install"] + (["--user"] if self.__user_install else []) + [inst.name]) process = python_process(cmd, bufsize=-1, universal_newlines=True) retcode, output = self.__subprocessrun(process) if retcode != 0: self.error.emit(fmt_cmd(cmd), pkg, retcode, output) return elif command == Uninstall: dist = pkg.local self.setStatusMessage("Uninstalling {}".format(dist.project_name)) cmd = ["-m", "pip", "uninstall", "--yes", dist.project_name] process = python_process(cmd, bufsize=-1, universal_newlines=True) retcode, output = self.__subprocessrun(process) if self.__user_install: # Remove the package forcefully; pip doesn't (yet) uninstall # --user packages (or any package outside sys.prefix?) # google: pip "Not uninstalling ?" "outside environment" install_path = os.path.join( USER_SITE, re.sub('[^\w]', '_', dist.project_name)) pip_record = next(iglob(install_path + '*.dist-info/RECORD'), None) if pip_record: with open(pip_record) as f: files = [line.rsplit(',', 2)[0] for line in f] else: files = [os.path.join( USER_SITE, 'orangecontrib', dist.project_name.split('-')[-1].lower()),] for match in itertools.chain(files, iglob(install_path + '*')): print('rm -rf', match) if os.path.isdir(match): shutil.rmtree(match) elif os.path.exists(match): os.unlink(match) if retcode != 0: self.error.emit(fmt_cmd(cmd), pkg, retcode, output) return if self.__queue: QTimer.singleShot(0, self._next) else: self.finished.emit() def __subprocessrun(self, process): output = [] while process.poll() is None: try: line = process.stdout.readline() except IOError as ex: if ex.errno != errno.EINTR: raise else: output.append(line) print(line, end="") # Read remaining output if any line = process.stdout.read() if line: output.append(line) print(line, end="") return process.returncode, output def python_process(args, script_name=None, 
cwd=None, env=None, **kwargs): """ Run a `sys.executable` in a subprocess with `args`. """ executable = sys.executable if os.name == "nt" and os.path.basename(executable) == "pythonw.exe": # Don't run the script with a 'gui' (detached) process. dirname = os.path.dirname(executable) executable = os.path.join(dirname, "python.exe") # by default a new console window would show up when executing the # script startupinfo = subprocess.STARTUPINFO() if hasattr(subprocess, "STARTF_USESHOWWINDOW"): startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW else: # This flag was missing in inital releases of 2.7 startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW kwargs["startupinfo"] = startupinfo if script_name is not None: script = script_name else: script = executable process = subprocess.Popen( [script] + args, executable=executable, cwd=cwd, env=env, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, **kwargs ) return process
# All Rights Reserved 2020
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""OpenStackClient commands for Tap-as-a-Service tap services."""

import logging

from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from osc_lib.utils import columns as column_util

from neutronclient._i18n import _
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.osc import utils as nc_osc_utils

LOG = logging.getLogger(__name__)

# Resource names and REST paths for the TaaS API extension.
TAP_SERVICE = 'tap_service'
TAP_SERVICES = '%ss' % TAP_SERVICE
path = 'taas'
object_path = '/%s/' % path
resource_path = '/%s/%%s/%%s' % path

# (API attribute, column header, listing visibility)
_attr_map = (
    ('id', 'ID', column_util.LIST_BOTH),
    ('tenant_id', 'Tenant', column_util.LIST_LONG_ONLY),
    ('name', 'Name', column_util.LIST_BOTH),
    ('port_id', 'Port', column_util.LIST_BOTH),
    ('status', 'Status', column_util.LIST_BOTH),
)


def _add_updatable_args(parser):
    """Add the arguments shared by the create and update commands."""
    parser.add_argument(
        '--name',
        help=_('Name of this Tap service.'))
    parser.add_argument(
        '--description',
        help=_('Description for this Tap service.'))


def _updatable_args2body(parsed_args, body):
    """Copy the updatable arguments from *parsed_args* into *body*."""
    neutronv20.update_dict(parsed_args, body, ['name', 'description'])


def _updatable_attrs(parsed_args):
    """Return a dict with whichever of name/description were supplied.

    Shared by CreateTapService and UpdateTapService so the two commands
    cannot drift apart.
    """
    attrs = {}
    if parsed_args.name is not None:
        attrs['name'] = str(parsed_args.name)
    if parsed_args.description is not None:
        attrs['description'] = str(parsed_args.description)
    return attrs


class CreateTapService(command.ShowOne):
    _description = _("Create a tap service")

    def get_parser(self, prog_name):
        parser = super(CreateTapService, self).get_parser(prog_name)
        nc_osc_utils.add_project_owner_option_to_parser(parser)
        _add_updatable_args(parser)
        parser.add_argument(
            '--port',
            dest='port_id',
            required=True,
            metavar="PORT",
            help=_('Port to which the Tap service is connected.'))
        return parser

    def take_action(self, parsed_args):
        """POST a new tap service and return it in ShowOne format."""
        client = self.app.client_manager.neutronclient
        attrs = _updatable_attrs(parsed_args)
        if parsed_args.port_id is not None:
            # Accept either a port name or a port ID on the command line.
            port_id = client.find_resource('port', parsed_args.port_id)['id']
            attrs['port_id'] = port_id
        if 'project' in parsed_args and parsed_args.project is not None:
            project_id = nc_osc_utils.find_project(
                self.app.client_manager.identity,
                parsed_args.project,
                parsed_args.project_domain,
            ).id
            attrs['tenant_id'] = project_id
        body = {TAP_SERVICE: attrs}
        obj = client.post('%s%s' % (object_path, TAP_SERVICES),
                          body=body)[TAP_SERVICE]
        columns, display_columns = column_util.get_columns(obj, _attr_map)
        data = osc_utils.get_dict_properties(obj, columns)
        return display_columns, data


class ListTapService(command.Lister):
    _description = _("List tap services that belong to a given tenant")

    def get_parser(self, prog_name):
        parser = super(ListTapService, self).get_parser(prog_name)
        nc_osc_utils.add_project_owner_option_to_parser(parser)
        return parser

    def take_action(self, parsed_args):
        """GET the tap-service collection, optionally filtered by project."""
        client = self.app.client_manager.neutronclient
        params = {}
        if parsed_args.project is not None:
            project_id = nc_osc_utils.find_project(
                self.app.client_manager.identity,
                parsed_args.project,
                parsed_args.project_domain,
            ).id
            params['tenant_id'] = project_id
        objs = client.list(TAP_SERVICES,
                           '%s%s' % (object_path, TAP_SERVICES),
                           retrieve_all=True, params=params)[TAP_SERVICES]
        headers, columns = column_util.get_column_definitions(
            _attr_map, long_listing=True)
        return (headers, (osc_utils.get_dict_properties(s, columns)
                          for s in objs))


class ShowTapService(command.ShowOne):
    _description = _("Show information of a given tap service")

    def get_parser(self, prog_name):
        parser = super(ShowTapService, self).get_parser(prog_name)
        parser.add_argument(
            TAP_SERVICE,
            metavar="<%s>" % TAP_SERVICE,
            help=_("ID or name of tap service to look up."),
        )
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.neutronclient
        # Named tap_service_id (not 'id') to avoid shadowing the builtin.
        tap_service_id = client.find_resource(
            TAP_SERVICE, parsed_args.tap_service)['id']
        obj = client.get(resource_path % (TAP_SERVICES,
                                          tap_service_id))[TAP_SERVICE]
        columns, display_columns = column_util.get_columns(obj, _attr_map)
        data = osc_utils.get_dict_properties(obj, columns)
        return display_columns, data


class DeleteTapService(command.Command):
    _description = _("Delete a tap service")

    def get_parser(self, prog_name):
        parser = super(DeleteTapService, self).get_parser(prog_name)
        parser.add_argument(
            TAP_SERVICE,
            metavar="<%s>" % TAP_SERVICE,
            nargs="+",
            help=_("ID(s) or name(s) of tap service to delete."),
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each requested tap service; raise if any deletion failed."""
        client = self.app.client_manager.neutronclient
        fails = 0
        for id_or_name in parsed_args.tap_service:
            try:
                tap_service_id = client.find_resource(
                    TAP_SERVICE, id_or_name)['id']
                client.delete(resource_path % (TAP_SERVICES, tap_service_id))
                # NOTE(review): kept at WARNING (as in the original) so the
                # message shows at osc's default verbosity; confirm whether
                # INFO was intended before changing.
                LOG.warning("Tap service %(id)s deleted",
                            {'id': tap_service_id})
            except Exception as e:
                # Keep going so one bad name does not abort the batch.
                fails += 1
                LOG.error("Failed to delete tap service with name or ID "
                          "'%(id_or_name)s': %(e)s",
                          {'id_or_name': id_or_name, 'e': e})
        if fails > 0:
            msg = (_("Failed to delete %(fails)s of %(total)s tap service.") %
                   {'fails': fails, 'total': len(parsed_args.tap_service)})
            raise exceptions.CommandError(msg)


class UpdateTapService(command.ShowOne):
    _description = _("Update a tap service.")

    def get_parser(self, prog_name):
        parser = super(UpdateTapService, self).get_parser(prog_name)
        parser.add_argument(
            TAP_SERVICE,
            metavar="<%s>" % TAP_SERVICE,
            help=_("ID or name of tap service to update."),
        )
        _add_updatable_args(parser)
        return parser

    def take_action(self, parsed_args):
        """PUT the changed attributes and return the updated resource."""
        client = self.app.client_manager.neutronclient
        tap_service_id = client.find_resource(
            TAP_SERVICE, parsed_args.tap_service)['id']
        attrs = _updatable_attrs(parsed_args)
        body = {TAP_SERVICE: attrs}
        obj = client.put(resource_path % (TAP_SERVICES, tap_service_id),
                         body)[TAP_SERVICE]
        columns, display_columns = column_util.get_columns(obj, _attr_map)
        data = osc_utils.get_dict_properties(obj, columns)
        return display_columns, data
# Copyright 2014 # The Cloudscaling Group, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ec2api base exception handling. Includes decorator for re-raising ec2api-type exceptions. SHOULD include dedicated exception logging. """ import sys from oslo_config import cfg from oslo_log import log as logging import six from ec2api.i18n import _ LOG = logging.getLogger(__name__) exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='Make exception message format errors fatal'), ] CONF = cfg.CONF CONF.register_opts(exc_log_opts) class EC2APIException(Exception): """Base EC2 API Exception To correctly use this class, inherit from it and define a 'msg_fmt' property. That msg_fmt will get printf'd with the keyword arguments provided to the constructor. 
""" msg_fmt = _('An unknown exception occurred.') def __init__(self, message=None, **kwargs): self.kwargs = kwargs if not message: try: message = self.msg_fmt % kwargs except Exception: exc_info = sys.exc_info() # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation for ' '%s exception'), self.__class__.__name__) for name, value in kwargs.iteritems(): LOG.error('%s: %s' % (name, value)) if CONF.fatal_exception_format_errors: raise exc_info[0], exc_info[1], exc_info[2] else: # at least get the core message out if something happened message = self.msg_fmt elif not isinstance(message, six.string_types): LOG.error(_("Message '%(msg)s' for %(ex)s exception is not " "a string"), {'msg': message, 'ex': self.__class__.__name__}) if CONF.fatal_exception_format_errors: raise TypeError(_('Invalid exception message format')) else: message = self.msg_fmt super(EC2APIException, self).__init__(message) def format_message(self): # NOTE(mrodden): use the first argument to the python Exception object # which should be our full EC2APIException message, (see __init__) return self.args[0] # Internal ec2api exceptions class EC2APIConfigNotFound(EC2APIException): msg_fmt = _("Could not find config at %(path)s") class EC2APIPasteAppNotFound(EC2APIException): msg_fmt = _("Could not load paste app '%(name)s' from %(path)s") class EC2KeystoneDiscoverFailure(EC2APIException): msg_fmt = _("Could not discover keystone versions.") # Internal ec2api metadata exceptions class EC2MetadataException(EC2APIException): pass class EC2MetadataNotFound(EC2MetadataException): pass class EC2MetadataInvalidAddress(EC2MetadataException): pass # Intermediate exception classes to organize AWS exception hierarchy class EC2Exception(EC2APIException): """Base EC2 compliant exception To correctly use this class, inherit from it and define a 'ec2_code' property if a new class name doesn't coincide with AWS Error Code. 
""" code = 400 class EC2InvalidException(EC2Exception): pass class EC2IncorrectStateException(EC2Exception): pass class EC2DuplicateException(EC2InvalidException): pass class EC2ReservedException(EC2InvalidException): pass class EC2InUseException(EC2InvalidException): pass class EC2NotFoundException(EC2InvalidException): pass class EC2OverlimitException(EC2Exception): pass # AWS compliant exceptions class Unsupported(EC2Exception): msg_fmt = _("The specified request is unsupported. %(reason)s") class UnsupportedOperation(EC2Exception): msg_fmt = _('The specified request includes an unsupported operation.') class OperationNotPermitted(EC2Exception): msg_fmt = _('The specified operation is not allowed.') class InvalidRequest(EC2InvalidException): msg_fmt = _('The request received was invalid.') class InvalidAttribute(EC2InvalidException): msg_fmt = _("Attribute not supported: %(attr)s") class InvalidID(EC2InvalidException): msg_fmt = _("The ID '%(id)s' is not valid") class InvalidInput(EC2InvalidException): msg_fmt = _("Invalid input received: %(reason)s") class AuthFailure(EC2InvalidException): msg_fmt = _('Not authorized.') class ValidationError(EC2InvalidException): msg_fmt = _("%(reason)s") class MissingParameter(EC2InvalidException): msg_fmt = _("The required parameter '%(param)s' is missing") class InvalidParameter(EC2InvalidException): msg_fmt = _("The property '%(name)s' is not valid") class InvalidParameterValue(EC2InvalidException): msg_fmt = _("Value (%(value)s) for parameter %(parameter)s is invalid. " "%(reason)s") class InvalidFilter(EC2InvalidException): msg_fmt = _('The filter is invalid.') class InvalidParameterCombination(EC2InvalidException): msg_fmt = _('The combination of parameters in incorrect') class InvalidVpcRange(EC2InvalidException): ec2_code = 'InvalidVpc.Range' msg_fmt = _("The CIDR '%(cidr_block)s' is invalid, kindly input a netmask between /16 and /28. 
" "Please refer VPC API Reference Guide for more details.") class OverlappedVpcRange(EC2InvalidException): ec2_code = 'OverlappedVpc.Range' msg_fmt = _("This CIDR '%(new_cidr_block)s' overlaps with the existing CIDR '%(cidr_block)s' having vpc-id '%(vpc_id)s'.Please enter a non-overlapping CIDR.") class InvalidSubnetRange(EC2InvalidException): ec2_code = 'InvalidSubnet.Range' msg_fmt = _("The CIDR '%(cidr_block)s' is invalid, kindly input a netmask between /16 and /28. " "Please refer VPC API Reference Guide for more details.") class InvalidCidrRange(EC2InvalidException): ec2_code = 'InvalidCidrRange.range' msg_fmt = _("The CIDR '%(cidr_block)s' is invalid, kindly input a valid CIDR such as 172.0.0.0/16. " "Please refer VPC API Reference Guide for more details.") class OutOfVpcSubnetRange(EC2InvalidException): ec2_code = 'OutOfVpcSubnet.Range' msg_fmt = _("Invalid subnet range '%(cidr_block)s', kindly input a valid subnet within the VPC range '%(vpc_ipnet)s'. " "Please refer VPC API Reference Guide for more details.") class ReservedSubnetRange(EC2InvalidException): ec2_code = 'InvalidCidr.reserved' msg_fmt = _("The CIDR '%(cidr_block)s' is reserved, kindly input a valid CIDR such as 172.0.0.0/16. " "Please refer VPC API Reference Guide for more details.") class InvalidNetworkId(EC2InvalidException): ec2_code = 'InvalidNetworkId' msg_fmt = _("The CIDR '%(cidr_block)s' is invalid, kindly input a valid CIDR such as '%(ex_cidr_block)s'. " "Please refer VPC API Reference Guide for more details.") class InvalidSubnetConflict(EC2InvalidException): ec2_code = 'InvalidSubnet.Conflict' msg_fmt = _("The CIDR '%(cidr_block)s' conflicts with another subnet.") class InvalidInstanceId(EC2InvalidException): ec2_code = 'InvalidInstanceID' msg_fmt = _("There are multiple interfaces attached to instance " "'%(instance_id)s'. 
Please specify an interface ID for " "the operation instead.") class InvalidSnapshotIDMalformed(EC2InvalidException): ec2_code = 'InvalidSnapshotID.Malformed' # TODO(ft): Change the message with the real AWS message msg_fmt = _('The snapshot %(id)s ID is not valid') class IncorrectState(EC2IncorrectStateException): msg_fmt = _("The resource is in incorrect state for the request - reason: " "'%(reason)s'") class DependencyViolation(EC2IncorrectStateException): msg_fmt = _('Object %(obj1_id)s has dependent resource %(obj2_id)s') class CannotDelete(EC2IncorrectStateException): msg_fmt = _('Cannot delete the default VPC security group') class ResourceAlreadyAssociated(EC2IncorrectStateException): ec2_code = 'Resource.AlreadyAssociated' class AlreadyRjilIPAssociated(EC2IncorrectStateException): ec2_code = 'AlreadyAssociated.RJILIP' msg_fmt = _("Only one RJIL Routable IP can be associated with an instance. This instance already has an associated RJIL Routable IP '%(public_ip)s' having allocation-id '%(allocation_id)s'.") class GatewayNotAttached(EC2IncorrectStateException): ec2_code = 'Gateway.NotAttached' msg_fmt = _("resource %(igw_id)s is not attached to network %(vpc_id)s") class IncorrectInstanceState(EC2IncorrectStateException): msg_fmt = _("The instance '%(instance_id)s' is not in a state from which " "the requested operation can be performed.") class InvalidAMIIDUnavailable(EC2IncorrectStateException): ec2_code = 'InvalidAMIID.Unavailable' # TODO(ft): Change the message with the real AWS message msg_fmt = _("Image %(image_id)s is not active.") class InvalidNetworkInterfaceInUse(EC2InUseException): ec2_code = 'InvalidNetworkInterface.InUse' msg_fmt = _('Interface: %(interface_ids)s in use.') class InvalidIPAddressInUse(EC2InUseException): ec2_code = 'InvalidIPAddress.InUse' msg_fmt = _('Address %(ip_address)s is in use.') class InvalidKeyPairDuplicate(EC2DuplicateException): ec2_code = 'InvalidKeyPair.Duplicate' msg_fmt = _("Key pair '%(key_name)s' already 
exists.") class InvalidPermissionDuplicate(EC2DuplicateException): ec2_code = 'InvalidPermission.Duplicate' msg_fmt = _('The specified rule already exists for that security group.') class InvalidGroupDuplicate(EC2DuplicateException): ec2_code = 'InvalidGroup.Duplicate' msg_fmt = _("Security group '%(name)s' already exists.") class InvalidGroupReserved(EC2ReservedException): ec2_code = 'InvalidGroup.Duplicate' msg_fmt = _("This security group name '%(name)s' is reserved and cannot be used to create a new security group. Please refer VPC API Reference Guide for more details.") class RouteAlreadyExists(EC2DuplicateException): msg_fmt = _('The route identified by %(destination_cidr_block)s ' 'already exists.') class InvalidVpcIDNotFound(EC2NotFoundException): ec2_code = 'InvalidVpcID.NotFound' msg_fmt = _("The vpc ID '%(id)s' does not exist") class InvalidInternetGatewayIDNotFound(EC2NotFoundException): ec2_code = 'InvalidInternetGatewayID.NotFound' msg_fmt = _("The internetGateway ID '%(id)s' does not exist") class InvalidSubnetIDNotFound(EC2NotFoundException): ec2_code = 'InvalidSubnetID.NotFound' msg_fmt = _("The subnet ID '%(id)s' does not exist") class InvalidNetworkInterfaceIDNotFound(EC2NotFoundException): ec2_code = 'InvalidNetworkInterfaceID.NotFound' msg_fmt = _("Network interface %(id)s could not " "be found.") class InvalidAttachmentIDNotFound(EC2NotFoundException): ec2_code = 'InvalidAttachmentID.NotFound' msg_fmt = _("Attachment %(id)s could not " "be found.") class InvalidInstanceIDNotFound(EC2NotFoundException): ec2_code = 'InvalidInstanceID.NotFound' msg_fmt = _("The instance ID '%(id)s' does not exist") class InvalidDhcpOptionsIDNotFound(EC2NotFoundException): ec2_code = 'InvalidDhcpOptionsID.NotFound' msg_fmt = _("The dhcp options ID '%(id)s' does not exist") class InvalidAddressNotFound(EC2NotFoundException): ec2_code = 'InvalidAddress.NotFound' msg_fmt = _('The specified elastic IP address %(ip)s cannot be found.') class 
InvalidAllocationIDNotFound(EC2NotFoundException): ec2_code = 'InvalidAllocationID.NotFound' msg_fmt = _("The allocation ID '%(id)s' does not exist") class InvalidAssociationIDNotFound(EC2NotFoundException): ec2_code = 'InvalidAssociationID.NotFound' msg_fmt = _("The association ID '%(id)s' does not exist") class InvalidSecurityGroupIDNotFound(EC2NotFoundException): ec2_code = 'InvalidSecurityGroupID.NotFound' msg_fmt = _("The securityGroup ID '%(id)s' does not exist") class InvalidGroupNotFound(EC2NotFoundException): ec2_code = 'InvalidGroup.NotFound' msg_fmt = _("The security group ID '%(id)s' does not exist, kindly enter a valid security group ID.") class InvalidPermissionNotFound(EC2NotFoundException): ec2_code = 'InvalidPermission.NotFound' msg_fmt = _("This rule does not exist in the security group '%(sg_id)s'") class InvalidRouteTableIDNotFound(EC2NotFoundException): ec2_code = 'InvalidRouteTableID.NotFound' msg_fmt = _("The routeTable ID '%(id)s' does not exist") class InvalidRouteNotFound(EC2NotFoundException): ec2_code = 'InvalidRoute.NotFound' msg_fmt = _('No route with destination-cidr-block ' '%(destination_cidr_block)s in route table %(route_table_id)s') class InvalidAMIIDNotFound(EC2NotFoundException): ec2_code = 'InvalidAMIID.NotFound' msg_fmt = _("The image id '[%(id)s]' does not exist") class InvalidVolumeNotFound(EC2NotFoundException): ec2_code = 'InvalidVolume.NotFound' msg_fmt = _("The volume '%(id)s' does not exist.") class InvalidSnapshotNotFound(EC2NotFoundException): ec2_code = 'InvalidSnapshot.NotFound' msg_fmt = _("Snapshot %(id)s could not be found.") class InvalidKeypairNotFound(EC2NotFoundException): ec2_code = 'InvalidKeyPair.NotFound' msg_fmt = _("Keypair %(id)s is not found") class InvalidAvailabilityZoneNotFound(EC2NotFoundException): ec2_code = 'InvalidAvailabilityZone.NotFound' msg_fmt = _("Availability zone %(id)s not found") class ResourceLimitExceeded(EC2OverlimitException): msg_fmt = _('You have reached the limit of 
%(resource)s') class VpcLimitExceeded(EC2OverlimitException): msg_fmt = _('The maximum number of VPCs has been reached.') class SubnetLimitExceeded(EC2OverlimitException): msg_fmt = _('You have reached the limit on the number of subnets that you ' 'can create') class NetworkInterfaceLimitExceeded(EC2OverlimitException): msg_fmt = _('You have reached the limit of network interfaces for subnet' '%(subnet_id)s.') class AddressLimitExceeded(EC2OverlimitException): msg_fmt = _('The maximum number of addresses has been reached.') class SecurityGroupLimitExceeded(EC2OverlimitException): msg_fmt = _('You have reached the limit of security groups') class RulesPerSecurityGroupLimitExceeded(EC2OverlimitException): msg_fmt = _("You've reached the limit on the number of rules that " "you can add to a security group.")
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=no-member

import abc
import copy

from mongoengine import ValidationError
import pecan
from pecan import rest
import six
from six.moves import http_client

from st2common.models.api.base import jsexpose
from st2common import log as logging
from st2common.models.system.common import InvalidResourceReferenceError
from st2common.models.system.common import ResourceReference

LOG = logging.getLogger(__name__)

# Query parameter names which are always supported, mapped to the DB field
# they sort / filter on.
RESERVED_QUERY_PARAMS = {
    'id': 'id',
    'name': 'name',
    'sort': 'order_by'
}


@six.add_metaclass(abc.ABCMeta)
class ResourceController(rest.RestController):
    """
    Base REST controller providing generic "list" and "get one" operations on
    top of a persistence access layer (``access``) and an API model class
    (``model``). Subclasses must provide ``model``, ``access`` and
    ``supported_filters``.
    """
    model = abc.abstractproperty
    access = abc.abstractproperty
    supported_filters = abc.abstractproperty

    # Maximum value of limit which can be specified by user
    max_limit = 100

    query_options = {
        'sort': []
    }

    # A list of optional transformation functions for user provided filter
    # values (keyed by filter name).
    filter_transform_functions = {}

    def __init__(self):
        # Copy so per-instance mutation doesn't leak into the class attribute.
        self.supported_filters = copy.deepcopy(self.__class__.supported_filters)
        self.supported_filters.update(RESERVED_QUERY_PARAMS)

    @jsexpose()
    def get_all(self, **kwargs):
        return self._get_all(**kwargs)

    @jsexpose(arg_types=[str])
    def get_one(self, id):
        return self._get_one(id)

    def _get_all(self, exclude_fields=None, **kwargs):
        """
        Retrieve all the resources matching the provided query parameters.

        :param exclude_fields: A list of object fields to exclude.
        :type exclude_fields: ``list``
        """
        exclude_fields = exclude_fields or []

        # TODO: Why do we use comma delimited string, user can just specify
        # multiple values using ?sort=foo&sort=bar and we get a list back
        raw_sort = kwargs.get('sort')
        sort = raw_sort.split(',') if raw_sort else []

        db_sort_values = []
        for sort_key in sort:
            # Leading '-' / '+' selects descending / ascending order.
            if sort_key.startswith('-'):
                direction = '-'
                sort_key = sort_key[1:]
            elif sort_key.startswith('+'):
                direction = '+'
                sort_key = sort_key[1:]
            else:
                direction = ''

            if sort_key not in self.supported_filters:
                # Skip unsupported sort key
                continue

            sort_value = direction + self.supported_filters[sort_key]
            db_sort_values.append(sort_value)

        default_sort_values = copy.copy(self.query_options.get('sort'))
        kwargs['sort'] = db_sort_values if db_sort_values else default_sort_values

        # TODO: To protect us from DoS, we need to make max_limit mandatory
        offset = int(kwargs.pop('offset', 0))
        limit = kwargs.pop('limit', None)
        if limit and int(limit) > self.max_limit:
            limit = self.max_limit
        eop = offset + int(limit) if limit else None

        filters = {}

        for k, v in six.iteritems(self.supported_filters):
            filter_value = kwargs.get(k, None)

            if not filter_value:
                continue

            value_transform_function = self.filter_transform_functions.get(k, None)
            value_transform_function = value_transform_function or (lambda value: value)
            filter_value = value_transform_function(value=filter_value)

            # Dotted attribute paths become Mongo-style '__' lookups.
            filters['__'.join(v.split('.'))] = filter_value

        LOG.info('GET all %s with filters=%s', pecan.request.path, filters)

        instances = self.access.query(exclude_fields=exclude_fields, **filters)

        if limit:
            pecan.response.headers['X-Limit'] = str(limit)

        pecan.response.headers['X-Total-Count'] = str(instances.count())
        return [self.model.from_model(instance) for instance in instances[offset:eop]]

    def _get_one(self, id, exclude_fields=None):
        """
        Retrieve a single resource by its database id; abort with 404 when
        no matching resource exists.

        :param exclude_fields: A list of object fields to exclude.
        :type exclude_fields: ``list``
        """
        LOG.info('GET %s with id=%s', pecan.request.path, id)

        instance = None
        try:
            instance = self.access.get(id=id, exclude_fields=exclude_fields)
        except ValidationError:
            instance = None  # Someone supplied a mongo non-comformant id.

        if not instance:
            msg = 'Unable to identify resource with id "%s".' % id
            pecan.abort(http_client.NOT_FOUND, msg)

        result = self.model.from_model(instance)
        LOG.debug('GET %s with id=%s, client_result=%s', pecan.request.path, id, result)

        return result

    def _get_by_name_or_id(self, name_or_id):
        """
        Retrieve resource object by an id or a name.
        """
        resource_db = self._get_by_id(resource_id=name_or_id)

        if not resource_db:
            # Try name
            resource_db = self._get_by_name(resource_name=name_or_id)

        if not resource_db:
            # Fixed message typo: "name of id" -> "name or id".
            msg = 'Resource with a name or id "%s" not found' % (name_or_id)
            raise Exception(msg)

        return resource_db

    def _get_by_id(self, resource_id):
        # Best-effort lookup; any backend error is treated as "not found".
        try:
            resource_db = self.access.get_by_id(resource_id)
        except Exception:
            resource_db = None

        return resource_db

    def _get_by_name(self, resource_name):
        # Best-effort lookup; any backend error is treated as "not found".
        try:
            resource_db = self.access.get_by_name(resource_name)
        except Exception:
            resource_db = None

        return resource_db


class ContentPackResourceController(ResourceController):
    """
    Controller for resources which belong to a content pack and can therefore
    also be addressed by a "pack.name" reference in addition to an id.
    """
    include_reference = False

    @jsexpose(arg_types=[str])
    def get_one(self, ref_or_id):
        return self._get_one(ref_or_id)

    @jsexpose()
    def get_all(self, **kwargs):
        return self._get_all(**kwargs)

    def _get_one(self, ref_or_id):
        LOG.info('GET %s with ref_or_id=%s', pecan.request.path, ref_or_id)

        try:
            instance = self._get_by_ref_or_id(ref_or_id=ref_or_id)
        except Exception as e:
            # six.text_type(e) instead of e.message: ``message`` attribute is
            # gone in Python 3.
            LOG.exception(six.text_type(e))
            pecan.abort(http_client.NOT_FOUND, six.text_type(e))
            return

        result = self.model.from_model(instance)

        if result and self.include_reference:
            pack = getattr(result, 'pack', None)
            name = getattr(result, 'name', None)
            result.ref = ResourceReference(pack=pack, name=name).ref

        LOG.debug('GET %s with ref_or_id=%s, client_result=%s',
                  pecan.request.path, ref_or_id, result)

        return result

    def _get_all(self, **kwargs):
        result = super(ContentPackResourceController, self)._get_all(**kwargs)

        if self.include_reference:
            for item in result:
                pack = getattr(item, 'pack', None)
                name = getattr(item, 'name', None)
                item.ref = ResourceReference(pack=pack, name=name).ref

        return result

    def _get_by_ref_or_id(self, ref_or_id):
        """
        Retrieve resource object by an id or a reference.
        """
        if ResourceReference.is_resource_reference(ref_or_id):
            # references always contain a dot and id's can't contain it
            is_reference = True
        else:
            is_reference = False

        if is_reference:
            resource_db = self._get_by_ref(resource_ref=ref_or_id)
        else:
            resource_db = self._get_by_id(resource_id=ref_or_id)

        if not resource_db:
            # Fixed message typo: "reference of id" -> "reference or id".
            msg = 'Resource with a reference or id "%s" not found' % (ref_or_id)
            raise Exception(msg)

        return resource_db

    def _get_by_id(self, resource_id):
        # Best-effort lookup; any backend error is treated as "not found".
        try:
            resource_db = self.access.get_by_id(resource_id)
        except Exception:
            resource_db = None

        return resource_db

    def _get_by_ref(self, resource_ref):
        try:
            ref = ResourceReference.from_string_reference(ref=resource_ref)
        except Exception:
            return None

        resource_db = self.access.query(name=ref.name, pack=ref.pack).first()
        return resource_db

    def _get_filters(self, **kwargs):
        filters = copy.deepcopy(kwargs)
        ref = filters.get('ref', None)

        if ref:
            try:
                ref_obj = ResourceReference.from_string_reference(ref=ref)
            except InvalidResourceReferenceError:
                raise

            # Replace the 'ref' filter with the equivalent name + pack pair.
            filters['name'] = ref_obj.name
            filters['pack'] = ref_obj.pack
            del filters['ref']

        return filters
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class ExpressRouteCrossConnectionsOperations: """ExpressRouteCrossConnectionsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_07_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, **kwargs: Any ) -> AsyncIterable["_models.ExpressRouteCrossConnectionListResult"]: """Retrieves all the ExpressRouteCrossConnections in a subscription. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ExpressRouteCrossConnectionListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnectionListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-07-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ExpressRouteCrossConnectionListResult', 
pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCrossConnections'} # type: ignore def list_by_resource_group( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.ExpressRouteCrossConnectionListResult"]: """Retrieves all the ExpressRouteCrossConnections in a resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ExpressRouteCrossConnectionListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnectionListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-07-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_resource_group.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ExpressRouteCrossConnectionListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = 
prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections'} # type: ignore async def get( self, resource_group_name: str, cross_connection_name: str, **kwargs: Any ) -> "_models.ExpressRouteCrossConnection": """Gets details about the specified ExpressRouteCrossConnection. :param resource_group_name: The name of the resource group (peering location of the circuit). :type resource_group_name: str :param cross_connection_name: The name of the ExpressRouteCrossConnection (service key of the circuit). 
:type cross_connection_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ExpressRouteCrossConnection, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnection :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-07-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, cross_connection_name: str, parameters: "_models.ExpressRouteCrossConnection", **kwargs: Any ) -> "_models.ExpressRouteCrossConnection": cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-07-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'ExpressRouteCrossConnection') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if 
response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, cross_connection_name: str, parameters: "_models.ExpressRouteCrossConnection", **kwargs: Any ) -> AsyncLROPoller["_models.ExpressRouteCrossConnection"]: """Update the specified ExpressRouteCrossConnection. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param cross_connection_name: The name of the ExpressRouteCrossConnection. :type cross_connection_name: str :param parameters: Parameters supplied to the update express route crossConnection operation. :type parameters: ~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnection :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnection or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnection] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, cross_connection_name=cross_connection_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = 
{'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore

    # NOTE(review): everything below is autorest-generated from the 2020-07-01
    # network swagger (operations of ExpressRouteCrossConnectionsOperations; the
    # class header is above this view). Regenerate rather than hand-edit.

    async def update_tags(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        cross_connection_parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.ExpressRouteCrossConnection":
        """Updates an express route cross connection tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the cross connection.
        :type cross_connection_name: str
        :param cross_connection_parameters: Parameters supplied to update express route cross
         connection tags.
        :type cross_connection_parameters: ~azure.mgmt.network.v2020_07_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteCrossConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(cross_connection_parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'}  # type: ignore

    async def _list_arp_table_initial(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]:
        # Initial POST of the list-ARP-table long-running operation; the public
        # entry point is begin_list_arp_table below.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self._list_arp_table_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        # A 202 means the operation is still running and carries no body; only a
        # 200 response deserializes to a result.
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'}  # type: ignore

    async def begin_list_arp_table(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCircuitsArpTableListResult"]:
        """Gets the currently advertised ARP table associated with the express route cross connection in
        a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object
         for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsArpTableListResult
         or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.ExpressRouteCircuitsArpTableListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x keeps the raw PipelineResponse so the poller can
            # drive the LRO; deserialization happens in get_long_running_output.
            raw_result = await self._list_arp_table_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                device_path=device_path,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'}  # type: ignore

    async def _list_routes_table_summary_initial(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> Optional["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]:
        # Initial POST of the list-routes-table-summary long-running operation;
        # the public entry point is begin_list_routes_table_summary below.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self._list_routes_table_summary_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        # Only a 200 response carries a deserializable body (202 == still running).
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}  # type: ignore

    async def begin_list_routes_table_summary(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]:
        """Gets the route table summary associated with the express route cross connection in a resource
        group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object
         for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either
         ExpressRouteCrossConnectionsRoutesTableSummaryListResult or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Keep the raw PipelineResponse for the poller (see begin_list_arp_table).
            raw_result = await self._list_routes_table_summary_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                device_path=device_path,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}  # type: ignore

    async def _list_routes_table_initial(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]:
        # Initial POST of the list-routes-table long-running operation; the
        # public entry point is begin_list_routes_table below.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self._list_routes_table_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        # Only a 200 response carries a deserializable body (202 == still running).
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'}  # type: ignore

    async def begin_list_routes_table(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]:
        """Gets the currently advertised routes table associated with the express route cross connection
        in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object
         for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsRoutesTableListResult
         or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.ExpressRouteCircuitsRoutesTableListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Keep the raw PipelineResponse for the poller (see begin_list_arp_table).
            raw_result = await self._list_routes_table_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                device_path=device_path,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'}  # type: ignore
""" GFcalc module Code to compute the lattice Green function for diffusion; this entails inverting the "diffusion" matrix, which is infinite, singular, and has translational invariance. The solution involves fourier transforming to reciprocal space, inverting, and inverse fourier transforming back to real (lattice) space. The complication is that the inversion produces a second order pole which must be treated analytically. Subtracting off the pole then produces a discontinuity at the gamma-point (q=0), which also should be treated analytically. Then, the remaining function can be numerically inverse fourier transformed. """ __author__ = 'Dallas R. Trinkle' import numpy as np from onsager import PowerExpansion as PE import itertools from copy import deepcopy from numpy import linalg as LA from scipy.special import hyp1f1, gamma, expi #, gammainc # two quick shortcuts T3D, T2D = PE.Taylor3D, PE.Taylor2D factorial = PE.factorial # Some "helper objects"; mostly collected here so that YAML has full access to them class Fnl_p(object): def __init__(self, n, pm): """ Exponential cutoff function in Fourier space (p) :param n: power :param pm: pmax value """ self.n = n self.inv_pmax = 1 / pm def __call__(self, p): return (p ** self.n) * np.exp(-(p * self.inv_pmax) ** 2) class Fnl_u(object): def __init__(self, n, l, pm, prefactor, d=3): """ Inverse Fourier transform of exponential cutoff function into real space (u) for 3d and 2d :param n: power > -2 :param l: angular momentum >= 0 :param pm: pmax value :param prefactor: V/sqrt(prod_i d_i) :param d: dimensionality == 2, 3 """ self.a = (d + l + n) / 2 self.b = d / 2 + l self.l = l self.half_pm = 0.5 * pm self.log = (self.a == 0) # (n == -2 and l == 0 and d == 2) self.pre = (-1j) ** l * prefactor * (pm ** (d + n + l)) * gamma(self.a) / \ ((np.pi ** (d/2)) * (2 ** (d + l)) * gamma(self.b)) if not self.log else \ prefactor/(2*np.pi) def __call__(self, u): # return self.pre * u ** self.l * hyp1f1(self.a, self.b, -(u * 
self.half_pm) ** 2) if not self.log: return self.pre * u ** self.l * hyp1f1(self.a, self.b, -(u * self.half_pm) ** 2) else: if u == 0: return self.pre * (-0.5*np.euler_gamma + np.log(self.half_pm)) else: # incomplete Gamma(0,x) = -Ei(-x) (exponential integral), turns out... # return self.pre * (-np.euler_gamma - np.log(u) -0.5*gammainc(0, (u*self.half_pm)**2)) return self.pre * (-np.euler_gamma - np.log(u) + 0.5*expi(-(u*self.half_pm)**2)) class GFCrystalcalc(object): """ Class calculator for the Green function, designed to work with the Crystal class. This computes the bare vacancy GF. It requires a crystal, chemical identity for the vacancy, list of symmetry unique sites (to define energies / entropies uniquely), and a corresponding jumpnetwork for that vacancy. """ def __init__(self, crys, chem, sitelist, jumpnetwork, Nmax=4, kptwt = None): """ Initializes our calculator with the appropriate topology / connectivity. Doesn't require, at this point, the site probabilities or transition rates to be known. 
:param crys: Crystal object :param chem: index identifying the diffusing species :param sitelist: list, grouped into Wyckoff common positions, of unique sites :param jumpnetwork: list of unique transitions as lists of ((i,j), dx) :param Nmax: maximum range as estimator for kpt mesh generation :param kptwt: (optional) tuple of (kpts, wts) to short-circuit kpt mesh generation """ # this is really just used by loadHDF5() to circumvent __init__ if all(x is None for x in (crys, chem, sitelist, jumpnetwork)): return self.crys = crys self.chem = chem self.sitelist = sitelist.copy() # self.N = sum(1 for w in sitelist for i in w) # self.invmap = [0 for w in sitelist for i in w] self.N = sum(len(w) for w in sitelist) self.Ndiff = self.networkcount(jumpnetwork, self.N) # if self.Ndiff>1: # raise NotImplementedError('Cannot currently have {} disconnected networks'.format(self.Ndiff)) self.invmap = np.zeros(self.N, dtype=int) for ind, w in enumerate(sitelist): for i in w: self.invmap[i] = ind self.NG = len(self.crys.G) # number of group operations self.grouparray, self.indexpair = self.BreakdownGroups() # note: currently, we don't store jumpnetwork. If we want to rewrite the class # to allow a new kpoint mesh to be generated "on the fly", we'd need to store # a copy for regeneration # self.jumpnetwork = jumpnetwork # generate a kptmesh: now we try to make the mesh more "uniform" ?? 
bmagn = np.array([np.sqrt(np.dot(crys.reciplatt[:, i], crys.reciplatt[:, i])) for i in range(self.crys.dim)]) bmagn /= np.power(np.product(bmagn), 1 / self.crys.dim) # make sure we have even meshes self.kptgrid = np.array([2 * np.int(np.ceil(2 * Nmax * b)) for b in bmagn], dtype=int) \ if kptwt is None else np.zeros(self.crys.dim, dtype=int) self.kpts, self.wts = crys.reducekptmesh(crys.fullkptmesh(self.kptgrid)) \ if kptwt is None else deepcopy(kptwt) self.Nkpt = self.kpts.shape[0] # generate the Fourier transformation for each jump # also includes the multiplicity for the onsite terms (site expansion) self.FTjumps, self.SEjumps = self.FourierTransformJumps(jumpnetwork, self.N, self.kpts) # generate the Taylor expansion coefficients for each jump self.Taylorjumps = self.TaylorExpandJumps(jumpnetwork, self.N) # tuple of the Wyckoff site indices for each jump (needed to make symmrate) self.jumppairs = tuple((self.invmap[jumplist[0][0][0]], self.invmap[jumplist[0][0][1]]) for jumplist in jumpnetwork) self.D, self.eta = 0, 0 # we don't yet know the diffusivity @staticmethod def networkcount(jumpnetwork, N): """Return a count of how many separate connected networks there are""" jngraph = np.zeros((N, N), dtype=bool) for jlist in jumpnetwork: for (i, j), dx in jlist: jngraph[i,j] = True connectivity = 0 # had been a list... 
if we want to return the list of sets disconnected = {i for i in range(N)} while len(disconnected)>0: # take the "first" element out, and find everything it's connected to: i = min(disconnected) cset = {i} disconnected.remove(i) while True: clen = len(cset) for n in cset.copy(): for m in disconnected.copy(): if jngraph[n,m]: cset.add(m) disconnected.remove(m) # check if we've stopped adding new members: if clen == len(cset): break connectivity += 1 # connectivity.append(cset) # if we want to keep lists of connectivity sets return connectivity # this is part of our *class* definition: __HDF5list__ = ('N', 'Ndiff', 'invmap', 'NG', 'grouparray', 'indexpair', 'kptgrid', 'kpts', 'wts', 'Nkpt', 'FTjumps', 'SEjumps') def __str__(self): return 'GFcalc for crystal (chemistry={}):\n{}\nkpt grid: {} ({})'.format(self.chem, self.crys, self.kptgrid, self.Nkpt) def addhdf5(self, HDF5group): """ Adds an HDF5 representation of object into an HDF5group (needs to already exist). Example: if f is an open HDF5, then GFcalc.addhdf5(f.create_group('GFcalc')) will (1) create the group named 'GFcalc', and then (2) put the GFcalc representation in that group. :param HDF5group: HDF5 group """ HDF5group.attrs['type'] = self.__class__.__name__ HDF5group.attrs['crystal'] = self.crys.__repr__() HDF5group.attrs['chem'] = self.chem # arrays that we can deal with: for internal in self.__HDF5list__: HDF5group[internal] = getattr(self, internal) # note: we don't store sitelist; we reconstruct it from invmap # we need to deal with Taylorjumps and jumppairs separately NTaylorjumps = len(self.Taylorjumps) TaylorTag = 'T3D' if self.crys.dim == 3 else 'T2D' HDF5group['N' + TaylorTag + 'jumps'] = NTaylorjumps for i, t3d in enumerate(self.Taylorjumps): coeffstr = TaylorTag + 'jump-{}'.format(i) t3d.addhdf5(HDF5group.create_group(coeffstr)) HDF5group['jumppairs'] = np.array(self.jumppairs) @classmethod def loadhdf5(cls, crys, HDF5group): """ Creates a new GFcalc from an HDF5 group. 
:param crys: crystal object--MUST BE PASSED IN as it is not stored with the GFcalc :param HDFgroup: HDF5 group :return GFcalc: new GFcalc object """ GFcalc = cls(None, None, None, None) # initialize GFcalc.crys = crys GFcalc.chem = HDF5group.attrs['chem'] for internal in cls.__HDF5list__: setattr(GFcalc, internal, HDF5group[internal].value) GFcalc.Taylorjumps = [] Taylor = T3D if crys.dim == 3 else T2D TaylorTag = 'T3D' if crys.dim == 3 else 'T2D' for i in range(HDF5group['N' + TaylorTag + 'jumps'].value): coeffstr = TaylorTag + 'jump-{}'.format(i) GFcalc.Taylorjumps.append(Taylor.loadhdf5(HDF5group[coeffstr])) # construct sitelist and jumppairs GFcalc.sitelist = [[] for i in range(max(GFcalc.invmap) + 1)] for i, site in enumerate(GFcalc.invmap): GFcalc.sitelist[site].append(i) GFcalc.jumppairs = tuple((pair[0], pair[1]) for pair in HDF5group['jumppairs']) GFcalc.D, GFcalc.eta = 0, 0 # we don't yet know the diffusivity return GFcalc def FourierTransformJumps(self, jumpnetwork, N, kpts): """ Generate the Fourier transform coefficients for each jump :param jumpnetwork: list of unique transitions, as lists of ((i,j), dx) :param N: number of sites :param kpts: array[Nkpt][3], in Cartesian (same coord. 
as dx) :return FTjumps: array[Njump][Nkpt][Nsite][Nsite] of FT of the jump network :return SEjumps: array[Nsite][Njump] multiplicity of jump on each site """ FTjumps = np.zeros((len(jumpnetwork), self.Nkpt, N, N), dtype=complex) SEjumps = np.zeros((N, len(jumpnetwork)), dtype=int) for J, jumplist in enumerate(jumpnetwork): for (i, j), dx in jumplist: FTjumps[J, :, i, j] += np.exp(1.j * np.dot(kpts, dx)) SEjumps[i, J] += 1 return FTjumps, SEjumps def TaylorExpandJumps(self, jumpnetwork, N): """ Generate the Taylor expansion coefficients for each jump :param jumpnetwork: list of unique transitions, as lists of ((i,j), dx) :param N: number of sites :return T3Djumps: list of Taylor3D expansions of the jump network """ Taylor = T3D if self.crys.dim == 3 else T2D Taylor() # need to do just to initialize the class; if already initialized, won't do anything # Taylor expansion coefficients for exp(1j*x) = (1j)^n/n! pre = np.array([(1j) ** n / factorial(n, True) for n in range(Taylor.Lmax + 1)]) Taylorjumps = [] for jumplist in jumpnetwork: # coefficients; we use tuples because we'll be successively adding to the coefficients in place c = [(n, n, np.zeros((Taylor.powlrange[n], N, N), dtype=complex)) for n in range(Taylor.Lmax + 1)] for (i, j), dx in jumplist: pexp = Taylor.powexp(dx, normalize=False) for n in range(Taylor.Lmax + 1): (c[n][2])[:, i, j] += pre[n] * (Taylor.powercoeff[n] * pexp)[:Taylor.powlrange[n]] Taylorjumps.append(Taylor(c)) return Taylorjumps def BreakdownGroups(self): """ Takes in a crystal, and a chemistry, and constructs the indexing breakdown for each (i,j) pair. 
:return grouparray: array[NG][3][3] of the NG group operations :return indexpair: array[N][N][NG][2] of the index pair for each group operation """ grouparray = np.zeros((self.NG, self.crys.dim, self.crys.dim)) indexpair = np.zeros((self.N, self.N, self.NG, 2), dtype=int) for ng, g in enumerate(self.crys.G): grouparray[ng, :, :] = g.cartrot[:, :] indexmap = g.indexmap[self.chem] for i in range(self.N): for j in range(self.N): indexpair[i, j, ng, 0], indexpair[i, j, ng, 1] = indexmap[i], indexmap[j] return grouparray, indexpair def SymmRates(self, pre, betaene, preT, betaeneT): """Returns a list of lists of symmetrized rates, matched to jumpnetwork""" return np.array([pT * np.exp(0.5 * betaene[w0] + 0.5 * betaene[w1] - beT) / np.sqrt(pre[w0] * pre[w1]) for (w0, w1), pT, beT in zip(self.jumppairs, preT, betaeneT)]) def SetRates(self, pre, betaene, preT, betaeneT, pmaxerror=1.e-8): """ (Re)sets the rates, given the prefactors and Arrhenius factors for the sites and transitions, using the ordering according to sitelist and jumpnetwork. Initiates all of the calculations so that GF calculation is (fairly) efficient for each input. :param pre: list of prefactors for site probabilities :param betaene: list of beta*E (energy/kB T) for each site :param preT: list of prefactors for transition states :param betaeneT: list of beta*ET (energy/kB T) for each transition state :param pmaxerror: parameter controlling error from pmax value. Should be same order as integration error. 
""" self.symmrate = self.SymmRates(pre, betaene, preT, betaeneT) self.maxrate = self.symmrate.max() self.symmrate /= self.maxrate self.escape = -np.diag([sum(self.SEjumps[i, J] * pretrans / pre[wi] * np.exp(betaene[wi] - BET) for J, pretrans, BET in zip(itertools.count(), preT, betaeneT)) for i, wi in enumerate(self.invmap)]) / self.maxrate self.omega_qij = np.tensordot(self.symmrate, self.FTjumps, axes=(0, 0)) self.omega_qij[:] += self.escape # adds it to every point self.omega_Taylor = sum(symmrate * expansion for symmrate, expansion in zip(self.symmrate, self.Taylorjumps)) self.omega_Taylor += self.escape Taylor = T3D if self.crys.dim == 3 else T2D # 1. Diagonalize gamma point value; use to rotate to diffusive / relaxive, and reduce self.r, self.vr = self.DiagGamma() if not np.allclose(self.r[:self.Ndiff], 0): raise ArithmeticError("Did not find {} equilibrium solution to rates?".format(self.Ndiff)) self.omega_Taylor_rotate = (self.omega_Taylor.ldot(self.vr.T)).rdot(self.vr) oT_dd, oT_dr, oT_rd, oT_rr, oT_D, etav = self.BlockRotateOmegaTaylor(self.omega_Taylor_rotate) # 2. Calculate D and eta self.D = self.Diffusivity(oT_D) self.eta = self.biascorrection(etav) # 3. Spatially rotate the Taylor expansion self.d, self.e = LA.eigh(self.D / self.maxrate) # had been 1e-11; changed to 1e-7 to reflect likely integration accuracy of k-point grids self.pmax = np.sqrt(min([np.dot(G, np.dot(G, self.D / self.maxrate)) for G in self.crys.BZG]) / -np.log(pmaxerror)) self.qptrans = self.e.copy() self.pqtrans = self.e.T.copy() self.uxtrans = self.e.T.copy() for i in range(self.crys.dim): self.qptrans[:, i] /= np.sqrt(self.d[i]) self.pqtrans[i, :] *= np.sqrt(self.d[i]) self.uxtrans[i, :] /= np.sqrt(self.d[i]) powtrans = Taylor.rotatedirections(self.qptrans) for t in [oT_dd, oT_dr, oT_rd, oT_rr, oT_D]: t.irotate(powtrans) # rotate in place t.reduce() if oT_D.coefflist[0][1] != 0: raise ArithmeticError("Problem isotropizing D?") # 4. 
Invert Taylor expansion using block inversion formula, and truncate at n=0 gT_rotate = self.BlockInvertOmegaTaylor(oT_dd, oT_dr, oT_rd, oT_rr, oT_D) self.g_Taylor = (gT_rotate.ldot(self.vr)).rdot(self.vr.T) self.g_Taylor.separate() g_Taylor_fnlp = {(n, l): Fnl_p(n, self.pmax) for (n, l) in self.g_Taylor.nl()} prefactor = self.crys.volume / np.sqrt(np.product(self.d)) self.g_Taylor_fnlu = {(n, l): Fnl_u(n, l, self.pmax, prefactor, d=self.crys.dim) for (n, l) in self.g_Taylor.nl()} # 5. Invert Fourier expansion gsc_qij = np.zeros_like(self.omega_qij) for qind, q in enumerate(self.kpts): if np.allclose(q, 0): # gamma point... need to treat separately gsc_qij[qind] = (-1 / self.pmax ** 2) * \ sum(np.outer(self.vr[:, n], self.vr[:, n]) for n in range(self.Ndiff)) else: # invert, subtract off Taylor expansion to leave semicontinuum piece gsc_qij[qind] = np.linalg.inv(self.omega_qij[qind, :, :]) \ - self.g_Taylor(np.dot(self.pqtrans, q), g_Taylor_fnlp) # 6. Slice the pieces we want for fast(er) evaluation (since we specify i and j in evaluation) self.gsc_ijq = np.zeros((self.N, self.N, self.Nkpt), dtype=complex) for i in range(self.N): for j in range(self.N): self.gsc_ijq[i, j, :] = gsc_qij[:, i, j] # since we can't make an array, use tuples of tuples to do gT_ij[i][j] self.gT_ij = tuple(tuple(self.g_Taylor[i, j].copy().reduce().separate() for j in range(self.N)) for i in range(self.N)) def exp_dxq(self, dx): """ Return the array of exp(-i q.dx) evaluated over the q-points, and accounting for symmetry :param dx: vector :return exp(-i q.dx): array of :math:`\\exp(-i \\cdot dx)` """ # kpts[k,3] .. 
g_dx_array[NR, 3] return np.exp(-1j * np.tensordot(self.kpts, dx, axes=(1, 0))) def __call__(self, i, j, dx): """ Evaluate the Green function from site i to site j, separated by vector dx :param i: site index :param j: site index :param dx: vector pointing from i to j (can include lattice contributions) :return G: Green function value """ if self.D is 0: raise ValueError("Need to SetRates first") # evaluate Fourier transform component (now with better space group treatment!) gIFT = 0 for gop, pair in zip(self.grouparray, self.indexpair[i][j]): gIFT += np.dot(self.wts, self.gsc_ijq[pair[0], pair[1]] * self.exp_dxq(np.dot(gop, dx))) gIFT /= self.NG if not np.isclose(gIFT.imag, 0): raise ArithmeticError("Got complex IFT? {}".format(gIFT)) # evaluate Taylor expansion component: gTaylor = self.gT_ij[i][j](np.dot(self.uxtrans, dx), self.g_Taylor_fnlu) if not np.isclose(gTaylor.imag, 0): raise ArithmeticError("Got complex IFT from Taylor? {}".format(gTaylor)) # combine: return (gIFT + gTaylor).real / self.maxrate def DiagGamma(self, omega=None): """ Diagonalize the gamma point (q=0) term :param omega: optional; the Taylor expansion to use. If None, use self.omega_Taylor :return r: array of eigenvalues, sorted from 0 to decreasing values. :return vr: array of eigenvectors where vr[:,i] is the vector for eigenvalue r[i] """ if omega is None: omega = self.omega_Taylor gammacoeff = None for (n, l, coeff) in omega.coefflist: if n < 0: raise ValueError("Taylor expansion has terms below n=0?") if n == 0: if l != 0: raise ValueError("n=0 term has angular dependence? l != 0") gammacoeff = -coeff[0].real break if gammacoeff is None: # missing onsite term--indicates that it's been reduced to 0 # should ONLY happen if we have a Bravais lattice, e.g. gammacoeff = np.zeros((self.N, self.N)) #, dtype=complex) r, vr = LA.eigh(gammacoeff) return -r, vr def Diffusivity(self, omega_Taylor_D=None): """ Return the diffusivity, or compute it if it's not already known. 
Uses omega_Taylor_D to compute with maximum efficiency. :param omega_Taylor_D: Taylor expansion of the diffusivity component :return D: diffusivity [3,3] array """ if self.D is not 0 and omega_Taylor_D is None: return self.D if self.D is 0 and omega_Taylor_D is None: raise ValueError("Need omega_Taylor_D value") Taylor = T3D if self.crys.dim == 3 else T2D D = np.zeros((self.crys.dim, self.crys.dim)) for (n, l, c) in omega_Taylor_D.coefflist: if n < 2: raise ValueError("Reduced Taylor expansion for D doesn't begin with n==2") DTr = np.trace(c.real, axis1=1, axis2=2)/self.Ndiff if n == 2: # first up: constant term (if present) D += np.eye(self.crys.dim) * DTr[0] # next: l == 2 contributions if l >= 2: # done in this way so that we get the 1/2 for the off-diagonal, and the 1 for diagonal for t in ((i, j) for i in range(self.crys.dim) for j in range(i, self.crys.dim)): tupind = tuple(t.count(d) for d in range(self.crys.dim)) ind = Taylor.pow2ind[tupind] # count the powers D[t] += 0.5 * DTr[ind] D[t[1], t[0]] += 0.5 * DTr[ind] # note: the "D" constructed this way will be negative! (as it is -q.D.q) return -D * self.maxrate def biascorrection(self, etav=None): """ Return the bias correction, or compute it if it's not already known. Uses etav to compute. :param etav: Taylor expansion of the bias correction :return eta: [N,3] array """ if etav is None: return self.eta # a little bit of a hack: we keep the implicit vr[:,0] part, that's the square root of # probability that comes from the diagonalization at q=0, but it might be negative! rhosign = [1. if sum(self.vr[:, n0])>0 else -1. 
for n0 in range(self.Ndiff)] Taylor = T3D if self.crys.dim == 3 else T2D d_ind_list = [(d, Taylor.pow2ind[(0,)*d + (1,) + (0,)*(self.crys.dim-1-d)]) for d in range(self.crys.dim)] eta = np.zeros((self.N, self.crys.dim)) if etav == 0: return eta for (n, l, c) in etav.coefflist: if n < 1: raise ValueError("Reduced Taylor expansion for etav doesn't begin with n==1") if n == 1: if l >= 1: for d, ind in d_ind_list: eta[:, d] -= sum(rhosign[n0]*np.dot(self.vr[:, self.Ndiff:], c[ind, :])[:, n0].imag for n0 in range(self.Ndiff)) / self.Ndiff return eta def BlockRotateOmegaTaylor(self, omega_Taylor_rotate): """ Returns block partitioned Taylor expansion of a rotated omega Taylor expansion. :param omega_Taylor_rotate: rotated into diffusive [0] / relaxive [1:] basis :return dd: diffusive/diffusive block (upper left) :return dr: diffusive/relaxive block (lower left) :return rd: relaxive/diffusive block (upper right) :return rr: relaxive/relaxive block (lower right) :return D: :math:`dd - dr (rr)^{-1} rd` (diffusion) :return etav: :math:`(rr)^{-1} rd` (relaxation vector) """ Taylor = T3D if self.crys.dim == 3 else T2D ND = self.Ndiff # previously had been 1. dd = omega_Taylor_rotate[0:ND, 0:ND].copy() dr = omega_Taylor_rotate[0:ND, ND:].copy() rd = omega_Taylor_rotate[ND:, 0:ND].copy() rr = omega_Taylor_rotate[ND:, ND:].copy() for t in [dd, dr, rd, rr]: t.reduce() if self.N > ND: D = dd - dr * rr.inv() * rd etav = rr.inv() * rd etav.truncate(1, inplace=True) else: D = dd.copy() etav = 0 D.truncate(Taylor.Lmax, inplace=True) D.reduce() return dd, dr, rd, rr, D, etav def BlockInvertOmegaTaylor(self, dd, dr, rd, rr, D): """ Returns block inverted omega as a Taylor expansion, up to Nmax = 0 (discontinuity correction). Needs to be rotated such that leading order of D is isotropic. 
:param dd: diffusive/diffusive block (upper left) :param dr: diffusive/relaxive block (lower left) :param rd: relaxive/diffusive block (upper right) :param rr: relaxive/relaxive block (lower right) :param D: :math:`dd - dr (rr)^{-1} rd` (diffusion) :return gT: Taylor expansion of g in block form, and reduced (collected terms) """ Taylor = T3D if self.crys.dim == 3 else T2D ND = self.Ndiff # previously had been 1. gT = Taylor.zeros(-2, 0, (self.N, self.N)) # where we'll place our Taylor expansion D_inv = D.inv() gT[0:ND, 0:ND] = D_inv.truncate(0) if self.N > ND: rr_inv = rr.inv() gT[0:ND, ND:] = -(D_inv * dr * rr_inv).truncate(0) gT[ND:, 0:ND] = -(rr_inv * rd * D_inv).truncate(0) gT[ND:, ND:] = (rr_inv + rr_inv * rd * D_inv * dr * rr_inv).truncate(0) return gT.reduce()
"""
This module provides a class for interfacing with sensors and actuators. It can add, edit and remove sensors and actuators as well as monitor their states and execute commands.
"""
from datetime import datetime, timedelta
from json import dumps, loads
from os import getpid, path
from threading import Event, RLock
from myDevices.cloud import cayennemqtt
from myDevices.cloud.dbmanager import DbManager
from myDevices.cloud.download_speed import DownloadSpeed
from myDevices.devices import instance, manager
from myDevices.devices.bus import BUSLIST, checkAllBus
from myDevices.devices.digital.gpio import NativeGPIO as GPIO
from myDevices.system import services
from myDevices.system.systeminfo import SystemInfo
from myDevices.plugins.manager import PluginManager
from myDevices.utils.config import Config, APP_SETTINGS
from myDevices.utils.daemon import Daemon
from myDevices.utils.logger import debug, error, exception, info, logJson, warn
from myDevices.utils.threadpool import ThreadPool
from myDevices.utils.types import M_JSON

REFRESH_FREQUENCY = 15 #seconds
REAL_TIME_FREQUENCY = 60/55 #Seconds/messages, this is done to keep messages under the rate limit

# Number of Monitor() cycles between full-state resends (changed-only data is
# sent on the intervening cycles).
FULL_DATA_SEND_INTERVAL = 4


class SensorsClient():
    """Class for interfacing with sensors and actuators"""

    def __init__(self):
        """Initialize the bus and sensor info and start monitoring sensor states"""
        self.sensorMutex = RLock()
        self.realTimeMutex = RLock()
        self.exiting = Event()
        self.onDataChanged = None
        self.systemData = []
        self.currentSystemState = []
        self.currentRealTimeData = {}
        self.queuedRealTimeData = {}
        self.disabledSensors = {}
        self.disabledSensorTable = "disabled_sensors"
        checkAllBus()
        self.gpio = GPIO()
        self.downloadSpeed = DownloadSpeed(Config(APP_SETTINGS))
        # Prime the download speed measurement so later reads have a value.
        self.downloadSpeed.getDownloadSpeed()
        manager.addDeviceInstance("GPIO", "GPIO", "GPIO", self.gpio, [], "system")
        manager.loadJsonDevices("rest")
        # Restore the persisted set of disabled sensors from the database.
        results = DbManager.Select(self.disabledSensorTable)
        if results:
            for row in results:
                self.disabledSensors[row[0]] = 1
        self.realTimeMonitorRunning = False
        self.pluginManager = PluginManager(self.OnPluginChange)
        self.pluginManager.load_plugins()
        self.InitCallbacks()
        self.StartMonitoring()

    def SetDataChanged(self, onDataChanged=None):
        """Set callback to call when data has changed

        Args:
            onDataChanged: Function to call when sensor data changes
        """
        self.onDataChanged = onDataChanged

    def QueueRealTimeData(self, name, data):
        """Add real-time data to queue to be sent on thread

        Args:
            name: The name to use for the data
            data: The data to send
        """
        with self.realTimeMutex:
            if name not in self.currentRealTimeData:
                self.currentRealTimeData[name] = data
            else:
                # A value for this name is already pending; stage the newer one
                # so it is sent on the following cycle.
                self.queuedRealTimeData[name] = data

    def OnSensorChange(self, device, value):
        """Callback that is called when digital sensor data has changed

        Args:
            device: The device that has changed data
            value: The new data value
        """
        debug('OnSensorChange: {}, {}'.format(device, value))
        with self.realTimeMutex:
            data = {'name': device['description'], 'value': value, 'type': 'digital_sensor', 'unit': 'd'}
            if 'args' in device:
                data['args'] = device['args']
            self.QueueRealTimeData(device['name'], data)

    def OnPluginChange(self, data):
        """Callback that is called when plugin data has changed

        Args:
            data: The new data value
        """
        debug('OnPluginChange: {}'.format(data))
        self.QueueRealTimeData(data['id'], data)
        with self.realTimeMutex:
            if not self.realTimeMonitorRunning:
                ThreadPool.Submit(self.RealTimeMonitor)

    def OnGpioStateChange(self, channel, value):
        """Send updated pin state when it has changed

        Args:
            channel: The pin number
            value: The new value for the pin
        """
        debug('OnGpioStateChange: channel {}, value {}'.format(channel, value))
        data = []
        cayennemqtt.DataChannel.add_unique(data, cayennemqtt.SYS_GPIO, channel, cayennemqtt.VALUE, value)
        if not self.realTimeMonitorRunning:
            # No real-time thread to batch the change; notify immediately.
            self.onDataChanged(data)
        else:
            self.QueueRealTimeData(data[0]['channel'], data[0])

    def InitCallbacks(self):
        """Set callback function for any digital devices that support them"""
        devices = manager.getDeviceList()
        for device in devices:
            sensor = instance.deviceInstance(device['name'])
            if 'DigitalSensor' in device['type'] and hasattr(sensor, 'setCallback'):
                debug('Set callback for {}'.format(sensor))
                sensor.setCallback(self.OnSensorChange, device)
                if not self.realTimeMonitorRunning:
                    ThreadPool.Submit(self.RealTimeMonitor)

    def RemoveCallbacks(self):
        """Remove callback function for all digital devices"""
        devices = manager.getDeviceList()
        for device in devices:
            sensor = instance.deviceInstance(device['name'])
            if 'DigitalSensor' in device['type'] and hasattr(sensor, 'removeCallback'):
                sensor.removeCallback()

    def StartMonitoring(self):
        """Start thread monitoring sensor data"""
        ThreadPool.Submit(self.Monitor)

    def StopMonitoring(self):
        """Stop thread monitoring sensor data"""
        self.RemoveCallbacks()
        self.exiting.set()

    def Monitor(self):
        """Monitor bus/sensor states and system info and report changed data via callbacks"""
        debug('Monitoring sensors and os resources started')
        sendAllDataCount = 0
        nextTime = datetime.now()
        while not self.exiting.is_set():
            try:
                # Sleep until the next refresh, but wake immediately on exit.
                difference = nextTime - datetime.now()
                delay = min(REFRESH_FREQUENCY, difference.total_seconds())
                delay = max(0, delay)
                if not self.exiting.wait(delay):
                    nextTime = datetime.now() + timedelta(seconds=REFRESH_FREQUENCY)
                    self.currentSystemState = []
                    self.MonitorSystemInformation()
                    self.MonitorSensors()
                    self.MonitorPlugins()
                    self.MonitorBus()
                    if self.currentSystemState != self.systemData:
                        data = self.currentSystemState
                        # Send only the changed items, except on every
                        # FULL_DATA_SEND_INTERVAL-th cycle when everything is sent.
                        if self.systemData and sendAllDataCount != 0:
                            data = [x for x in self.currentSystemState if x not in self.systemData]
                        if self.onDataChanged and data:
                            self.onDataChanged(data)
                    sendAllDataCount += 1
                    if sendAllDataCount >= FULL_DATA_SEND_INTERVAL:
                        sendAllDataCount = 0
                    self.systemData = self.currentSystemState
            except Exception:
                exception('Monitoring sensors and os resources failed')
        debug('Monitoring sensors and os resources finished')

    def RealTimeMonitor(self):
        """Monitor real-time state changes and report changed data via callbacks"""
        self.realTimeMonitorRunning = True
        info('Monitoring real-time state changes')
        nextTime = datetime.now()
        while not self.exiting.is_set():
            try:
                # Poll twice a second; throttle sends to REAL_TIME_FREQUENCY.
                if not self.exiting.wait(0.5):
                    if datetime.now() > nextTime:
                        nextTime = datetime.now() + timedelta(seconds=REAL_TIME_FREQUENCY)
                        self.SendRealTimeData()
            except Exception:
                exception('Monitoring real-time changes failed')
        debug('Monitoring real-time changes finished')
        self.realTimeMonitorRunning = False

    def SendRealTimeData(self):
        """Send real-time data via callback"""
        data = []
        with self.realTimeMutex:
            if self.currentRealTimeData:
                for name, item in self.currentRealTimeData.items():
                    if cayennemqtt.SYS_GPIO in name:
                        data.append(item)
                    else:
                        cayennemqtt.DataChannel.add_unique(data, cayennemqtt.DEV_SENSOR, name, value=item['value'],
                                                           name=item['name'], type=item['type'], unit=item['unit'])
                        try:
                            # Mirror sensor value onto its GPIO channel, if the
                            # item carries channel args; best effort only.
                            cayennemqtt.DataChannel.add_unique(data, cayennemqtt.SYS_GPIO, item['args']['channel'],
                                                               cayennemqtt.VALUE, item['value'])
                        except Exception:
                            pass
                    # Drop a queued duplicate if the value has not changed.
                    if name in self.queuedRealTimeData and self.queuedRealTimeData[name]['value'] == item['value']:
                        del self.queuedRealTimeData[name]
                self.currentRealTimeData = self.queuedRealTimeData
                self.queuedRealTimeData = {}
        if data:
            self.onDataChanged(data)

    def MonitorSensors(self):
        """Check sensor states for changes"""
        if self.exiting.is_set():
            return
        self.currentSystemState += self.SensorsInfo()

    def MonitorPlugins(self):
        """Check plugin states for changes"""
        if self.exiting.is_set():
            return
        self.currentSystemState += self.pluginManager.get_plugin_readings()

    def MonitorBus(self):
        """Check bus states for changes"""
        if self.exiting.is_set():
            return
        self.currentSystemState += self.BusInfo()

    def MonitorSystemInformation(self):
        """Check system info for changes"""
        if self.exiting.is_set():
            return
        self.currentSystemState += self.SystemInformation()

    def SystemInformation(self):
        """Return dict containing current system info, including CPU, RAM, storage and network info"""
        newSystemInfo = []
        try:
            systemInfo = SystemInfo()
            newSystemInfo = systemInfo.getSystemInformation()
            download_speed = self.downloadSpeed.getDownloadSpeed()
            if download_speed:
                cayennemqtt.DataChannel.add(newSystemInfo, cayennemqtt.SYS_NET, suffix=cayennemqtt.SPEEDTEST,
                                            value=download_speed, type='bw', unit='mbps')
        except Exception:
            exception('SystemInformation failed')
        return newSystemInfo

    def CallDeviceFunction(self, func, *args):
        """Call a function for a sensor/actuator device and format the result value type

        Args:
            func: Function to call
            args: Parameters to pass to the function

        Returns:
            The function result, converted via the function's format string for
            non-JSON content types; None if the function returned None.
        """
        result = func(*args)
        # Default to the raw result so None propagates instead of raising
        # NameError (the original left 'response' unbound for a None result).
        response = result
        if result is not None:
            if hasattr(func, "contentType") and func.contentType != M_JSON:
                # Format the value and coerce it back to the result's type.
                value_type = type(result)
                response = value_type(func.format % result)
        return response

    def BusInfo(self):
        """Return a dict with current bus info"""
        bus_info = []
        gpio_state = self.gpio.wildcard()
        for key, value in gpio_state.items():
            cayennemqtt.DataChannel.add(bus_info, cayennemqtt.SYS_GPIO, key, cayennemqtt.VALUE, value['value'])
            cayennemqtt.DataChannel.add(bus_info, cayennemqtt.SYS_GPIO, key, cayennemqtt.FUNCTION, value['function'])
        return bus_info

    def SensorsInfo(self):
        """Return a list with current sensor states for all enabled sensors"""
        manager.deviceDetector()
        devices = manager.getDeviceList()
        sensors_info = []
        if devices is None:
            return sensors_info
        # Map of device type to the read function and MQTT data attributes;
        # loop-invariant, so built once rather than per device.
        sensor_types = {'Temperature': {'function': 'getCelsius', 'data_args': {'type': 'temp', 'unit': 'c'}},
                        'Humidity': {'function': 'getHumidityPercent', 'data_args': {'type': 'rel_hum', 'unit': 'p'}},
                        'Pressure': {'function': 'getPascal', 'data_args': {'type': 'bp', 'unit': 'pa'}},
                        'Luminosity': {'function': 'getLux', 'data_args': {'type': 'lum', 'unit': 'lux'}},
                        'Distance': {'function': 'getCentimeter', 'data_args': {'type': 'prox', 'unit': 'cm'}},
                        'ServoMotor': {'function': 'readAngle', 'data_args': {'type': 'analog_actuator'}},
                        'DigitalSensor': {'function': 'read', 'data_args': {'type': 'digital_sensor', 'unit': 'd'}},
                        'DigitalActuator': {'function': 'read', 'data_args': {'type': 'digital_actuator', 'unit': 'd'}},
                        'AnalogSensor': {'function': 'readFloat', 'data_args': {'type': 'analog_sensor'}},
                        'AnalogActuator': {'function': 'readFloat', 'data_args': {'type': 'analog_actuator'}}}
        for device in devices:
            sensor = instance.deviceInstance(device['name'])
            if 'enabled' not in device or device['enabled'] == 1:
                display_name = device.get('description')
                for device_type in device['type']:
                    if device_type in sensor_types:
                        try:
                            sensor_type = sensor_types[device_type]
                            func = getattr(sensor, sensor_type['function'])
                            if len(device['type']) > 1:
                                # Disambiguate multi-type devices with a typed channel.
                                channel = '{}:{}'.format(device['name'], device_type.lower())
                            else:
                                channel = device['name']
                            value = self.CallDeviceFunction(func)
                            cayennemqtt.DataChannel.add(sensors_info, cayennemqtt.DEV_SENSOR, channel, value=value,
                                                        name=display_name, **sensor_type['data_args'])
                            if 'DigitalActuator' == device_type and value in (0, 1):
                                manager.updateDeviceState(device['name'], value)
                        except Exception:
                            exception('Failed to get sensor data: {} {}'.format(device_type, device['name']))
        info('Sensors info: {}'.format(sensors_info))
        return sensors_info

    def AddSensor(self, name, description, device, args):
        """Add a new sensor/actuator

        Args:
            name: Name of sensor to add
            description: Sensor description
            device: Sensor device class
            args: Sensor specific args

        Returns:
            True for success, False otherwise.
        """
        info('AddSensor: {}, {}, {}, {}'.format(name, description, device, args))
        bVal = False
        try:
            sensorAdd = {}
            if name:
                sensorAdd['name'] = name
            if device:
                sensorAdd['device'] = device
            if args:
                sensorAdd['args'] = args
            if description:
                sensorAdd['description'] = description
            with self.sensorMutex:
                retValue = manager.addDeviceJSON(sensorAdd)
            # Re-register callbacks so the new device is monitored.
            self.InitCallbacks()
            info('Add device returned: {}'.format(retValue))
            if retValue[0] == 200:
                bVal = True
        except Exception:
            exception('Error adding sensor')
            bVal = False
        return bVal

    def EditSensor(self, name, description, device, args):
        """Edit an existing sensor/actuator

        Args:
            name: Name of sensor to edit
            description: New sensor description
            device: New sensor device class
            args: New sensor specific args

        Returns:
            True for success, False otherwise.
        """
        info('EditSensor: {}, {}, {}, {}'.format(name, description, device, args))
        bVal = False
        try:
            sensorEdit = {}
            sensorEdit['name'] = name
            sensorEdit['device'] = device
            sensorEdit['description'] = description
            sensorEdit['args'] = args
            with self.sensorMutex:
                retValue = manager.updateDevice(name, sensorEdit)
            # Re-register callbacks in case the device type changed.
            self.InitCallbacks()
            info('Edit device returned: {}'.format(retValue))
            if retValue[0] == 200:
                bVal = True
        except Exception:
            exception("Edit sensor failed")
            bVal = False
        return bVal

    def RemoveSensor(self, name):
        """Remove an existing sensor/actuator

        Args:
            name: Name of sensor to remove

        Returns:
            True for success, False otherwise.
        """
        bVal = False
        try:
            if self.pluginManager.is_plugin(name):
                return self.pluginManager.disable(name)
            sensorRemove = name
            try:
                # Best effort: detach any state-change callback before removal.
                sensor = instance.deviceInstance(sensorRemove)
                if hasattr(sensor, 'removeCallback'):
                    sensor.removeCallback()
            except Exception:
                pass
            with self.sensorMutex:
                retValue = manager.removeDevice(sensorRemove)
            info('Remove device returned: {}'.format(retValue))
            if retValue[0] == 200:
                bVal = True
        except Exception:
            exception("Remove sensor failed")
            bVal = False
        return bVal

    def EnableSensor(self, sensor, enable):
        """Enable a sensor/actuator

        Args:
            sensor: Hash composed from name and device class/type
            enable: 1 to enable, 0 to disable

        Returns:
            True for success, False otherwise.
        """
        info('Enable sensor: ' + str(sensor) + ' ' + str(enable))
        try:
            if sensor is None:
                return False
            if enable is None:
                return False
            with self.sensorMutex:
                if enable == 0:
                    # Disable: persist and remember the sensor as disabled.
                    if sensor not in self.disabledSensors:
                        DbManager.Insert(self.disabledSensorTable, sensor)
                        self.disabledSensors[sensor] = 1
                else:
                    # Enable: remove the sensor from the persisted disabled set.
                    if sensor in self.disabledSensors:
                        DbManager.Delete(self.disabledSensorTable, sensor)
                        del self.disabledSensors[sensor]
        except Exception as ex:
            error('EnableSensor Failed with exception: ' + str(ex))
            return False
        return True

    def GpioCommand(self, command, channel, value):
        """Execute onboard GPIO command

        Args:
            command: Type of command to execute
            channel: GPIO pin
            value: Value to use for writing data

        Returns:
            String containing command specific return value on success, or 'failure' on failure
        """
        info('GpioCommand {}, channel {}, value {}'.format(command, channel, value))
        result = 'failure'
        if command == 'function':
            old_state = self.gpio.digitalRead(channel)
            if value.lower() in ('in', 'input'):
                result = str(self.gpio.setFunctionString(channel, 'in'))
            elif value.lower() in ('out', 'output'):
                result = str(self.gpio.setFunctionString(channel, 'out'))
            new_state = self.gpio.digitalRead(channel)
            if new_state != old_state:
                self.OnGpioStateChange(channel, new_state)
        elif command in ('value', ''):
            return self.gpio.digitalWrite(channel, int(value))
        if result == 'failure':
            # Only log failure when the command actually failed (the original
            # logged it unconditionally, even after a successful 'function' call).
            debug('GPIO command failed')
        return result

    def SensorCommand(self, command, sensorId, channel, value):
        """Execute sensor/actuator command

        Args:
            command: Type of command to execute
            sensorId: Sensor id
            channel: Pin/channel on device, None if there is no channel
            value: Value to use for setting the sensor state

        Returns:
            Command specific return value on success, False on failure
        """
        result = False
        info('SensorCommand: {}, sensor {}, channel {}, value {}'.format(command, sensorId, channel, value))
        try:
            if self.pluginManager.is_plugin(sensorId, channel):
                return self.pluginManager.write_value(sensorId, channel, value)
            # Map of command name to the device write function and the type the
            # incoming value string should be converted to.
            commands = {'integer': {'function': 'write', 'value_type': int},
                        'value': {'function': 'write', 'value_type': int},
                        'function': {'function': 'setFunctionString', 'value_type': str},
                        'angle': {'function': 'writeAngle', 'value_type': float},
                        'float': {'function': 'writeFloat', 'value_type': float},
                        'volt': {'function': 'writeVolt', 'value_type': float}}
            with self.sensorMutex:
                if sensorId in self.disabledSensors:
                    info('Sensor disabled')
                    return result
                sensor = instance.deviceInstance(sensorId)
                if not sensor:
                    info('Sensor not found')
                    return result
                if command in commands:
                    device = instance.DEVICES[sensorId]
                    info('Sensor found: {}'.format(device))
                    func = getattr(sensor, commands[command]['function'])
                    value = commands[command]['value_type'](value)
                    if channel:
                        result = self.CallDeviceFunction(func, int(channel), value)
                    else:
                        result = self.CallDeviceFunction(func, value)
                    if 'DigitalActuator' in device['type']:
                        manager.updateDeviceState(sensorId, value)
                    return result
                warn('Command not implemented: {}'.format(command))
                return result
        except Exception:
            exception('SensorCommand failed')
            return result
from __future__ import unicode_literals from __future__ import absolute_import, division, print_function """ This module is used to cache per-collection type information. """ __author__ = "Graham Klyne (GK@ACM.ORG)" __copyright__ = "Copyright 2017, G. Klyne" __license__ = "MIT (http://opensource.org/licenses/MIT)" import logging log = logging.getLogger(__name__) from annalist import layout from annalist.exceptions import Annalist_Error from annalist.identifiers import ANNAL, RDFS from annalist.models.collectionentitycache import ( Cache_Error, CollectionEntityCacheObject, CollectionEntityCache ) from annalist.models.closurecache import ClosureCache from annalist.models.recordtype import RecordType # --------------------------------------------------------------------------- # # Type-cache object class # # --------------------------------------------------------------------------- #@@@ supertype_closure_cache = {} class CollectionTypeCacheObject(CollectionEntityCacheObject): """ This class is a type cache for a specified collection. It extends class CollectionEntityCacheObject with type-specific logic; notably overriding method _load_entity with additional logic to maintain a supertype closure cache, and methods to access that cache. """ def __init__(self, coll_id, entity_cls=RecordType): """ Initialize a cache object for a specified collection. coll_id Collection id with which the type cache is associated. """ super(CollectionTypeCacheObject, self).__init__(coll_id, entity_cls) #@@@ supertype_closure_cache[coll_id] self._supertype_closure_cache = ClosureCache(coll_id, ANNAL.CURIE.supertype_uri) return def _gsupertype_cache(self): return self._supertype_closure_cache #@@@ supertype_closure_cache.get(self.get_coll_id(), None) def _load_entity(self, coll, type_entity): """ Internal helper method loads type data to cache. Also updates supertype closure cache. Returns True if new type was added. 
""" type_id = type_entity.get_id() type_uri = type_entity.get_uri() type_parent = type_entity.get_parent().get_id() type_data = type_entity.get_save_values() add_type = super(CollectionTypeCacheObject, self)._load_entity(coll, type_entity) if add_type: # Add relations for supertype references from the new type URI for supertype_obj in type_data.get(ANNAL.CURIE.supertype_uri, []): supertype_uri = supertype_obj["@id"] self._gsupertype_cache().add_rel(type_uri, supertype_uri) # Also add relations for references *to* the new type URI for try_subtype in self.get_all_entities(coll): sub_st_objs = try_subtype.get(ANNAL.CURIE.supertype_uri, []) sub_st_uris = [ sub_st_obj["@id"] for sub_st_obj in sub_st_objs ] if type_uri in sub_st_uris: subtype_uri = try_subtype.get(ANNAL.CURIE.uri, None) if subtype_uri: self._gsupertype_cache().add_rel(subtype_uri, type_uri) return add_type def _drop_entity(self, coll, type_id): """ Override method that drops entity from cache, to also remove references from the supertype closure cache. Returns the type entity removed, or None if not found. """ type_entity = super(CollectionTypeCacheObject, self)._drop_entity(coll, type_id) if type_entity: type_uri = type_entity.get_uri() self._gsupertype_cache().remove_val(type_uri) return type_entity def get_type_uri_supertype_uris(self, type_uri): """ Returns all supertype URIs for a specified type URI. Returns all supertype URIs, even those for which there is no defined type entity. """ return self._gsupertype_cache().fwd_closure(type_uri) def get_type_uri_subtype_uris(self, type_uri): """ Returns all subtype URIs for a specified type URI. Returns all subtype URIs, even those for which there is no defined type entity. """ return self._gsupertype_cache().rev_closure(type_uri) def get_type_uri_supertypes(self, coll, type_uri): """ Returns all supertypes for a specified type URI. This method returns only those supertypes that are defined as entities. 
""" self._load_entities(coll) for st_uri in self.get_type_uri_supertype_uris(type_uri): st = self.get_entity_from_uri(coll, st_uri) if st: yield st return def get_type_uri_subtypes(self, coll, type_uri): """ Returns all subtypes for a specified type URI. This method returns only those subtypes that are defined as entities. """ self._load_entities(coll) for st_uri in self.get_type_uri_subtype_uris(type_uri): st = self.get_entity_from_uri(coll, st_uri) if st: yield st return def remove_cache(self): """ Close down and release all type cache data """ # log.debug("@@@@remove type cache %r"%(self.get_coll_id(),)) super(CollectionTypeCacheObject, self).remove_cache() self._supertype_closure_cache.remove_cache() self._supertype_closure_cache = None return # --------------------------------------------------------------------------- # # Collection type-cache class # # --------------------------------------------------------------------------- class CollectionTypeCache(CollectionEntityCache): """ This class manages type cache objects over multiple collections """ def __init__(self): """ Initialize. Initializes a value cache cache with no per-collection data. """ super(CollectionTypeCache, self).__init__(CollectionTypeCacheObject, RecordType) return # Collection type cache allocation and access methods def set_type(self, coll, type_entity): """ Save a new or updated type definition """ return self.set_entity(coll, type_entity) def remove_type(self, coll, type_id): """ Remove type from collection type cache. Returns the type entity removed if found, or None if not defined. """ return self.remove_entity(coll, type_id) def get_type(self, coll, type_id): """ Retrieve a type description for a given type Id. Returns a type object for the specified collection and type Id. """ return self.get_entity(coll, type_id) def get_type_from_uri(self, coll, type_uri): """ Retrieve a type description for a given type URI. Returns a type object for the specified collection and type URI. 
""" return self.get_entity_from_uri(coll, type_uri) def get_all_type_ids(self, coll, altscope=None): """ Returns all types currently available for a collection in the indicated scope. Default scope is types defined directly in the indicated collection. """ return self.get_all_entity_ids(coll, altscope=altscope) def get_all_types(self, coll, altscope=None): """ Returns all types currently available for a collection in the indicated scope. Default scope is types defined directly in the indicated collection. """ return self.get_all_entities(coll, altscope=altscope) def get_type_uri_supertypes(self, coll, type_uri): """ Returns all supertypes for a specieid type URI. """ type_cache = self._get_cache(coll) return type_cache.get_type_uri_supertypes(coll, type_uri) def get_type_uri_subtypes(self, coll, type_uri): """ Returns all subtypes for a specieid type URI. """ type_cache = self._get_cache(coll) return type_cache.get_type_uri_subtypes(coll, type_uri) def get_type_uri_supertype_uris(self, coll, type_uri): """ Returns all supertypes for a specieid type URI. """ type_cache = self._get_cache(coll) return type_cache.get_type_uri_supertype_uris(type_uri) def get_type_uri_subtype_uris(self, coll, type_uri): """ Returns all subtypes for a specieid type URI. """ type_cache = self._get_cache(coll) return type_cache.get_type_uri_subtype_uris(type_uri) # End.
"""Utilities to help with constructing distance matrices""" import os, sys, time import numpy as np INF = 1e9 class Distancer(object): """A class to keep track of distances computed so far and compute more""" def __init__(self, distfunc, datafname, defaultval=INF, sortasc=1, feats=None, savefreq=30): """Initializes with the distfunc to use""" self.distfunc = distfunc self.datafname = datafname self.defaultval = defaultval self.sortasc = sortasc self.savefreq = savefreq self.lastsave = 0 self.feats = [] self.dists = None self.load() if feats: for f in feats: if f not in self.feats: self.feats.append(f) self.extenddists() def load(self): """Loads data from disk""" from cPickle import load try: self.dists, self.feats = load(open(self.datafname)) except Exception: pass def save(self, infork=1): """Saves our data to disk""" from cPickle import dump from utils import saveandrename if time.time() - self.lastsave < self.savefreq: return self.lastsave = time.time() try: os.makedirs(os.path.dirname(self.datafname)) except OSError: pass print 'Saving data to %s with %d feats' % (self.datafname, len(self.feats)) saveandrename(self.datafname, lambda f: dump((self.dists, self.feats), f), retfile=1, infork=infork) print ' Finished saving data with %d feats' % (len(self.feats)) def extenddists(self, saveinfork=0): """Extends distance matrix by the needed number of rows/cols""" s = self.dists n = len(self.feats) # first check if we have distances at all if s is None: self.dists = np.ones((n,n)) * self.defaultval self.save() return coldiff = n - s.shape[1] changed = 0 if coldiff > 0: s = np.hstack((s, np.ones((s.shape[0], coldiff))* self.defaultval)) changed = 1 rowdiff = n - s.shape[0] if rowdiff > 0: s = np.vstack((s, np.ones((rowdiff, s.shape[1]))* self.defaultval)) changed = 1 self.dists = s if changed: #self.save(infork=saveinfork) pass def valid(self, v): """Checks if a valid is valid""" return abs(self.defaultval-v) > 1e-4 def dist(self, a, b): """Returns the distance between 
the given elements""" # extend matrices if we've never seen these before if a not in self.feats: self.feats.append(a) if b not in self.feats: self.feats.append(b) self.extenddists() # find the indices and precomputed distance i = self.feats.index(a) #FIXME these are linear scans for now... j = self.feats.index(b) #FIXME these are linear scans for now... d = self.dists[i,j] # if it's not a default val, it's already computed #print 'here... with %s, %s => %s, %s => %s' % (a,b, i,j,d) if self.valid(d): return d try: d = self.dists[i,j] = self.dists[j,i] = self.distfunc(a, b) except Exception, e: print '%s Error with %s, %s: %s, setting to default val of %s' % (type(e), a, b, e, self.defaultval) d = self.defaultval return d def computemissing(self, useprocs=1): """Computes missing distances""" s = self.dists n = len(self.feats) nums = dict(missing=0, examined=0) todo = [(i, j) for i in range(n) for j in range(i+1, n)] print 'Got %d todo' % (len(todo)) def callback(result): """Takes the result of dfunc() and applies it""" #print 'Got call back with %s' % (result,) d, i, j = result s[j,i] = s[i,j] = d self.save(infork=1) self.computemany(todo, callback=callback, useprocs=useprocs, indices=1) print 'All done' self.lastsave = 0 self.save(infork=0) def computemany(self, todo, callback=None, useprocs=1, indices=0): """Computes many distances using procs. Todo should consist of (i,j) pairs (if indices=1), or feat names (if indices=0). If you supply a callback it is called with a single tuple: (distance, i, j). If you don't, the default callback simply sets dists[i,j] = dists[j,i] = d. """ import multiprocessing as mp newtodo = [] print 'Adding indices if necessary' if not indices: # add any missing elements for a, b in todo: if a not in self.feats: self.feats.append(a) if b not in self.feats: self.feats.append(b) self.extenddists() # find the indices for a, b in todo: i = self.feats.index(a) #FIXME these are linear scans for now... 
j = self.feats.index(b) #FIXME these are linear scans for now... newtodo.append((i,j)) todo = newtodo def dfunc(i, j): """Returns the distance between the given rows of the data. Returns (d, i, j) for convenience.""" #print 'Got dfunc with %s, %s' % (i, j) a, b = self.feats[i], self.feats[j] try: d = self.distfunc(a, b) except Exception, e: print '%s Error with %s (%s), %s (%s): %s, setting to default val of %s' % (type(e), a, i, b, j, e, self.defaultval) d = self.defaultval return (d, i, j) if not callback: def callback(result): """Default callback just sets the distances""" d, i, j = result self.dists[j,i] = self.dists[i,j] = d # only do those that aren't valid newtodo = [] for i, j in todo: d = self.dists[i,j] if not self.valid(d): newtodo.append((i,j)) else: #callback((d,i,j)) #FIXME see if we want to actually call the callback here pass print 'Filtered down from %d to %d' % (len(todo), len(newtodo)) todo = newtodo if useprocs: # setup the queues and processing function inq, outq = mp.Queue(), mp.Queue() def procfunc(inq, outq): while 1: cur = inq.get() if cur is None: break ret = dfunc(*cur) outq.put(ret) # start the processes #procs = [mp.Process(target=procfunc, args=(inq,outq)) for i in range(mp.cpu_count()//2)] procs = [mp.Process(target=procfunc, args=(inq,outq)) for i in range(12)] for p in procs: p.daemon = True p.start() # add data to the input queue for cur in todo: inq.put(cur) # add sentinels at end to quit for n in range(100): inq.put(None) # read results from the output queue until we're all done ntodo = len(todo) while ntodo > 0: if ntodo % 100 == 0: print 'ntodo is %s, %d in inq, %d in outq' % (ntodo, inq.qsize(), outq.qsize()) ret = outq.get() callback(ret) ntodo -= 1 else: for i, j in todo: callback(dfunc(i, j)) print 'Finished with computemany' def computerow(self, a): """Computes and returns distances for a full row vs. the given element. 
Returns sorted (dist, name) pairs.""" if a not in self.feats: self.feats.append(a) self.extenddists() ret = [(self.dist(a, b),b) for b in self.feats] ret.sort(reverse=not self.sortasc) return ret def getmatrix(self, rows, cols, indices=0): """Returns a matrix consisting of the values at the given row and cols. Does no computation; only returns existing values.""" ret = np.ones((len(rows), len(cols))) * self.defaultval for i, r in enumerate(rows): print i, r if not indices: try: r = self.feats.index(r) except: continue for j, c in enumerate(cols): if not indices: try: c = self.feats.index(c) except: continue ret[i,j] = self.dists[r,c] return ret
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with this
work for additional information regarding copyright ownership. The ASF
licenses this file to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""

import logging
import time
import urllib2
import ssl
from functools import wraps
from urllib2 import HTTPError
from tempfile import gettempdir
from alerts.base_alert import BaseAlert
from collections import namedtuple
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries.functions.get_path_from_url import get_path_from_url
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from ambari_commons import OSCheck
from ambari_commons.inet_utils import resolve_address, ensure_ssl_using_protocol
from ambari_commons.constants import AGENT_TMP_DIR
from ambari_agent.AmbariConfig import AmbariConfig

logger = logging.getLogger(__name__)

# default timeout
DEFAULT_CONNECTION_TIMEOUT = 5

# Result of one web probe: HTTP status code (0 when the request itself
# failed), elapsed time, and an error message (None on success).
WebResponse = namedtuple('WebResponse', 'status_code time_millis error_msg')

# Apply the configured TLS protocol / CA certificate to outgoing HTTPS
# requests made by this module.
ensure_ssl_using_protocol(
  AmbariConfig.get_resolved_config().get_force_https_protocol_name(),
  AmbariConfig.get_resolved_config().get_ca_cert_file_path()
)

class WebAlert(BaseAlert):
  """Alert that probes an HTTP(S) endpoint (optionally via kerberos/curl)
  and maps the response code onto OK / WARNING / CRITICAL."""

  def __init__(self, alert_meta, alert_source_meta, config):
    super(WebAlert, self).__init__(alert_meta, alert_source_meta, config)

    connection_timeout = DEFAULT_CONNECTION_TIMEOUT

    # extract any lookup keys from the URI structure
    self.uri_property_keys = None
    if 'uri' in alert_source_meta:
      uri = alert_source_meta['uri']
      self.uri_property_keys = self._lookup_uri_property_keys(uri)

      if 'connection_timeout' in uri:
        connection_timeout = uri['connection_timeout']

    # python uses 5.0, CURL uses "5"
    self.connection_timeout = float(connection_timeout)
    self.curl_connection_timeout = int(connection_timeout)

    # will force a kinit even if klist says there are valid tickets (4 hour default)
    self.kinit_timeout = long(config.get('agent', 'alert_kinit_timeout',
        BaseAlert._DEFAULT_KINIT_TIMEOUT))

  def _collect(self):
    """Runs the alert: resolves the URI, issues the web request, and returns
    an (alert_state, text_args) tuple based on the HTTP response code."""
    if self.uri_property_keys is None:
      raise Exception("Could not determine result. URL(s) were not defined.")

    # use the URI lookup keys to get a final URI value to query
    alert_uri = self._get_uri_from_structure(self.uri_property_keys)

    logger.debug("[Alert][{0}] Calculated web URI to be {1} (ssl={2})".format(
      self.get_name(), alert_uri.uri, str(alert_uri.is_ssl_enabled)))

    url = self._build_web_query(alert_uri)

    # substitute 0.0.0.0 in url with actual fqdn
    url = url.replace('0.0.0.0', self.host_name)

    web_response = self._make_web_request(url)
    status_code = web_response.status_code
    # NOTE(review): the urllib path below stores a *seconds* value in the
    # time_millis field (time.time() difference), so this division looks
    # inconsistent between the curl and urllib paths -- confirm units.
    time_seconds = web_response.time_millis / 1000
    error_message = web_response.error_msg

    # status 0 means the request itself failed
    if status_code == 0:
      return (self.RESULT_CRITICAL, [status_code, url, time_seconds, error_message])

    # check explicit listed codes
    if self.uri_property_keys.acceptable_codes and status_code in self.uri_property_keys.acceptable_codes:
      return (self.RESULT_OK, [status_code, url, time_seconds])

    # anything that's less than 400 is OK
    if status_code < 400:
      return (self.RESULT_OK, [status_code, url, time_seconds])

    # everything else is WARNING
    return (self.RESULT_WARNING, [status_code, url, time_seconds, error_message])

  def _build_web_query(self, alert_uri):
    """
    Builds a URL out of the URI structure. If the URI is already a URL of
    the form http[s]:// then this will return the URI as the URL; otherwise,
    it will build the URL from the URI structure's elements
    """
    # shortcut if the supplied URI starts with the information needed
    string_uri = str(alert_uri.uri)
    if string_uri.startswith('http://') or string_uri.startswith('https://'):
      return alert_uri.uri

    uri_path = None
    if string_uri and string_uri != str(None):
      uri_path = get_path_from_url(string_uri)

    # start building the URL manually
    host = BaseAlert.get_host_from_url(alert_uri.uri)
    if host is None:
      host = self.host_name

    # maybe slightly realistic
    port = 80
    if alert_uri.is_ssl_enabled is True:
      port = 443

    # extract the port
    try:
      port = int(get_port_from_url(alert_uri.uri))
    except:
      # no port in the URI; keep the scheme default chosen above
      pass

    scheme = 'http'
    if alert_uri.is_ssl_enabled is True:
      scheme = 'https'

    if OSCheck.is_windows_family():
      # on windows 0.0.0.0 is invalid address to connect but on linux it resolved to 127.0.0.1
      host = resolve_address(host)

    if uri_path:
      return "{0}://{1}:{2}/{3}".format(scheme, host, str(port), uri_path)
    else:
      return "{0}://{1}:{2}".format(scheme, host, str(port))

  def _make_web_request(self, url):
    """
    Makes an http(s) request to a web resource and returns the http code. If
    there was an error making the request, return 0 for the status code.
    Uses curl_krb_request when kerberos principal/keytab are configured and
    security is enabled; plain urllib2 otherwise.
    """
    error_msg = None
    try:
      response_code = 0
      kerberos_keytab = None
      kerberos_principal = None

      if self.uri_property_keys.kerberos_principal is not None:
        kerberos_principal = self._get_configuration_value(
          self.uri_property_keys.kerberos_principal)

        if kerberos_principal is not None:
          # substitute _HOST in kerberos principal with actual fqdn
          kerberos_principal = kerberos_principal.replace('_HOST', self.host_name)

      if self.uri_property_keys.kerberos_keytab is not None:
        kerberos_keytab = self._get_configuration_value(self.uri_property_keys.kerberos_keytab)

      security_enabled = self._get_configuration_value('{{cluster-env/security_enabled}}')

      if kerberos_principal is not None and kerberos_keytab is not None \
          and security_enabled is not None and security_enabled.lower() == "true":
        tmp_dir = AGENT_TMP_DIR
        if tmp_dir is None:
          tmp_dir = gettempdir()

        # Get the configured Kerberos executables search paths, if any
        kerberos_executable_search_paths = self._get_configuration_value('{{kerberos-env/executable_search_paths}}')
        smokeuser = self._get_configuration_value('{{cluster-env/smokeuser}}')

        response_code, error_msg, time_millis = curl_krb_request(tmp_dir,
          kerberos_keytab, kerberos_principal, url, "web_alert",
          kerberos_executable_search_paths, True, self.get_name(), smokeuser,
          connection_timeout=self.curl_connection_timeout,
          kinit_timer_ms = self.kinit_timeout)
      else:
        # kerberos is not involved; use urllib2
        response_code, time_millis, error_msg = self._make_web_request_urllib(url)

      return WebResponse(status_code=response_code, time_millis=time_millis,
        error_msg=error_msg)
    except Exception, exception:
      # any failure (DNS, connect, auth, ...) is reported as status 0
      if logger.isEnabledFor(logging.DEBUG):
        logger.exception("[Alert][{0}] Unable to make a web request.".format(self.get_name()))

      return WebResponse(status_code=0, time_millis=0, error_msg=str(exception))

  def _make_web_request_urllib(self, url):
    """
    Make a web request using urllib2. This function does not handle exceptions.
    :param url: the URL to request
    :return: a tuple of the response code and the total time in ms
    """
    response = None
    error_message = None
    start_time = time.time()

    try:
      response = urllib2.urlopen(url, timeout=self.connection_timeout)
      response_code = response.getcode()
      # NOTE(review): this is seconds, not milliseconds, despite the name --
      # see the corresponding division in _collect().
      time_millis = time.time() - start_time
      return response_code, time_millis, error_message
    except HTTPError, httpError:
      # an HTTP error still carries a usable status code
      time_millis = time.time() - start_time
      error_message = str(httpError)
      return httpError.code, time_millis, error_message
    finally:
      if response is not None:
        try:
          response.close()
        except Exception, exception:
          if logger.isEnabledFor(logging.DEBUG):
            logger.exception("[Alert][{0}] Unable to close socket connection".format(self.get_name()))

  def _get_reporting_text(self, state):
    '''
    Gets the default reporting text to use when the alert definition does not
    contain any.
    :param state: the state of the alert in uppercase (such as OK, WARNING, etc)
    :return: the parameterized text
    '''
    if state == self.RESULT_CRITICAL:
      return 'Connection failed to {1}'

    return 'HTTP {0} response in {2:.4f} seconds'
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.

FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned ${tf.estimator.Estimator}s.

When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.

(1) Feature type:

 * Continuous features can be represented by `numeric_column`.
 * Categorical features can be represented by any `categorical_column_with_*`
   column:
   - `categorical_column_with_keys`
   - `categorical_column_with_vocabulary_file`
   - `categorical_column_with_hash_bucket`
   - `categorical_column_with_integerized_feature`

(2) Model type:

 * Deep neural network models (`DNNClassifier`, `DNNRegressor`).

   Continuous features can be directly fed into deep neural network models.

     age_column = numeric_column("age")

   To feed sparse features into DNN models, wrap the column with
   `embedding_column` or `indicator_column`. `indicator_column` is recommended
   for features with only a few possible values. For features with many
   possible values, `embedding_column` is recommended.

     embedded_dept_column = embedding_column(
       categorical_column_with_keys("department", ["math", "philosophy", ...]),
       dimension=10)

 * Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).

   Sparse features can be fed directly into linear models. They behave like an
   indicator column but with an efficient implementation.

     dept_column = categorical_column_with_keys("department",
       ["math", "philosophy", "english"])

   It is recommended that continuous features be bucketized before being fed
   into linear models.

     bucketized_age_column = bucketized_column(
       source_column=age_column,
       boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

   Sparse features can be crossed (also known as conjuncted or combined) in
   order to form non-linearities, and then fed into linear models.

     cross_dept_age_column = crossed_column(
       columns=[department_column, bucketized_age_column],
       hash_bucket_size=1000)

Example of building canned `Estimator`s using FeatureColumns:

  # Define features and transformations
  deep_feature_columns = [age_column, embedded_dept_column]
  wide_feature_columns = [dept_column, bucketized_age_column,
      cross_dept_age_column]

  # Build deep model
  estimator = DNNClassifier(
      feature_columns=deep_feature_columns,
      hidden_units=[500, 250, 50])
  estimator.train(...)

  # Or build a wide model
  estimator = LinearClassifier(
      feature_columns=wide_feature_columns)
  estimator.train(...)

  # Or build a wide and deep model!
  estimator = DNNLinearCombinedClassifier(
      linear_feature_columns=wide_feature_columns,
      dnn_feature_columns=deep_feature_columns,
      dnn_hidden_units=[500, 250, 50])
  estimator.train(...)

FeatureColumns can also be transformed into a generic input layer for
custom models using `input_from_feature_columns`.

Example of building model using FeatureColumns, this can be used in a
`model_fn` which is given to the {tf.estimator.Estimator}:

  # Building model via layers

  deep_feature_columns = [age_column, embedded_dept_column]
  columns_to_tensor = parse_feature_columns_from_examples(
      serialized=my_data,
      feature_columns=deep_feature_columns)
  first_layer = input_from_feature_columns(
      columns_to_tensors=columns_to_tensor,
      feature_columns=deep_feature_columns)
  second_layer = fully_connected(first_layer, ...)
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
import collections

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest


def make_linear_model(features,
                      feature_columns,
                      units=1,
                      sparse_combiner='sum',
                      weight_collections=None,
                      trainable=True):
  """Returns a linear prediction `Tensor` based on given `feature_columns`.

  This function generates a weighted sum for each unit. Weighted sum refers to
  logits in classification problems. It refers to the prediction itself for
  linear regression problems.

  Main difference of `make_linear_model` and `make_input_layer` is handling of
  categorical columns. `make_linear_model` treats them as `indicator_column`s
  while `make_input_layer` explicitly requires wrapping each of them with an
  `embedding_column` or an `indicator_column`.

  Args:
    features: A mapping from key to tensors. 'string' key means a base feature.
      It can have `_FeatureColumn` as a key too. That means that FeatureColumn
      is already transformed by the input pipeline.
    feature_columns: An iterable containing all the FeatureColumns. All items
      should be instances of classes derived from FeatureColumn.
    units: An integer, dimensionality of the output space. Default value is 1.
    sparse_combiner: A string specifying how to reduce if a sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. It combines each sparse columns independently.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
    weight_collections: A list of collection names to which the Variable will be
      added. Note that, variables will also be added to collections
      `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).

  Returns:
    A `Tensor` which represents predictions/logits of a linear model. Its shape
    is (batch_size, units) and its dtype is `float32`.

  Raises:
    ValueError: if an item in `feature_columns` is neither a `_DenseColumn`
      nor `_CategoricalColumn`.
  """
  _check_feature_columns(feature_columns)
  for column in feature_columns:
    if not isinstance(column, (_DenseColumn, _CategoricalColumn)):
      raise ValueError('Items of feature_columns must be either a _DenseColumn '
                       'or _CategoricalColumn. Given: {}'.format(column))
  weight_collections = list(weight_collections or [])
  weight_collections += [
      ops.GraphKeys.GLOBAL_VARIABLES,
      ops.GraphKeys.MODEL_VARIABLES
  ]
  with variable_scope.variable_scope(
      None, default_name='make_linear_model', values=features.values()):
    # NOTE(review): "weigthed_sums" is a misspelling of "weighted_sums";
    # kept as-is (local name, renaming is out of scope for a doc pass).
    weigthed_sums = []
    builder = _LazyBuilder(features)
    # Sort by name so variable creation order is deterministic.
    for column in sorted(feature_columns, key=lambda x: x.name):
      with variable_scope.variable_scope(None, default_name=column.name):
        if isinstance(column, _DenseColumn):
          weigthed_sums.append(_create_dense_column_weighted_sum(
              column, builder, units, weight_collections, trainable))
        else:
          weigthed_sums.append(_create_categorical_column_weighted_sum(
              column, builder, units, sparse_combiner, weight_collections,
              trainable))
    predictions_no_bias = math_ops.add_n(
        weigthed_sums, name='weighted_sum_no_bias')
    bias = variable_scope.get_variable(
        'bias_weight',
        shape=[units],
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=weight_collections)
    predictions = nn_ops.bias_add(
        predictions_no_bias, bias, name='weighted_sum')
    return predictions


def numeric_column(key,
                   shape=(1,),
                   default_value=None,
                   dtype=dtypes.float32,
                   normalizer_fn=None):
  """Represents real valued or numerical features.

  An example:
  ```python
  price = numeric_column('price')
  all_feature_columns = [price, ...]
  dense_tensor = make_input_layer(features, all_feature_columns)

  # or
  bucketized_price = bucketized_column(price, boundaries=[...])
  all_feature_columns = [bucketized_price, ...]
  linear_prediction, _, _ = make_linear_model(features, all_feature_columns)
  ```

  Args:
    key: A string providing key to look up corresponding `Tensor`.
    shape: An iterable of integers specifies the shape of the `Tensor`. An
      integer can be given which means a single dimension `Tensor` with given
      width. The `Tensor` representing the column will have the shape of
      [batch_size] + `shape`.
    default_value: A single value compatible with `dtype` or an iterable of
      values compatible with `dtype` which the column takes on during
      `tf.Example` parsing if data is missing. A default value of `None` will
      cause `tf.parse_example` to fail if an example does not contain this
      column. If a single value is provided, the same value will be applied as
      the default value for every item. If an iterable of values is provided,
      the shape of the `default_value` should be equal to the given `shape`.
    dtype: defines the type of values. Default value is `tf.float32`. Must be a
      non-quantized, real integer or floating point type.
    normalizer_fn: If not `None`, a function that can be used to normalize the
      value of the tensor after `default_value` is applied for parsing.
      Normalizer function takes the input `Tensor` as its argument, and returns
      the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that
      even though most common use case of this function is normalization, it
      can be used for any kind of Tensorflow transformations.

  Returns:
    A _NumericColumn.

  Raises:
    TypeError: if any dimension in shape is not an int
    ValueError: if any dimension in shape is not a positive integer
    TypeError: if `default_value` is an iterable but not compatible with `shape`
    TypeError: if `default_value` is not compatible with `dtype`.
    ValueError: if `dtype` is not convertible to `tf.float32`.
  """
  shape = _check_shape(shape, key)
  if not (dtype.is_integer or dtype.is_floating):
    raise ValueError('dtype must be convertible to float. '
                     'dtype: {}, key: {}'.format(dtype, key))
  default_value = _check_default_value(shape, default_value, dtype, key)

  if normalizer_fn is not None and not callable(normalizer_fn):
    raise TypeError(
        'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))

  return _NumericColumn(
      key,
      shape=shape,
      default_value=default_value,
      dtype=dtype,
      normalizer_fn=normalizer_fn)


def categorical_column_with_hash_bucket(key,
                                        hash_bucket_size,
                                        dtype=dtypes.string):
  """Represents sparse feature where ids are set by hashing.

  Use this when your sparse features are in string or integer format where you
  want to distribute your inputs into a finite number of buckets by hashing.
  output_id = Hash(input_feature_string) % bucket_size

  An example:
  ```python
  keywords = categorical_column_with_hash_bucket("keywords", 10K)
  linear_prediction, _, _ = make_linear_model(features, all_feature_columns)
  all_feature_columns = [keywords, ...]

  # or
  keywords_embedded = embedding_column(keywords, 16)
  all_feature_columns = [keywords_embedded, ...]
  dense_tensor = make_input_layer(features, all_feature_columns)
  ```

  Args:
    key: A string providing key to look up corresponding `Tensor`.
    hash_bucket_size: An int > 1. The number of buckets.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A `_CategoricalColumnHashed`.

  Raises:
    ValueError: `hash_bucket_size` is not greater than 1.
    ValueError: `dtype` is neither string nor integer.
  """
  if hash_bucket_size is None:
    raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))

  if hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be at least 1. '
                     'hash_bucket_size: {}, key: {}'.format(
                         hash_bucket_size, key))

  if dtype != dtypes.string and not dtype.is_integer:
    raise ValueError('dtype must be string or integer. '
                     'dtype: {}, column_name: {}'.format(dtype, key))

  return _CategoricalColumnHashed(key, hash_bucket_size, dtype)


class _FeatureColumn(object):
  """Represents a feature column abstraction.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  To distinguish the concept of a feature family and a specific binary feature
  within a family, we refer to a feature family like "country" as a feature
  column. Following is an example feature in a `tf.Example` format:
    {key: "country",  value: [ "US" ]}
  In this example the value of feature is "US" and "country" refers to the
  column of the feature.

  This class is an abstract class. User should not create instances of this.
  """
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def name(self):
    """Returns string. used for variable_scope and naming."""
    pass

  @abc.abstractmethod
  def _transform_feature(self, inputs):
    """Returns transformed `Tensor`, uses `inputs` to access input tensors.

    It uses `inputs` to get either raw feature or transformation of other
    FeatureColumns.

    Example input access:
    Let's say a Feature column depends on raw feature ('raw') and another
    `_FeatureColumn` (input_fc). To access corresponding Tensors, inputs will
    be used as follows:

    ```python
    raw_tensor = inputs.get('raw')
    fc_tensor = inputs.get(input_fc)
    ```

    Args:
      inputs: A `_LazyBuilder` object to access inputs.

    Returns:
      Transformed feature `Tensor`.
    """
    pass

  @abc.abstractproperty
  def _parse_example_config(self):
    """Returns a `tf.Example` parsing spec as dict.

    It is used for get_parsing_spec for `tf.parse_example`. Returned spec is a
    dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
    supported objects. Please check documentation of ${tf.parse_example} for
    all supported spec objects.

    Let's say a Feature column depends on raw feature ('raw') and another
    `_FeatureColumn` (input_fc). One possible implementation of
    _parse_example_config is as follows:

    ```python
    spec = {'raw': tf.FixedLenFeature(...)}
    spec.update(input_fc._parse_example_config)
    return spec
    ```
    """
    pass


class _DenseColumn(_FeatureColumn):
  """Represents a column which can be represented as `Tensor`.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  Some examples of this type are: numeric_column, embedding_column,
  indicator_column.
  """
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def _variable_shape(self):
    """Returns shape of variable which is compatible with _get_dense_tensor."""
    pass

  @abc.abstractmethod
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns a `Tensor`.

    The output of this function will be used by model-buildier-functions. For
    example the pseudo code of `make_input_layer` will be like that:

    ```python
    def make_input_layer(features, feature_columns, ...):
      outputs = [fc._get_dense_tensor(...) for fc in feature_columns]
      return tf.concat(outputs)
    ```

    Args:
      inputs: A `_LazyBuilder` object to access inputs.
      weight_collections: List of graph collections to which Variables (if any
        will be created) are added.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see ${tf.Variable}).
    """
    pass


def _create_dense_column_weighted_sum(
    column, builder, units, weight_collections, trainable):
  """Create a weighted sum of a dense column for make_linear_model."""
  tensor = column._get_dense_tensor(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  # Flatten the column's output to (batch_size, num_elements) before the
  # matmul against the per-column weight variable.
  num_elements = tensor_shape.TensorShape(column._variable_shape).num_elements()  # pylint: disable=protected-access
  batch_size = array_ops.shape(tensor)[0]
  tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
  weight = variable_scope.get_variable(
      name='weight',
      shape=[num_elements, units],
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return math_ops.matmul(tensor, weight, name='weighted_sum')


class _CategoricalColumn(_FeatureColumn):
  """Represents a categorical feature.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  A categorical feature typically handled with a ${tf.SparseTensor} of IDs.
  """
  __metaclass__ = abc.ABCMeta

  IdWeightPair = collections.namedtuple(  # pylint: disable=invalid-name
      'IdWeightPair', ['id_tensor', 'weight_tensor'])

  @abc.abstractproperty
  def _num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    pass

  @abc.abstractmethod
  def _get_sparse_tensors(self,
                          inputs,
                          weight_collections=None,
                          trainable=None):
    """Returns an IdWeightPair.

    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.

    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.

    Args:
      inputs: A `LazyBuilder` as a cache to get input tensors required to
        create `IdWeightPair`.
      weight_collections: List of graph collections to which variables (if any
        will be created) are added.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see ${tf.get_variable}).
    """
    pass


def _create_categorical_column_weighted_sum(
    column, builder, units, sparse_combiner, weight_collections, trainable):
  """Create a weighted sum of a categorical column for make_linear_model."""
  sparse_tensors = column._get_sparse_tensors(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  weight = variable_scope.get_variable(
      name='weight',
      shape=[column._num_buckets, units],  # pylint: disable=protected-access
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return _safe_embedding_lookup_sparse(
      weight,
      sparse_tensors.id_tensor,
      sparse_weights=sparse_tensors.weight_tensor,
      combiner=sparse_combiner,
      name='weighted_sum')


class _LazyBuilder(object):
  """Handles caching of transformations while building the model.

  `FeatureColumn` specifies how to digest an input column to the network. Some
  feature columns require data transformations. This class caches those
  transformations.

  Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and a cross with it. In that case we should
  create only one bucketization op instead of creating ops for each feature
  column separately. To handle re-use of transformed columns, `_LazyBuilder`
  caches all previously transformed columns.

  Example:
  We're trying to use the following `FeatureColumns`:

  ```python
  bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_buckets("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, keywords])
  ... = make_linear_model(features,
                          [bucketized_age, keywords, age_X_keywords]
  ```

  If we transform each column independently, then we'll get duplication of
  bucketization (one for cross, one for bucketization itself).

  The `_LazyBuilder` eliminates this duplication.
  """

  def __init__(self, features):
    """Creates a `_LazyBuilder`.

    Args:
      features: A mapping from feature column to tensors. 
A `string` key signifies a base feature (not-transformed). A `FeatureColumn` key means that this `Tensor` is the output of an existing `FeatureColumn` which can be reused. """ self._columns_to_tensors = features.copy() def get(self, key): """Returns a `Tensor` for the given key. A `str` key is used to access a base feature (not-transformed). When a `_FeatureColumn` is passed, the transformed feature is returned if it already exists, otherwise the given `_FeatureColumn` is asked to provide its transformed output, which is then cached. Args: key: a `str` or a `_FeatureColumn`. Returns: The transformed `Tensor` corresponding to the `key`. Raises: ValueError: if key is not found or a transformed `Tensor` cannot be computed. """ if key in self._columns_to_tensors: # Feature_column is already transformed or it's a raw feature. return self._columns_to_tensors[key] if not isinstance(key, (str, _FeatureColumn)): raise TypeError('"key" must be either a "str" or "_FeatureColumn". ' 'Provided: {}'.format(key)) if not isinstance(key, _FeatureColumn): raise ValueError('Feature {} is not in features dictionary.'.format(key)) column = key logging.debug('Transforming feature_column %s.', column) transformed = column._transform_feature(self) # pylint: disable=protected-access if transformed is None: raise ValueError('Column {} is not supported.'.format(column.name)) self._columns_to_tensors[column] = transformed return self._columns_to_tensors[column] def _check_feature_columns(feature_columns): if isinstance(feature_columns, dict): raise ValueError('Expected feature_columns to be iterable, found dict.') for column in feature_columns: if not isinstance(column, _FeatureColumn): raise ValueError('Items of feature_columns must be a _FeatureColumn.') name_to_column = dict() for column in feature_columns: if column.name in name_to_column: raise ValueError('Duplicate feature column name found for columns: {} ' 'and {}. This usually means that these columns refer to ' 'same base feature. 
Either one must be discarded or a ' 'duplicated but renamed item must be inserted in ' 'features dict.'.format(column, name_to_column[column.name])) name_to_column[column.name] = column class _NumericColumn(_DenseColumn, collections.namedtuple('_NumericColumn', [ 'key', 'shape', 'default_value', 'dtype', 'normalizer_fn' ])): """see `numeric_column`.""" @property def name(self): return self.key @property def _parse_example_config(self): return { self.key: parsing_ops.FixedLenFeature(self.shape, self.dtype, self.default_value) } def _transform_feature(self, inputs): input_tensor = inputs.get(self.key) if isinstance(input_tensor, sparse_tensor_lib.SparseTensor): raise ValueError( 'The corresponding Tensor of numerical column must be a Tensor. ' 'SparseTensor is not supported. key: {}'.format(self.key)) if self.normalizer_fn is not None: input_tensor = self.normalizer_fn(input_tensor) return math_ops.to_float(input_tensor) @property def _variable_shape(self): return self.shape def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return inputs.get(self) def _create_tuple(shape, value): """Returns a tuple with given shape and filled with value.""" if shape: return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])]) return value def _as_tuple(value): if not nest.is_sequence(value): return value return tuple([_as_tuple(v) for v in value]) def _check_shape(shape, key): """Returns shape if it's valid, raises error otherwise.""" assert shape is not None if not nest.is_sequence(shape): shape = [shape] shape = tuple(shape) for dimension in shape: if not isinstance(dimension, int): raise TypeError('shape dimensions must be integer. ' 'shape: {}, key: {}'.format(shape, key)) if dimension < 1: raise ValueError('shape dimensions must be greater than 0. 
' 'shape: {}, key: {}'.format(shape, key)) return shape def _is_shape_and_default_value_compatible(default_value, shape): """Verifies compatibility of shape and default_value.""" # Invalid condition: # * if default_value is not a scalar and shape is empty # * or if default_value is an iterable and shape is not empty if nest.is_sequence(default_value) != bool(shape): return False if not shape: return True if len(default_value) != shape[0]: return False for i in range(shape[0]): if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]): return False return True def _check_default_value(shape, default_value, dtype, key): """Returns default value as tuple if it's valid, otherwise raises errors. This function verifies that `default_value` is compatible with both `shape` and `dtype`. If it is not compatible, it raises an error. If it is compatible, it casts default_value to a tuple and returns it. `key` is used only for error message. Args: shape: An iterable of integers specifies the shape of the `Tensor`. default_value: If a single value is provided, the same value will be applied as the default value for every item. If an iterable of values is provided, the shape of the `default_value` should be equal to the given `shape`. dtype: defines the type of values. Default value is `tf.float32`. Must be a non-quantized, real integer or floating point type. key: A string providing key to look up corresponding `Tensor`. Returns: A tuple which will be used as default value. Raises: TypeError: if `default_value` is an iterable but not compatible with `shape` TypeError: if `default_value` is not compatible with `dtype`. ValueError: if `dtype` is not convertible to `tf.float32`. 
""" if default_value is None: return None if isinstance(default_value, int): return _create_tuple(shape, default_value) if isinstance(default_value, float) and dtype.is_floating: return _create_tuple(shape, default_value) if callable(getattr(default_value, 'tolist', None)): # Handles numpy arrays default_value = default_value.tolist() if nest.is_sequence(default_value): if not _is_shape_and_default_value_compatible(default_value, shape): raise ValueError( 'The shape of default_value must be equal to given shape. ' 'default_value: {}, shape: {}, key: {}'.format( default_value, shape, key)) # Check if the values in the list are all integers or are convertible to # floats. is_list_all_int = all( isinstance(v, int) for v in nest.flatten(default_value)) is_list_has_float = any( isinstance(v, float) for v in nest.flatten(default_value)) if is_list_all_int: return _as_tuple(default_value) if is_list_has_float and dtype.is_floating: return _as_tuple(default_value) raise TypeError('default_value must be compatible with dtype. ' 'default_value: {}, dtype: {}, key: {}'.format( default_value, dtype, key)) class _CategoricalColumnHashed( _CategoricalColumn, collections.namedtuple('_CategoricalColumnHashed', ['key', 'hash_bucket_size', 'dtype'])): """see `categorical_column_with_hash_bucket`.""" @property def name(self): return self.key @property def _parse_example_config(self): return {self.key: parsing_ops.VarLenFeature(self.dtype)} def _transform_feature(self, inputs): input_tensor = inputs.get(self.key) if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor): raise ValueError('SparseColumn input must be a SparseTensor.') if (input_tensor.dtype != dtypes.string and not input_tensor.dtype.is_integer): raise ValueError('input tensors dtype must be string or integer. ' 'dtype: {}, column_name: {}'.format( input_tensor.dtype, self.key)) if self.dtype.is_integer != input_tensor.dtype.is_integer: raise ValueError( 'Column dtype and SparseTensors dtype must be compatible. 
' 'key: {}, column dtype: {}, tensor dtype: {}'.format( self.key, self.dtype, input_tensor.dtype)) if self.dtype == dtypes.string: sparse_values = input_tensor.values else: sparse_values = string_ops.as_string(input_tensor.values) sparse_id_values = string_ops.string_to_hash_bucket_fast( sparse_values, self.hash_bucket_size, name='lookup') return sparse_tensor_lib.SparseTensor( input_tensor.indices, sparse_id_values, input_tensor.dense_shape) @property def _num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.hash_bucket_size def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): return _CategoricalColumn.IdWeightPair(inputs.get(self), None) # TODO(zakaria): Move this to embedding_ops and make it public. def _safe_embedding_lookup_sparse(embedding_weights, sparse_ids, sparse_weights=None, combiner=None, default_id=None, name=None, partition_strategy='div', max_norm=None): """Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of `P`. `embedding_weights` may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a partitioner. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for `default_id` is returned, or the 0-vector if `default_id` is not supplied. The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. Args: embedding_weights: A list of `P` float tensors or values representing partitioned embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. 
The total unpartitioned shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size and `e_1, ..., e_m` are the embedding dimensions. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing float weights corresponding to `sparse_ids`, or `None` if all weights are be assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. default_id: The id to use for an entry with no features. name: A name for this operation (optional). partition_strategy: A string specifying the partitioning strategy. Currently `"div"` and `"mod"` are supported. Default is `"div"`. max_norm: If not None, all embeddings are l2-normalized to max_norm before combining. Returns: Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`. Raises: ValueError: if `embedding_weights` is empty. """ if combiner is None: logging.warn('The default value of combiner will change from \"mean\" ' 'to \"sqrtn\" after 2016/11/01.') combiner = 'mean' if embedding_weights is None: raise ValueError('Missing embedding_weights %s.' % embedding_weights) if isinstance(embedding_weights, variables.PartitionedVariable): embedding_weights = list(embedding_weights) # get underlying Variables. if not isinstance(embedding_weights, list): embedding_weights = [embedding_weights] if len(embedding_weights) < 1: raise ValueError('Missing embedding_weights %s.' 
% embedding_weights) dtype = sparse_weights.dtype if sparse_weights is not None else None if isinstance(embedding_weights, variables.PartitionedVariable): embedding_weights = list(embedding_weights) embedding_weights = [ ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights ] with ops.name_scope(name, 'embedding_lookup', embedding_weights + [sparse_ids, sparse_weights]) as scope: # Reshape higher-rank sparse ids and weights to linear segment ids. original_shape = sparse_ids.dense_shape original_rank_dim = sparse_ids.dense_shape.get_shape()[0] original_rank = ( array_ops.size(original_shape) if original_rank_dim.value is None else original_rank_dim.value) sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [ math_ops.reduce_prod( array_ops.slice(original_shape, [0], [original_rank - 1])), array_ops.gather(original_shape, original_rank - 1)]) if sparse_weights is not None: sparse_weights = sparse_tensor_lib.SparseTensor( sparse_ids.indices, sparse_weights.values, sparse_ids.dense_shape) # Prune invalid ids and weights. sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights) # Fill in dummy values for empty features, if necessary. sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids, default_id or 0) if sparse_weights is not None: sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0) result = embedding_ops.embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights, combiner=combiner, partition_strategy=partition_strategy, name=None if default_id is None else scope, max_norm=max_norm) if default_id is None: # Broadcast is_row_empty to the same shape as embedding_lookup_result, # for use in Select. is_row_empty = array_ops.tile( array_ops.reshape(is_row_empty, [-1, 1]), array_ops.stack([1, array_ops.shape(result)[1]])) result = array_ops.where(is_row_empty, array_ops.zeros_like(result), result, name=scope) # Reshape back from linear ids back into higher-dimensional dense result. 
final_result = array_ops.reshape( result, array_ops.concat([ array_ops.slice( math_ops.cast(original_shape, dtypes.int32), [0], [original_rank - 1]), array_ops.slice(array_ops.shape(result), [1], [-1]) ], 0)) final_result.set_shape(tensor_shape.unknown_shape( (original_rank_dim - 1).value).concatenate(result.get_shape()[1:])) return final_result def _prune_invalid_ids(sparse_ids, sparse_weights): """Prune invalid IDs (< 0) from the input ids and weights.""" is_id_valid = math_ops.greater_equal(sparse_ids.values, 0) if sparse_weights is not None: is_id_valid = math_ops.logical_and( is_id_valid, math_ops.greater(sparse_weights.values, 0)) sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid) if sparse_weights is not None: sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid) return sparse_ids, sparse_weights
# -*- coding: utf-8 -*-
import gevent
from gevent.lock import Semaphore
from ethereum import slogging

from raiden.api.python import RaidenAPI
from raiden.utils import pex
from raiden.transfer.state import (
    CHANNEL_STATE_OPENED,
    CHANNEL_STATE_SETTLED,
)

log = slogging.get_logger(__name__)  # pylint: disable=invalid-name


class ConnectionManager(object):
    """The ConnectionManager provides a high level abstraction for connecting
    to a Token network.

    Note:
        It is initialized with 0 funds; a connection to the token network
        will be only established _after_ calling `connect(funds)`
    """
    # XXX Hack: for bootstrapping the first node on a network opens a channel
    # with this address to become visible.
    BOOTSTRAP_ADDR_HEX = '2' * 40
    # NOTE(review): str.decode('hex') is Python-2-only; would need
    # binascii.unhexlify on Python 3.
    BOOTSTRAP_ADDR = BOOTSTRAP_ADDR_HEX.decode('hex')

    def __init__(
            self,
            raiden,
            token_address,
            channelgraph,
    ):
        # Serializes all fund/channel mutations across greenlets.
        self.lock = Semaphore()
        self.raiden = raiden
        self.api = RaidenAPI(raiden)
        self.channelgraph = channelgraph
        self.token_address = token_address
        # All three stay 0 until connect() is called.
        self.funds = 0
        self.initial_channel_target = 0
        self.joinable_funds_target = 0

    def connect(
            self,
            funds,
            initial_channel_target=3,
            joinable_funds_target=.4):
        """Connect to the network.

        Use this to establish a connection with the token network.

        Subsequent calls to `connect` are allowed, but will only affect the
        spendable funds and the connection strategy parameters for the future.
        `connect` will not close any channels.

        Note: the ConnectionManager does not discriminate manually opened
        channels from automatically opened ones. If the user manually opened
        channels, those deposit amounts will affect the funding per channel
        and the number of new channels opened.

        Args:
            funds (int): the amount of tokens spendable for this
                ConnectionManager.
            initial_channel_target (int): number of channels to open
                immediately
            joinable_funds_target (float): amount of funds not initially
                assigned
        """
        if funds <= 0:
            raise ValueError('connecting needs a positive value for `funds`')

        self.initial_channel_target = initial_channel_target
        self.joinable_funds_target = joinable_funds_target

        open_channels = self.open_channels
        # there are already channels open
        if len(open_channels):
            log.debug(
                'connect() called on an already joined token network',
                token_address=pex(self.token_address),
                open_channels=len(open_channels),
                sum_deposits=sum(
                    channel.deposit for channel in open_channels
                ),
                funds=funds,
            )

        # Empty graph: we are the first node on this token network.
        if len(self.channelgraph.graph.nodes()) == 0:
            with self.lock:
                log.debug('bootstrapping token network.')
                # make ourselves visible
                self.api.open(
                    self.token_address,
                    ConnectionManager.BOOTSTRAP_ADDR
                )

        with self.lock:
            self.funds = funds
            funding = self.initial_funding_per_partner
            # this could be a subsequent call, or some channels already open
            new_partner_count = max(
                0,
                self.initial_channel_target - len(self.open_channels)
            )
            for partner in self.find_new_partners(new_partner_count):
                self.api.open(
                    self.token_address,
                    partner,
                )
                self.api.deposit(
                    self.token_address,
                    partner,
                    funding
                )

    def leave(self, wait_for_settle=True, timeout=30):
        """Leave the token network.

        This implies closing all open channels and optionally waiting for
        settlement.

        Args:
            wait_for_settle (bool): block until successful settlement?
            timeout (float): maximum time to wait
        """
        with self.lock:
            # Target 0 also signals join_channel/retry_connect to no-op.
            self.initial_channel_target = 0
            open_channels = self.open_channels
            channel_specs = [(
                self.token_address,
                c.partner_address) for c in open_channels]
            for channel in channel_specs:
                try:
                    # NOTE(review): the trailing comma makes this statement a
                    # 1-tuple expression; harmless but likely unintended.
                    self.api.close(*channel),
                except RuntimeError:
                    # if the error wasn't that the channel was already closed: raise
                    if channel[1] in [
                            c.partner_address for c in self.open_channels]:
                        raise

            # wait for events to propagate
            gevent.sleep(self.raiden.alarm.wait_time)
            if wait_for_settle:
                try:
                    with gevent.timeout.Timeout(timeout):
                        while any(
                                c.state != CHANNEL_STATE_SETTLED
                                for c in open_channels):
                            # wait for events to propagate
                            gevent.sleep(self.raiden.alarm.wait_time)
                except gevent.timeout.Timeout:
                    log.debug(
                        'timeout while waiting for settlement',
                        unsettled=sum(
                            1 for channel in open_channels
                            if channel.state != CHANNEL_STATE_SETTLED
                        ),
                        settled=sum(
                            1 for channel in open_channels
                            if channel.state == CHANNEL_STATE_SETTLED
                        )
                    )

    def join_channel(self, partner_address, partner_deposit):
        """Will be called, when we were selected as channel partner by another
        node. It will fund the channel with up to the partner's deposit, but
        not more than remaining funds or the initial funding per channel.

        If the connection manager has no funds, this is a noop.
        """
        # not initialized
        if self.funds <= 0:
            return
        # in leaving state
        if self.initial_channel_target < 1:
            return
        with self.lock:
            remaining = self.funds_remaining
            initial = self.initial_funding_per_partner
            # Never exceed what the partner put in, what we still have, or
            # the per-channel target.
            joining_funds = min(
                partner_deposit,
                remaining,
                initial
            )
            if joining_funds <= 0:
                return
            self.api.deposit(
                self.token_address,
                partner_address,
                joining_funds
            )
            log.debug(
                'joined a channel!',
                funds=joining_funds,
                me=pex(self.raiden.address),
                partner=pex(partner_address)
            )

    def retry_connect(self):
        """Will be called when new channels in the token network are detected.
        If the minimum number of channels was not yet established, it will try
        to open new channels.

        If the connection manager has no funds, this is a noop.
        """
        # not initialized
        if self.funds <= 0:
            return
        # in leaving state
        if self.initial_channel_target == 0:
            return
        with self.lock:
            if self.funds_remaining <= 0:
                return
            if len(self.open_channels) >= self.initial_channel_target:
                return
            for partner in self.find_new_partners(
                    self.initial_channel_target - len(self.open_channels)):
                try:
                    self.api.open(
                        self.token_address,
                        partner
                    )
                    self.api.deposit(
                        self.token_address,
                        partner,
                        self.initial_funding_per_partner
                    )
                # this can fail because of a race condition, where the channel
                # partner opens first
                except Exception as e:
                    log.error('could not open a channel', exc_info=e)

    def find_new_partners(self, number):
        """Search the token network for potential channel partners.

        Args:
            number (int): number of partners to return
        """
        # Exclude existing partners, the bootstrap pseudo-address and
        # ourselves.
        known = set(c.partner_address for c in self.open_channels)
        known = known.union({self.__class__.BOOTSTRAP_ADDR})
        known = known.union({self.raiden.address})
        available = set(self.channelgraph.graph.nodes()) - known

        available = self._select_best_partners(available)
        log.debug('found {} partners'.format(len(available)))
        return available[:number]

    def _select_best_partners(self, partners):
        # FIXME: use a proper selection strategy
        # https://github.com/raiden-network/raiden/issues/576
        return list(partners)

    @property
    def initial_funding_per_partner(self):
        """The calculated funding per partner depending on configuration and
        overall funding of the ConnectionManager.
        """
        if self.initial_channel_target:
            # Reserve `joinable_funds_target` fraction for join_channel();
            # split the rest evenly across the target channel count.
            return int(
                self.funds * (1 - self.joinable_funds_target) /
                self.initial_channel_target
            )
        else:
            return 0

    @property
    def wants_more_channels(self):
        """True, if funds available and the `initial_channel_target` was not
        yet reached.
        """
        return (
            self.funds_remaining > 0 and
            len(self.open_channels) < self.initial_channel_target
        )

    @property
    def funds_remaining(self):
        """The remaining funds after subtracting the already deposited amounts.
        """
        if self.funds > 0:
            remaining = self.funds - sum(
                channel.deposit for channel in self.open_channels
            )
            assert isinstance(remaining, int)
            return remaining
        return 0

    @property
    def open_channels(self):
        """Shorthand for getting our open channels in this token network.
        """
        return [
            channel for channel in
            self.api.get_channel_list(token_address=self.token_address)
            if channel.state == CHANNEL_STATE_OPENED
        ]
# Sebastian Raschka 2016-2017
#
# ann is a supporting package for the book
# "Introduction to Artificial Neural Networks and Deep Learning:
#  A Practical Guide with Applications in Python"
#
# Author: Sebastian Raschka <sebastianraschka.com>
#
# License: MIT

import unittest
import numpy as np
from ann.np import onehot
from ann.np import onehot_reverse
from ann.np import square_padding
from ann.np import l2_normalize
from ann.np import minmax_scaling
from ann.np import standardize
from ann.np import subsampling_frequent_tokens


class TestOnehot(unittest.TestCase):
    """Tests for `onehot`: integer labels -> one-hot matrix."""

    def test_defaults(self):
        oh_ary = onehot(ary=np.array([0, 1, 2, 3]))
        expect = np.array([[1., 0., 0., 0.],
                           [0., 1., 0., 0.],
                           [0., 0., 1., 0.],
                           [0., 0., 0., 1.]])
        self.assertTrue(np.array_equal(oh_ary, expect))
        # default output dtype is float32
        self.assertTrue(oh_ary.dtype == np.float32)

    def test_skiplabel(self):
        # Label 4 is absent: width is still max(label) + 1, column 4 all-zero.
        oh_ary = onehot(ary=np.array([0, 1, 2, 3, 5]))
        expect = np.array([[1., 0., 0., 0., 0., 0.],
                           [0., 1., 0., 0., 0., 0.],
                           [0., 0., 1., 0., 0., 0.],
                           [0., 0., 0., 1., 0., 0.],
                           [0., 0., 0., 0., 0., 1.]])
        self.assertTrue(np.array_equal(oh_ary, expect))

    def test_n_classes(self):
        # Explicit n_classes widens the matrix beyond the observed labels.
        oh_ary = onehot(ary=np.array([0, 1, 2]), n_classes=5)
        expect = np.array([[1., 0., 0., 0., 0.],
                           [0., 1., 0., 0., 0.],
                           [0., 0., 1., 0., 0.]])
        self.assertTrue(np.array_equal(oh_ary, expect))

    def test_dtype(self):
        oh_ary = onehot(ary=np.array([0, 1, 2]), dtype=np.int32)
        expect = np.array([[1, 0, 0],
                           [0, 1, 0],
                           [0, 0, 1]])
        self.assertTrue(np.array_equal(oh_ary, expect))
        self.assertTrue(oh_ary.dtype == np.int32)


class TestOnehotReverse(unittest.TestCase):
    """Tests for `onehot_reverse`: 2D one-hot/probability matrix -> labels."""

    def test_defaults(self):
        a = np.array([[1., 0., 0., 0.],
                      [0., 1., 0., 0.],
                      [0., 0., 0., 1.],
                      [0., 0., 0., 1.]])
        got = onehot_reverse(a)
        expect = np.array([0, 1, 3, 3], dtype=np.int32)
        self.assertTrue(np.array_equal(got, expect))

    def test_proba(self):
        # Probability rows map to the argmax class.
        a = np.array([[0.66, 0.24, 0.10],
                      [0.66, 0.24, 0.10],
                      [0.66, 0.24, 0.10],
                      [0.24, 0.66, 0.10]])
        got = onehot_reverse(a)
        expect = np.array([0, 0, 0, 1], dtype=np.int32)
        self.assertTrue(np.array_equal(got, expect))

    def test_dim(self):
        # 1D input is rejected with a descriptive message.
        a = np.array([0, 0, 0, 1])
        with self.assertRaises(ValueError) as context:
            onehot_reverse(a)
        msg = context.exception
        self.assertEqual(str(msg), "Input array must have 2 dimensions\n"
                                   "Got predictions.ndim: 1")


class TestSquarePadding(unittest.TestCase):
    """Tests for `square_padding`: center an array inside a larger square."""

    def test_1ary_defaults_3to7(self):
        a = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]])
        exp = np.array([[0., 0., 0., 0., 0., 0., 0.],
                        [0., 0., 0., 0., 0., 0., 0.],
                        [0., 0., 1., 2., 3., 0., 0.],
                        [0., 0., 4., 5., 6., 0., 0.],
                        [0., 0., 7., 8., 9., 0., 0.],
                        [0., 0., 0., 0., 0., 0., 0.],
                        [0., 0., 0., 0., 0., 0., 0.]])
        got = square_padding(ary=a, n_elements=7, value=0)
        np.testing.assert_allclose(exp, got)

    def test_1ary_defaults_3to6(self):
        # Even target size: the extra padding row/column ends up on one side.
        a = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]])
        exp = np.array([[0., 0., 0., 0., 0., 0.],
                        [0., 0., 0., 0., 0., 0.],
                        [0., 0., 1., 2., 3., 0.],
                        [0., 0., 4., 5., 6., 0.],
                        [0., 0., 7., 8., 9., 0.],
                        [0., 0., 0., 0., 0., 0.]])
        got = square_padding(ary=a, n_elements=6, value=0)
        np.testing.assert_allclose(exp, got)

    def test_2ary_defaults_3to7(self):
        # Batch of two matrices, padded along axes (1, 2).
        a = np.array([[[1., 2., 3.],
                       [4., 5., 6.],
                       [7., 8., 9.]],
                      [[10., 11., 12.],
                       [13., 5., 6.],
                       [7., 8., 9.]]])
        exp = np.array([[[0., 0., 0., 0., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0.],
                         [0., 0., 1., 2., 3., 0., 0.],
                         [0., 0., 4., 5., 6., 0., 0.],
                         [0., 0., 7., 8., 9., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0.]],
                        [[0., 0., 0., 0., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0.],
                         [0., 0., 10., 11., 12., 0., 0.],
                         [0., 0., 13., 5., 6., 0., 0.],
                         [0., 0., 7., 8., 9., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0.]]])
        got = square_padding(ary=a, n_elements=7, axes=(1, 2), value=0)
        np.testing.assert_allclose(exp, got)

    def test_2ary_defaults_3to6(self):
        a = np.array([[[1., 2., 3.],
                       [4., 5., 6.],
                       [7., 8., 9.]],
                      [[10., 11., 12.],
                       [13., 5., 6.],
                       [7., 8., 9.]]])
        got = square_padding(ary=a, n_elements=6, axes=(1, 2), value=0)
        exp = np.array([[[0., 0., 0., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0.],
                         [0., 0., 1., 2., 3., 0.],
                         [0., 0., 4., 5., 6., 0.],
                         [0., 0., 7., 8., 9., 0.],
                         [0., 0., 0., 0., 0., 0.]],
                        [[0., 0., 0., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0.],
                         [0., 0., 10., 11., 12., 0.],
                         [0., 0., 13., 5., 6., 0.],
                         [0., 0., 7., 8., 9., 0.],
                         [0., 0., 0., 0., 0., 0.]]])
        np.testing.assert_allclose(exp, got)


class TestL2Normalize(unittest.TestCase):
    """Tests for `l2_normalize`: scale rows to unit Euclidean norm."""

    def test_1d(self):
        exp = np.array([0.26726124, 0.53452248, 0.80178373])
        got = l2_normalize(np.array([1, 2, 3]))
        np.testing.assert_allclose(exp, got)

    def test_2d(self):
        exp = np.array([[0.26726124, 0.53452248, 0.80178373],
                        [0.26726124, 0.53452248, 0.80178373],
                        [0.80178373, 0.53452248, 0.26726124]])
        got = l2_normalize(np.array([[1, 2, 3],
                                     [1, 2, 3],
                                     [3, 2, 1]]))
        np.testing.assert_allclose(exp, got)

    def test_3d(self):
        exp = np.array([[[0.26726124, 0.53452248, 0.80178373],
                         [0.26726124, 0.53452248, 0.80178373],
                         [0.26726124, 0.53452248, 0.80178373]],
                        [[0.26726124, 0.53452248, 0.80178373],
                         [0.26726124, 0.53452248, 0.80178373],
                         [0.26726124, 0.53452248, 0.80178373]]])
        got = l2_normalize(np.array([[[1, 2, 3],
                                      [1, 2, 3],
                                      [1, 2, 3]],
                                     [[1, 2, 3],
                                      [1, 2, 3],
                                      [1, 2, 3]]]))
        np.testing.assert_allclose(exp, got)


class TestMinMax(unittest.TestCase):
    """Tests for `minmax_scaling`, including reuse of precomputed min/max."""

    # NOTE(review): method name says "standardize" but this exercises
    # minmax_scaling; consider renaming to test_minmax_scaling.
    def test_standardize(self):
        train_ary = np.array([[1, 1, 1],
                              [4, 5, 6]])
        test_ary = np.array([[1, 2, 3],
                             [4, 3, 4]])
        train_rescaled, tmin, tmax = minmax_scaling(train_ary,
                                                    feature_minmax=(0.1, 0.9))
        exp_train = np.array([[0.1, 0.1, 0.1],
                              [0.9, 0.9, 0.9]])
        np.testing.assert_allclose(train_rescaled, exp_train)
        # Test data is scaled with the *training* min/max.
        test_rescaled, _, _ = minmax_scaling(test_ary,
                                             feature_minmax=(0.1, 0.9),
                                             precomputed_min=tmin,
                                             precomputed_max=tmax)
        exp_test = np.array([[0.1, 0.3, 0.42],
                             [0.9, 0.5, 0.58]])
        np.testing.assert_allclose(test_rescaled, exp_test)


class TestStandardize(unittest.TestCase):
    """Tests for `standardize`, including reuse of precomputed mean/std."""

    def test_standardize(self):
        train_ary = np.array([[1, 1, 1],
                              [4, 5, 6]])
        test_ary = np.array([[1, 2, 3],
                             [4, 3, 4]])
        train_rescaled, tmean, tstd = standardize(train_ary)
        exp_train = np.array([[-1., -1., -1.],
                              [1., 1., 1.]])
        np.testing.assert_allclose(train_rescaled, exp_train)
        # Test data is standardized with the *training* statistics.
        test_rescaled, _, _ = standardize(test_ary,
                                          precomputed_mean=tmean,
                                          precomputed_std=tstd)
        exp_test = np.array([[-1., -0.5, -0.2],
                             [1., 0., 0.2]])
        np.testing.assert_allclose(test_rescaled, exp_test)


class SubsamplingFrequentTokens(unittest.TestCase):
    """Tests for `subsampling_frequent_tokens` (seeded, so deterministic)."""

    def test_simple(self):
        ary = [['this', 'is', 'is', 'a', 'test'],
               ['test', 'hello', 'world']]
        got = subsampling_frequent_tokens(ary, threshold=0.1, seed=1)
        expect = [['this', 'is', 'a'], ['hello', 'world']]
        self.assertEqual(got, expect)


if __name__ == '__main__':
    unittest.main()
#-*- coding: utf-8 -*- # # runner.py # --------- # Handles running ooni.nettests as well as ooni.plugoo.tests.OONITests. # # :authors: Isis Lovecruft, Arturo Filasto # :license: see included LICENSE file # :copyright: (c) 2012 Isis Lovecruft, Arturo Filasto, The Tor Project, Inc. # :version: 0.1.0-pre-alpha # import os import inspect from twisted.python import reflect, usage from twisted.trial.runner import isTestCase from twisted.trial.runner import filenameToModule from ooni.inputunit import InputUnitFactory from ooni.nettest import InputTestSuite, NetTestCase from ooni.plugoo import tests as oonitests from ooni.reporter import ReporterFactory from ooni.utils import log, date from ooni.utils.legacy import LegacyOONITest from ooni.utils.legacy import start_legacy_test, adapt_legacy_test def isLegacyTest(obj): """ Returns True if the test in question is written using the OONITest legacy class. We do this for backward compatibility of the OONIProbe API. """ try: return issubclass(obj, oonitests.OONITest) and not obj == oonitests.OONITest except TypeError: return False def processTest(obj, config): """ Process the parameters and :class:`twisted.python.usage.Options` of a :class:`ooni.nettest.Nettest`. :param obj: An uninstantiated old test, which should be a subclass of :class:`ooni.plugoo.tests.OONITest`. :param config: A configured and instantiated :class:`twisted.python.usage.Options` class. 
""" input_file = obj.inputFile if obj.requiresRoot: if os.getuid() != 0: raise Exception("This test requires root to run") if obj.optParameters or input_file: if not obj.optParameters: obj.optParameters = [] if input_file: obj.optParameters.append(input_file) class Options(usage.Options): optParameters = obj.optParameters options = Options() options.parseOptions(config['subArgs']) obj.localOptions = options if input_file: obj.inputFile = options[input_file[0]] try: tmp_obj = obj() tmp_obj.getOptions() except usage.UsageError: options.opt_help() return obj def findTestClassesFromConfig(config): """ Takes as input the command line config parameters and returns the test case classes. If it detects that a certain test class is using the old OONIProbe format, then it will adapt it to the new testing system. :param config: A configured and instantiated :class:`twisted.python.usage.Options` class. :return: A list of class objects found in a file or module given on the commandline. """ filename = config['test'] classes = [] module = filenameToModule(filename) for name, val in inspect.getmembers(module): if isTestCase(val): log.debug("Detected TestCase %s" % val) classes.append(val) elif isLegacyTest(val): log.debug("Detected Legacy Test %s" % val) classes.append(adapt_legacy_test(val, config)) return classes def makeTestCases(klass, tests, method_prefix): """ Takes a class some tests and returns the test cases. method_prefix is how the test case functions should be prefixed with. """ cases = [] for test in tests: cases.append(klass(method_prefix+test)) return cases def processTestOptions(cls, config): """ Process the parameters and :class:`twisted.python.usage.Options` of a :class:`ooni.nettest.Nettest`. :param cls: An subclass of :class:`ooni.nettest.NetTestCase`. :param config: A configured and instantiated :class:`twisted.python.usage.Options` class. 
""" #if cls.optParameters or cls.inputFile: if not cls.optParameters: cls.optParameters = [] if cls.inputFile: cls.optParameters.append(cls.inputFile) log.debug("CLS IS %s" % cls) log.debug("CLS OPTPARAM IS %s" % cls.optParameters) #if not hasattr(cls, subCommands): # cls.subCommands = [] if not cls.subCommands: cls.subCommands = [] class Options(usage.Options): optParameters = cls.optParameters parseArgs = lambda a: cls.subCommands.append(a) opts = Options() opts.parseOptions(config['subArgs']) cls.localOptions = opts if cls.inputFile: cls.inputFile = opts[cls.inputFile[0]] """ try: log.debug("%s: trying %s.localoptions.getOptions()..." % (__name__, cls.name)) try: assert hasattr(cls, 'getOptions') except AssertionError, ae: options = opts.opt_help() raise Exception, "Cannot find %s.getOptions()" % cls.name else: options = cls.getOptions() except usage.UsageError: options = opts.opt_help() else: """ return cls.localOptions def loadTestsAndOptions(classes, config): """ Takes a list of test classes and returns their testcases and options. Legacy tests will be adapted. 
""" from inspect import isclass method_prefix = 'test' options = [] test_cases = [] DEPRECATED = LegacyOONITest for klass in classes: if isinstance(klass, DEPRECATED): #not issubclass(klass, TestCase): try: cases, opts = processLegacyTest(klass, config) if cases: log.debug("Processing cases: %s" % str(cases)) return [], [] test_cases.append(cases) except Exception, e: log.err(e) else: try: opts = klass.local_options option.append(opts) except AttributeError, ae: options.append([]) log.err(ae) elif issubclass(klass, NetTestCase): try: cases, opts = processNetTest(klass, config, method_prefix) except Exception, e: log.err(e) else: test_cases.append(cases) options.append(opts) return test_cases, options def processNetTest(klass, config, method_prefix): try: log.debug("Processing cases and options for OONI %s test" % (klass.name if hasattr(klass, 'name') else 'Network Test')) tests = reflect.prefixedMethodNames(klass, method_prefix) if tests: cases = makeTestCases(klass, tests, method_prefix) log.debug("loadTestsAndOptions(): test %s found cases=%s"% (tests, cases)) try: k = klass() opts = processTestOptions(k, config) except Exception, e: opts = [] log.err(e) else: cases = [] except Exception, e: log.err(e) return cases, opts ''' if hasattr(klass, 'optParameters') or hasattr(klass, 'inputFile'): try: opts = processTestOptions(klass, config) except: opts = [] finally: try: k = klass() inputs = k._getInputs() except Exception, e: inputs = [] log.err(e) else: if opts and len(inputs) != 0: opts.append(['inputs', '', inputs, "cmdline inputs"]) log.debug("loadTestsAndOptions(): inputs=%s" % inputs) ''' def processLegacyTest(klass, config): log.msg("Processing cases and options for legacy test %s" % ( klass.shortName if hasattr(klass, shortName) else 'oonitest' )) if hasattr(klass, description): log.msg("%s" % klass.description) subcmds = [] if hasattr(klass, options): ## an unitiated Legacy test log.debug("%s.options found: %s " % (klass, klass.options)) try: assert 
isclass(klass.options), "legacytest.options is not class" except AssertionError, ae: log.debug(ae) else: ok = klass.options ok.parseArgs = lambda x: subcmds.append(x) try: opts = ok() opts.parseOptions(config['subArgs']) except Exception, e: log.err(e) opts = {} elif hasattr(klass, local_options): ## we've been initialized already log.debug("%s.local_options found" % klass) try: assert klass.local_options is not None opts = klass.local_options except AttributeError, ae: opts = {}; log.err(ae) try: cases = start_legacy_test(klass) ## XXX we need to get these results into the reporter if cases: return [], [] except Exception, e: cases = []; log.err(e) finally: log.debug(str(cases)) return cases, opts class ORunner(object): """ This is a specialized runner used by the ooniprobe command line tool. I am responsible for reading the inputs from the test files and splitting them in input units. I also create all the report instances required to run the tests. """ def __init__(self, cases, options=None, config=None): self.baseSuite = InputTestSuite self.cases = cases self.options = options try: assert len(options) != 0, "Length of options is zero!" 
except AssertionError, ae: log.err(ae) self.inputs = [] else: try: first = options.pop(0) except: first = options if 'inputs' in first: self.inputs = options['inputs'] else: log.msg("Could not find inputs!") self.inputs = [None] try: reportFile = open(config['reportfile'], 'a+') except: filename = 'report_'+date.timestamp()+'.yaml' reportFile = open(filename, 'a+') self.reporterFactory = ReporterFactory( reportFile, testSuite=self.baseSuite(self.cases) ) def runWithInputUnit(self, input_unit): idx = 0 result = self.reporterFactory.create() log.debug("Running test with input unit %s" % input_unit) for inputs in input_unit: result.reporterFactory = self.reporterFactory log.debug("Running with %s" % inputs) suite = self.baseSuite(self.cases) suite.input = inputs suite(result, idx) # XXX refactor all of this index bullshit to avoid having to pass # this index around. Probably what I want to do is go and make # changes to report to support the concept of having multiple runs # of the same test. # We currently need to do this addition in order to get the number # of times the test cases that have run inside of the test suite. idx += (suite._idx - idx) log.debug("I am now at the index %s" % idx) log.debug("Finished") result.done() def run(self): self.reporterFactory.options = self.options for input_unit in InputUnitFactory(self.inputs): self.runWithInputUnit(input_unit)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from django.conf import settings
from django.utils import timezone
from django.utils.timezone import localtime
from django.utils.encoding import smart_str, smart_unicode
from measure_finance.models import Measure, MeasureContributor, MeasureTotal
from election_registrar.models import ResultSource, Election
from delorean import parse
import pytz
# NOTE(review): this shadows django.utils.timezone imported above — any later
# reference to `timezone` resolves to pytz.timezone. Confirm this is intended.
from pytz import timezone
from nameparser import HumanName
import requests
import logging
import types
import re
from datetime import tzinfo, date

logger = logging.getLogger("kpcc_backroom_handshakes")


class BuildDonationCharts(object):
    """
    scaffolding to make an api request to MapLight and return
    ballot initiative contributions
    """

    # slug of the election each measure is attached to
    this_election = "general-2016-11-08"

    # MapLight ids of the ballot measures to fetch
    list_of_measures = [
        2014,
        2015,
        2016,
        2017,
        2018,
        2019,
        2020,
        2021,
        2022,
        2023,
        2024,
        2025,
        2026,
        2027,
        2028,
        2029,
        2030,
    ]

    request_headers = settings.REQUEST_HEADERS
    request_headers["x-api-key"] = settings.MAP_LIGHT_API_KEY
    api_url = "https://8d984hb45b.execute-api.us-west-2.amazonaws.com/prod/measures?language=en&id="

    # NOTE(review): single leading underscore — this is NOT __init__ and has
    # to be invoked explicitly; confirm whether the constructor was intended.
    def _init(self, *args, **kwargs):
        """
        run the functions needed to get contributors
        """
        framer = Framer()
        election = Election.objects.filter(electionid=self.this_election).first()
        for maplight_id in self.list_of_measures:
            requested_url = "%s%s" % (self.api_url, maplight_id)
            response = requests.get(requested_url, headers=self.request_headers)
            measure_data = response.json()["measure"]
            # "Prop 51" -> "Proposition 51" plus a url-safe slug
            identifying_information = measure_data["official_identifier"].split(" ")
            measure_data["official_identifier"] = "Proposition %s" % (identifying_information[1])
            measure_data["official_identifier_slug"] = framer._slug(measure_data["official_identifier"])
            measure_data["election_id"] = election.id
            measure_data = framer._massage_measure_title(measure_data)
            saver = Saver()
            saver.make_measure(measure_data)
            saver.make_measure_contributor(measure_data)
            saver.make_measure_total(measure_data)


class Saver(object):
    """ """
log_message = "\n*** My Import Messages ***\n" def make_measure(self, measure): """ """ log_message = "" f = Framer() try: obj, created = Measure.objects.update_or_create( measure_id=measure["measure_id"], election_id=measure["election_id"], defaults={ "official_identifier": measure["official_identifier"], "official_identifier_slug": measure["official_identifier_slug"], "topic": measure["topic"], "official_title": measure["official_title"], "official_short_summary": measure["official_short_summary"], "official_summary": measure["official_summary"], "official_summary_author": measure["official_summary_author"], "official_yes_vote_means": measure["official_yes_vote_means"], "official_no_vote_means": measure["official_no_vote_means"], "official_vote_means_source": measure["official_vote_means_source"], "official_financial_effect": measure["official_financial_effect"], "official_financial_effect_author": measure["official_financial_effect_author"], "official_impartial_analysis": measure["official_impartial_analysis"], "official_impartial_analysis_author": measure["official_impartial_analysis_author"], "official_tax_rate": measure["official_tax_rate"], "official_tax_rate_author": measure["official_tax_rate_author"], "official_short_arguments_yes": measure["official_short_arguments_yes"], "official_short_arguments_no": measure["official_short_arguments_no"], "official_short_arguments_source": measure["official_short_arguments_source"], "official_rebuttal_yes": measure["official_rebuttal_yes"], "official_rebuttal_no": measure["official_rebuttal_no"], "measure_type": measure["measure_type"], "passage_requirements": measure["passage_requirements"], "fulltext_link": measure["fulltext_link"], "published": measure["published"], "disable_finance_data": measure["disable_finance_data"], "deleted": measure["deleted"], "entity_type": measure["entity_type"], "measure_timestamp": f._save_proper_timezone(measure["measure_timestamp"]), } ) if created: log_message += "* %s created\n" % 
(measure["official_title"]) else: log_message += "* %s exists\n" % (measure["official_title"]) except Exception, exception: error_output = "%s %s" % (exception, measure["official_title"]) logger.error(error_output) raise logger.info(log_message) return log_message def make_measure_contributor(self, measure): """ """ log_message = "\n" f = Framer() this_measure = Measure.objects.get(measure_id=measure["measure_id"]) try: this_measure = Measure.objects.get(measure_id=measure["measure_id"]) except Exception, exception: error_output = "%s %s" % (exception, measure["measure_id"]) logger.error(error_output) raise try: contrib = MeasureContributor.objects.filter(measure_id=this_measure.id) if contrib: contrib.delete() log_message += "\t* Resetting contributors\n" except: pass try: for contrib in measure["measure_finance_top"]: if contrib["percentage_individual"] == "100.00" and contrib["top_type"] == "D": contrib["name"] = f._massage_measure_donor_name(contrib["name"]) is_llc = contrib["name"].find("LLC") if is_llc > 0: donor_name = contrib["name"].split("LLC") contrib["name"] = "%s LLC" % (donor_name[0].strip().title()) obj, created = this_measure.measurecontributor_set.update_or_create( measure=this_measure.id, finance_top_id=f._to_num(contrib["finance_top_id"])["value"], defaults={ "top_type": contrib["top_type"], "support": contrib["support"], "name": contrib["name"], "total_amount": f._to_num(contrib["total_amount"])["value"], "total_individual": f._to_num(contrib["total_individual"])["value"], "total_organization": f._to_num(contrib["total_organization"])["value"], "percentage_total": f._convert_to_pct(contrib["percentage_total"])["output_decimal"], "percentage_individual": f._convert_to_pct(contrib["percentage_individual"])["output_decimal"], "percentage_organization": f._convert_to_pct(contrib["percentage_organization"])["output_decimal"], "updated_date": contrib["updated_date"], "entity_type": contrib["entity_type"], "finance_top_timestamp": 
f._save_proper_timezone(contrib["finance_top_timestamp"]) } ) if created: log_message += "\t* %s created\n" % (smart_unicode(contrib["name"])) else: log_message += "\t* %s updated\n" % (smart_unicode(contrib["name"])) except Exception, exception: error_output = "%s %s" % (exception, contrib["finance_top_id"]) logger.error(error_output) raise logger.info(log_message) return log_message def make_measure_total(self, measure): """ """ log_message = "\n" f = Framer() try: this_measure = Measure.objects.get(measure_id=measure["measure_id"]) except Exception, exception: error_output = "%s %s" % (exception, measure["measure_id"]) logger.error(error_output) raise try: for position in measure["measure_finance"]: uniq_key = "%s_%s" % (position["support"].lower(), this_measure.measure_id) obj, created = this_measure.measuretotal_set.update_or_create( measure=this_measure.id, finance_id=uniq_key, defaults={ "support": position["support"], "total_amount": f._to_num(position["total_amount"])["value"], "total_individual": f._to_num(position["total_individual"])["value"], "total_unitemized": f._to_num(position["total_unitemized"])["value"], "total_itemized": f._to_num(position["total_itemized"])["value"], "total_organization": f._to_num(position["total_organization"])["value"], "pct_individual": f._convert_to_pct(position["percentage_individual"])["output_decimal"], "pct_organization": f._convert_to_pct(position["percentage_organization"])["output_decimal"], "pct_unitemized": f._convert_to_pct(position["percentage_unitemized"])["output_decimal"], "pct_itemized": f._convert_to_pct(position["percentage_itemized"])["output_decimal"], "updated_date": position["updated_date"], "entity_type": position["entity_type"], "finance_timestamp": f._save_proper_timezone(position["finance_timestamp"]) } ) if created: log_message += "\t* %s created\n" % (smart_unicode(position["support"])) else: log_message += "\t* %s updated\n" % (smart_unicode(position["support"])) except Exception, exception: 
error_output = "%s %s" % (exception, position["finance_id"]) logger.error(error_output) raise logger.info(log_message) return log_message class Framer(object): """ """ def _slug(self, value): """ creates an unicode slug from a value """ if isinstance(value, basestring): try: converted = value except Exception, exception: logger.error(exception) raise elif isinstance(value, str): try: converted = unicode(value, "utf-8") except Exception, exception: logger.error(exception) raise elif isinstance(value, (int, long, float)): self.assertNotIsInstance(value, basestring) try: converted = str(value) converted = unicode(converted) except Exception, exception: logger.error(exception) raise else: self.assertNotIsInstance(value, basestring) try: converted = unicode(value) except Exception, exception: logger.error(exception) raise output = converted.lower().strip().replace(" ", "-") output = re.sub(r'[^a-z0-9]+', '-', output).strip('-') output = re.sub(r'[-]+', '-', output) output = re.sub(r"[^\w-]", "", output) if isinstance(output, basestring): number_of_spaces = output.count(" ") if number_of_spaces == 0: return output else: return False def _to_num(self, value): """ given a value can it be converted to an int http://stackoverflow.com/a/16464365 """ output = {} # actually integer values if isinstance(value, (int, long)): output["convert"] = True output["value"] = value output["type"] = type(value) # some floats can be converted without loss elif isinstance(value, float): output["convert"] = (int(value) == float(value)) output["value"] = value output["type"] = type(value) # we can't convert nonetypes elif isinstance(value, types.NoneType): output["convert"] = False output["value"] = None output["type"] = type(value) # we can't convert non-string elif not isinstance(value, basestring): output["convert"] = False output["value"] = "Nonstring" output["type"] = type(value) else: value = value.strip() try: # try to convert value to float float_value = float(value) output["convert"] 
= True output["value"] = float_value output["type"] = type(float_value) except ValueError: # if fails try to convert value to int try: int_value = int(value) output["convert"] = True output["value"] = int_value output["type"] = type(int_value) # if fails it's a string except ValueError: output["convert"] = False output["value"] = None output["type"] = type(value) return output def _convert_to_pct(self, value): """ convert whole percent to decimal """ output = self._to_num(value) if output["convert"] == True: output["output_decimal"] = float(output["value"] / 100) else: output["output_decimal"] = None return output def _save_proper_timezone(self, eval_this_time): """ """ file_timestamp = parse(eval_this_time, timezone="US/Pacific") output = file_timestamp.datetime.astimezone(pytz.UTC) return output def _find_nth(self, haystack, needle, n): start = haystack.find(needle) while start >= 0 and n > 1: start = haystack.find(needle, start + 1) n -= 1 if isinstance(start, (int, float)): return start else: return False def _massage_measure_donor_name(self, name_string): """ """ name = HumanName(name_string) name.first = name.first.title() name.last = name.last.title() if name.middle: name.middle = name.middle.replace(".", "") name.middle = "%s." % (name.middle.title()) if name == "JR. Munger CHARLES T.": name.first = "Charles" name.middle = "T." name.last = "Munger" name.suffix = "Jr." if name == "M. Quinn. Delaney": name.first = "M." name.middle = "Quinn" name.last = "Delaney" name.suffix = None if name == "Robert Alan. Eustace": name.first = "Robert" name.middle = "Alan" name.last = "Eustace" name.suffix = None if name == "Susie Tompkins. 
Buell": name.first = "Susie" name.middle = "Tompkins" name.last = "Buell" name.suffix = None if name.middle and name.suffix: output = "%s %s %s %s" % (name.first, name.middle, name.last, name.suffix) if name.middle: output = "%s %s %s" % (name.first, name.middle, name.last) elif name.suffix: output = "%s %s %s" % (name.first, name.last, name.suffix) else: output = "%s %s" % (name.first, name.last) return output def _massage_measure_title(self, measure): """ """ string = measure["official_title"] number_of_periods = [i for i, letter in enumerate(string) if letter == "."] if len(number_of_periods) > 1: count = len(number_of_periods) - 1 output = string.replace(". ", ", ", count) output = output.replace(".", "") else: output = string.replace(".", "") measure["official_title"] = output if measure["official_identifier_slug"] == "proposition-54": measure["official_title"] = "public display of legislative bills, initiative and statute" elif measure["official_identifier_slug"] == "proposition-63": measure["official_title"] = "ammunition sales background checks, large-capacity magazine ban" elif measure["official_identifier_slug"] == "proposition-64": measure["official_title"] = "recreational marijuana legalization" elif measure["official_identifier_slug"] == "proposition-65": measure["official_title"] = "disposable bag sales for wildlife conservation" return measure def _concat(self, *args, **kwargs): """ create a slug-like string given values and a delimiter """ values = list(args) output = [] for value in values: if not isinstance(value, (str, basestring)): value = unicode(value) else: value = unicode(value) value = value.strip() output.append(value) output = kwargs["delimiter"].join(output) output = unicode(output) return output class Checker(object): """ """ def _return_sanity_checks(self, obj, **kwargs): sane_data = [] if hasattr(obj, "votepct"): sane_data.append(self._eval_part_of_whole(obj.votepct, 100)) if hasattr(obj, "votecount"): 
sane_data.append(self._eval_part_of_whole(obj.votecount, kwargs["totalvotes"])) if hasattr(obj, "precinctsreporting") and hasattr(obj, "precinctstotal"): sane_data.append(self._eval_part_of_whole(obj.precinctsreporting, obj.precinctstotal)) if obj.precinctsreporting == obj.precinctstotal and obj.precinctsreportingpct != 1.0: sane_data.append(False) sane_data.append(self._eval_part_of_whole(obj.precinctsreportingpct, 1.0)) if hasattr(obj, "votersturnout"): sane_data.append(self._eval_part_of_whole(obj.votersturnout, 1.0)) if hasattr(obj, "yescount") and hasattr(obj, "nocount"): total = (obj.yescount + obj.nocount) sane_data.append(self._eval_part_of_whole(obj.yescount, total)) sane_data.append(self._eval_part_of_whole(obj.nocount, total)) if hasattr(obj, "yespct") and hasattr(obj, "nopct"): sane_data.append(self._eval_part_of_whole(obj.yespct, 100)) sane_data.append(self._eval_part_of_whole(obj.nopct, 100)) if False in sane_data: return True else: return False def _eval_part_of_whole(self, part, whole): if part <= whole: return True else: return False
# -*- coding: utf-8 -*-

"""
    HMS Hospital Status Assessment and Request Management System

    @author: nursix
"""

# NOTE: this is a web2py/Sahana-Eden controller — request, response, T, db,
# s3db, s3mgr, deployment_settings, HTTP, DIV, URL etc. are injected into the
# execution environment by the framework, not imported here.

module = request.controller
resourcename = request.function

# refuse to serve the controller when the module is disabled in settings
if not deployment_settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)

# -----------------------------------------------------------------------------
def s3_menu_postp():
    # @todo: rewrite this for new framework
    # Post-process the menu: remember the most recently viewed hospital and
    # offer it under an "Open recent" entry.
    if len(request.args) > 0 and request.args[0].isdigit():
        # NOTE(review): newreq/selreq are assembled but never used below —
        # presumably leftovers from the request-matching menu; confirm.
        newreq = dict(from_record="hms_hospital.%s" % request.args[0],
                      from_fields="hospital_id$id")
        #selreq = {"req.hospital_id":request.args[0]}
    else:
        newreq = dict()
    selreq = {"req.hospital_id__ne":"NONE"}

    menu_selected = []
    # the last hospital visited in this session, if any
    hospital_id = s3mgr.get_session("hms", "hospital")
    if hospital_id:
        hospital = s3db.hms_hospital
        query = (hospital.id == hospital_id)
        record = db(query).select(hospital.id,
                                  hospital.name,
                                  limitby=(0, 1)).first()
        if record:
            name = record.name
            menu_selected.append(["%s: %s" % (T("Hospital"), name), False,
                                  URL(f="hospital", args=[record.id])])
    if menu_selected:
        menu_selected = [T("Open recent"), True, None, menu_selected]
        response.menu_options.append(menu_selected)

# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """

    module_name = deployment_settings.modules[module].name_nice
    response.title = module_name
    return dict(module_name=module_name)

# -----------------------------------------------------------------------------
def hospital():
    """ Main controller for hospital data entry """

    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]

    # Load Models to add tabs
    if deployment_settings.has_module("inv"):
        s3mgr.load("inv_inv_item")
    elif deployment_settings.has_module("req"):
        # (gets loaded by Inv if available)
        s3mgr.load("req_req")

    # Pre-processor: attach field tooltips and per-component tweaks before
    # the REST controller renders the request
    def prep(r):
        if r.interactive:
            # Add comments
            table = r.table
            table.gov_uuid.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Government UID"),
                                  T("The Unique Identifier (UUID) as assigned to this facility by the government."))))
            table.total_beds.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Total Beds"),
                                  T("Total number of beds in this hospital. Automatically updated from daily reports."))))
            table.available_beds.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Available Beds"),
                                  T("Number of vacant/available beds in this hospital. Automatically updated from daily reports."))))
            table.ems_status.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("EMS Status"),
                                  T("Status of operations of the emergency department of this hospital."))))
            table.ems_reason.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("EMS Reason"),
                                  T("Report the contributing factors for the current EMS status."))))
            table.or_status.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("OR Status"),
                                  T("Status of the operating rooms of this hospital."))))
            table.or_reason.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("OR Reason"),
                                  T("Report the contributing factors for the current OR status."))))
            table.facility_status.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Facility Status"),
                                  T("Status of general operation of the facility."))))
            table.clinical_status.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Clinical Status"),
                                  T("Status of clinical operation of the facility."))))
            table.morgue_status.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Morgue Status"),
                                  T("Status of morgue capacity."))))
            table.security_status.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Security Status"),
                                  T("Status of security procedures/access restrictions in the hospital."))))
            table.morgue_units.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Morgue Units Available"),
                                  T("Number of vacant/available units to which victims can be transported immediately."))))
            table.access_status.comment = DIV(DIV(_class="tooltip",
                _title="%s|%s" % (T("Road Conditions"),
                                  T("Describe the condition of the roads to your hospital."))))

            if r.method and r.method != "read":
                # Don't want to see in Create forms
                # inc list_create (list_fields over-rides)
                #address_hide(r.table)
                # Once separate fields have been migrated from location_id
                pass

            if r.component:
                if r.component.name == "inv_item" or \
                   r.component.name == "recv" or \
                   r.component.name == "send":
                    # Filter out items which are already in this inventory
                    s3db.inv_prep(r)

                elif r.component.name == "human_resource":
                    # Filter out people which are already staff for this hospital
                    s3_filter_staff(r)
                    # Cascade the organisation_id from the hospital to the staff
                    db.hrm_human_resource.organisation_id.default = r.record.organisation_id
                    db.hrm_human_resource.organisation_id.writable = False

                elif r.component.name == "req":
                    if r.method != "update" and r.method != "read":
                        # Hide fields which don't make sense in a Create form
                        # inc list_create (list_fields over-rides)
                        s3db.req_create_form_mods()

                elif r.component.name == "bed_capacity":
                    table = db.hms_bed_capacity
                    table.bed_type.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Bed Type"),
                                          T("Specify the bed type of this unit."))))
                    table.beds_baseline.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Baseline Number of Beds"),
                                          T("Baseline number of beds of that type in this unit."))))
                    table.beds_available.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Available Beds"),
                                          T("Number of available/vacant beds of that type in this unit at the time of reporting."))))
                    table.beds_add24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Additional Beds / 24hrs"),
                                          T("Number of additional beds of that type expected to become available in this unit within the next 24 hours."))))

                elif r.component.name == "activity":
                    table = db.hms_activity
                    table.date.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Date & Time"),
                                          T("Date and time this report relates to."))))
                    table.patients.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Patients"),
                                          T("Number of in-patients at the time of reporting."))))
                    table.admissions24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Admissions/24hrs"),
                                          T("Number of newly admitted patients during the past 24 hours."))))
                    table.discharges24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Discharges/24hrs"),
                                          T("Number of discharged patients during the past 24 hours."))))
                    table.deaths24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Deaths/24hrs"),
                                          T("Number of deaths during the past 24 hours."))))

                elif r.component.name == "contact":
                    table = db.hms_contact
                    table.title.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Title"),
                                          T("The Role this person plays within this hospital."))))

                elif r.component.name == "image":
                    # images are attached to the hospital, so hide the
                    # generic doc_image link fields
                    table = s3db.doc_image
                    table.location_id.readable = False
                    table.location_id.writable = False
                    table.organisation_id.readable = False
                    table.organisation_id.writable = False
                    table.person_id.readable = False
                    table.person_id.writable = False

                elif r.component.name == "ctc_capability":
                    table = db.hms_ctc_capability
                    table.ctc.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Cholera Treatment Center"),
                                          T("Does this facility provide a cholera treatment center?"))))
                    table.number_of_patients.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Current number of patients"),
                                          T("How many patients with the disease are currently hospitalized at this facility?"))))
                    table.cases_24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("New cases in the past 24h"),
                                          T("How many new cases have been admitted to this facility in the past 24h?"))))
                    table.deaths_24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Deaths in the past 24h"),
                                          T("How many of the patients with the disease died in the past 24h at this facility?"))))
                    table.icaths_available.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Infusion catheters available"),
                                          T("Specify the number of available sets"))))
                    table.icaths_needed_24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Infusion catheters need per 24h"),
                                          T("Specify the number of sets needed per 24h"))))
                    table.infusions_available.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Infusions available"),
                                          T("Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions"))))
                    table.infusions_needed_24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Infusions needed per 24h"),
                                          T("Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h"))))
                    table.antibiotics_available.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Antibiotics available"),
                                          T("Specify the number of available units (adult doses)"))))
                    table.antibiotics_needed_24.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Antibiotics needed per 24h"),
                                          T("Specify the number of units (adult doses) needed per 24h"))))
                    table.problem_types.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Current problems, categories"),
                                          T("Select all that apply"))))
                    table.problem_details.comment = DIV(DIV(_class="tooltip",
                        _title="%s|%s" % (T("Current problems, details"),
                                          T("Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved."))))

        elif r.representation == "aadata":
            pass
            # Hide the Implied fields here too to make columns match
            #db.rms_req.shelter_id.readable = False
            #db.rms_req.organisation_id.readable = False

        return True
    response.s3.prep = prep

    rheader = s3db.hms_hospital_rheader
    output = s3_rest_controller(module, resourcename,
                                rheader=rheader)
    return output

# -----------------------------------------------------------------------------
def incoming():
    """ Incoming Shipments """

    # delegates entirely to the inventory module's shared controller
    return inv_incoming()

# -----------------------------------------------------------------------------
def req_match():
    """ Match Requests """

    # delegates entirely to the request module's matching controller
    return s3db.req_match()

# END =========================================================================
'''
builders/__init__.py

@author ashon
@date 2014-08-26 ~
'''

import json
import traceback


class HighchartJsonBuilderException(Exception):
    '''
    HighchartJsonBuilderException

    Maps an integer error code to a human-readable message. The message of
    the triggering error and the formatted traceback at construction time
    are kept on the instance for debugging (see print_trace()).
    '''

    def __init__(self, code, msg=''):
        super(HighchartJsonBuilderException, self).__init__(code)
        self.exception_table = {
            # builder initialize exception
            0: 'Chart json template is needed.',
            1: 'Exception occurred while loading json template.',

            # chart compile exception
            10: 'Chart is not compiled.',

            # build exception
            100: 'Build Exception.',
            101: 'build_chart() must have return value.',
            102: 'build_chart() must return <dict> value.',

            # builder factory exception
            1000: 'Factory has no matched builder.',
            1001: 'Factory has no selected builder.',
            1010: 'Factory preprocess exception.',
        }
        self.code = code
        self.msg = self.exception_table.get(self.code, 'Unknown Error.') + ' (%s)' % str(msg)
        self.trace = traceback.format_exc()

    def __str__(self):
        # callers rely on str(exc) being the repr of the numeric code
        return repr(self.code)

    def print_trace(self):
        # print() with a single argument is valid under both python 2 and 3
        print("HighchartJsonBuilderException: [code %(code)s] %(message)s\n%(trace)s" % {
            'code': self.code,
            'message': self.msg,
            'trace': self.trace
        })


class HighchartJsonBuilder(object):
    '''
    HighchartJsonBuilder

    @description Generate highchart settings jsonString.
    '''

    def __init__(self, highchart_template_path=None):
        '''
        HighchartJsonBuilder()

        @constructor
        @description Initialize highchart global settings from a json
            template file.
        @exception <HighchartJsonBuilderException>
            code 0 when no template path is given,
            code 1 when the template cannot be read or parsed.
        '''
        # guard clause instead of the original trailing else-branch
        if highchart_template_path is None:
            raise HighchartJsonBuilderException(0)

        self.__chart_constructor = 'StockChart'
        self.__template_path = highchart_template_path
        try:
            # BUG FIX: use a context manager so the file handle is closed
            # (the original json.load(open(...)) leaked it), and translate
            # malformed json (ValueError / JSONDecodeError) into error
            # code 1 instead of letting it escape untranslated.
            with open(self.__template_path, 'r') as template_file:
                self.__template_json = json.load(template_file)
        except (OSError, IOError, ValueError):
            raise HighchartJsonBuilderException(1, self.__template_path)
        self.__compiled_chart_option = None

    def get_json_path(self):
        return self.__template_path

    def get_compiled_chart(self):
        return self.__compiled_chart_option

    def get_chart_template(self):
        return self.__template_json

    def get_compiled_chart_json(self):
        '''
        get_compiled_chart_json

        @returns <string> : highchart setting json (double quotes converted
            to single quotes for embedding in markup)
        @exception <HighchartJsonBuilderException> code 10 when build() has
            not been run yet.
        '''
        if self.__compiled_chart_option is None:
            raise HighchartJsonBuilderException(10)
        json_string = json.dumps(self.__compiled_chart_option)
        return json_string.replace('"', "'")

    def set_export_constr(self, string):
        self.__chart_constructor = string

    def get_export_constr(self):
        return self.__chart_constructor

    def __flush_option(self):
        # drop any previously compiled options before a fresh build
        self.__compiled_chart_option = None

    def build_chart(self, **kwargs):
        '''
        build_chart()

        @interface subclasses must return a <dict> of chart options.
        '''
        pass

    def build(self, **kwargs):
        '''
        build()

        @final
        @exception <HighchartJsonBuilderException>
            code 101 when build_chart() returns nothing,
            code 102 when it returns a non-dict.
        '''
        self.__flush_option()
        # BUG FIX: the original caught HighchartJsonBuilderException and
        # re-raised a new one carrying only the code, discarding the
        # original message and traceback; raising directly (and letting
        # subclass exceptions propagate unchanged) preserves both while
        # keeping the same exception type and codes visible to callers.
        self.__compiled_chart_option = self.build_chart(**kwargs)
        if self.__compiled_chart_option is None:
            raise HighchartJsonBuilderException(101)
        if not isinstance(self.__compiled_chart_option, dict):
            raise HighchartJsonBuilderException(102)


class HighchartBuilderFactory(object):
    '''
    HighchartBuilderFactory

    @description Factory of HighchartJsonBuilder
    '''

    def __init__(self, **kwargs):
        # init default variables
        self.__chart_builder_dict = {}
        self.__chart_builder = None
        self.__chart_key = ''
        self.__optional_params = kwargs

    def add_builder(self, string, json_builder):
        # NOTE(review): a non-HighchartJsonBuilder argument is silently
        # ignored here (as in the original); consider raising instead.
        if isinstance(json_builder, HighchartJsonBuilder):
            self.__chart_builder_dict[string] = json_builder

    def get_builder(self, builder_key):
        '''
        @exception <HighchartJsonBuilderException> code 1000 when no builder
            is registered under ``builder_key``.
        '''
        chart_builder = self.__chart_builder_dict.get(builder_key, None)
        if chart_builder is None:
            raise HighchartJsonBuilderException(1000)
        return chart_builder

    def get_builder_dict(self):
        return self.__chart_builder_dict

    def select_builder(self, key):
        '''
        select_builder(key)

        @description select chart builder matched with key.
            if not matched, then raise exception.
        @params key <string> - builder key.
        @exception <HighchartJsonBuilderException> code 1000
        '''
        self.__chart_builder = self.get_builder(key)
        self.__chart_key = key

    def get_selected_chart_key(self):
        return self.__chart_key

    def get_selected_builder(self):
        '''
        get_selected_builder()

        @description Returns selected builder. if selected builder is null,
            then raise exception.
        @returns <HighchartJsonBuilder> - selected builder
        @exception <HighchartJsonBuilderException> code 1001
        '''
        if self.__chart_builder is None:
            raise HighchartJsonBuilderException(1001, self.__chart_key)
        return self.__chart_builder

    def preprocess(self):
        '''
        preprocess()

        @abstract
        @description before build chartOption
        '''
        pass

    def build(self):
        '''
        build()

        @final
        @exception <HighchartJsonBuilderException>
            code 1001 when no builder is selected,
            code 1010 when preprocess() fails,
            plus whatever the builder's build() raises.
        '''
        selected_builder = self.get_selected_builder()
        try:
            self.preprocess()
        except Exception:
            raise HighchartJsonBuilderException(1010)
        selected_builder.build()

    def get_compiled_chart_json(self):
        '''
        get_compiled_chart_json()

        @facade of HighchartJsonBuilder
        @exception <HighchartJsonBuilderException> code 1001
        '''
        return self.get_selected_builder().get_compiled_chart_json()

    def get_export_constr(self):
        '''
        get_export_constr()

        @facade of HighchartJsonBuilder
        @exception <HighchartJsonBuilderException> code 1001
        '''
        return self.get_selected_builder().get_export_constr()
import rospy
from rospy import ServiceException
from rospy.rostime import Time, Duration
from python_qt_binding.QtCore import QObject
from python_qt_binding.QtCore import QTranslator
from abstract_item import AbstractItem
from arni_core.host_lookup import HostLookup
from arni_core.helper import SEUID
import arni_core.helper as helper
from arni_msgs.srv import NodeReaction
from helper_functions import prepare_number_for_representation, MAXIMUM_OFFLINE_TIME, ROUND_DIGITS


class NodeItem(AbstractItem):
    """
    A NodeItem represents a node with all of its data. It also has a interface to start/stop/restart nodes.
    """

    def __init__(self, logger, seuid, parent=None):
        """
        Initializes the NodeItem.

        :param seuid: the seuid of the item
        :type seuid: str
        :param logger: a logger where to log when special events occur
        :type logger: ModelLogger
        :param parent: the parent-item
        :type parent: AbstractItem
        """
        AbstractItem.__init__(self, logger, seuid, parent)
        self.__parent = parent
        # Fix: the original assigned self._type = "node" twice; once suffices.
        self._type = "node"

        # Statistics attributes tracked for this node; each one gets a raw
        # data list and three rated lists (actual/expected/state).
        self._attributes = []
        self._attributes.extend(["node_cpu_usage_mean", "node_cpu_usage_stddev", "node_cpu_usage_max",
                                 "node_cpu_usage_core_mean", "node_cpu_usage_core_stddev",
                                 "node_cpu_usage_core_max", "node_gpu_usage_mean", "node_gpu_usage_stddev",
                                 "node_gpu_usage_max", "node_ramusage_mean", "node_ramusage_stddev",
                                 "node_ramusage_max", "node_message_frequency_mean",
                                 "node_message_frequency_stddev", "node_message_frequency_max",
                                 "node_bandwidth_mean", "node_bandwidth_stddev", "node_bandwidth_max",
                                 "node_write_mean", "node_write_stddev", "node_write_max",
                                 "node_read_mean", "node_read_stddev", "node_read_max"])

        for item in self._attributes:
            self._add_data_list(item)

        for item in self._attributes:
            self._rated_attributes.append(item + ".actual_value")
            self._rated_attributes.append(item + ".expected_value")
            self._rated_attributes.append(item + ".state")

        for item in self._rated_attributes:
            self._add_rated_data_list(item)

        self._logger.log("info", Time.now(), seuid, "Created a new NodeItem")

    def execute_action(self, action):
        """
        Sends a signal to stop or restart the node.

        :param action: action to be executed
        :type action: RemoteAction
        """
        # The reaction service lives on the host the node runs on; derive its
        # name from the parent (host) seuid.
        host_formatted = helper.underscore_ip(self.__parent.get_seuid()[2:])
        service_name = "/execute_node_reaction/%s" % host_formatted
        try:
            execute = rospy.ServiceProxy(service_name, NodeReaction)
            # Response is intentionally ignored; failures are logged below.
            execute(self.seuid[2:], action, '')
        except ServiceException:
            self._logger.log("error", Time.now(), self.seuid,
                             "could not stop node %s, service %s not found" % (self.seuid, service_name))

    def _format_attribute(self, attr, value):
        """
        Render one translated "<label>: <value> <unit>" fragment for `attr`.

        :param attr: attribute key (also used to look up "<attr>_unit")
        :type attr: str
        :param value: raw numeric value to format
        :returns: formatted fragment without a trailing separator
        :rtype: str
        """
        return self.tr(attr) + ": " + prepare_number_for_representation(value) \
            + " " + self.tr(attr + "_unit")

    def get_detailed_data(self):
        """
        Returns the detailed data of the NodeItem.

        :returns: detailed data
        :rtype: str
        """
        data_dict = self.get_latest_data()

        content = "<p class=\"detailed_data\">"
        content += self.get_erroneous_entries()

        # Scalar CPU / RAM statistics.
        for attr in ("node_cpu_usage_mean", "node_cpu_usage_stddev", "node_cpu_usage_max",
                     "node_ramusage_mean", "node_ramusage_stddev", "node_ramusage_max"):
            content += self._format_attribute(attr, data_dict[attr]) + " <br>"

        # Per-core CPU statistics (one "core N" header per core).
        for i in range(len(data_dict["node_cpu_usage_core_mean"])):
            content += self.tr("core" + str(i + 1)) + "<br>"
            for attr in ("node_cpu_usage_core_mean", "node_cpu_usage_core_stddev",
                         "node_cpu_usage_core_max"):
                content += self._format_attribute(attr, data_dict[attr][i]) + " <br>"

        # Per-GPU statistics (no header line in the original output).
        for i in range(len(data_dict["node_gpu_usage_mean"])):
            for attr in ("node_gpu_usage_mean", "node_gpu_usage_stddev", "node_gpu_usage_max"):
                content += self._format_attribute(attr, data_dict[attr][i]) + " <br>"

        # Remaining scalar statistics.
        for attr in ("node_message_frequency_mean", "node_message_frequency_stddev",
                     "node_message_frequency_max", "node_bandwidth_mean", "node_bandwidth_stddev",
                     "node_bandwidth_max", "node_write_mean", "node_write_stddev", "node_write_max",
                     "node_read_mean", "node_read_stddev", "node_read_max"):
            content += self._format_attribute(attr, data_dict[attr]) + " <br>"

        content += "</p>"
        return content

    def get_plotable_items(self):
        """
        Returns items for the plot.

        :returns: str[]
        """
        return ["node_cpu_usage_mean", "node_cpu_usage_stddev", "node_cpu_usage_max",
                "node_cpu_usage_core_mean", "node_cpu_usage_core_stddev", "node_cpu_usage_core_max",
                "node_gpu_usage_mean", "node_gpu_usage_stddev", "node_gpu_usage_max",
                "node_ramusage_mean", "node_ramusage_stddev", "node_ramusage_max",
                "node_message_frequency_mean", "node_message_frequency_stddev",
                "node_message_frequency_max", "node_bandwidth_mean", "node_bandwidth_stddev",
                "node_bandwidth_max", "node_write_mean", "node_write_stddev", "node_write_max",
                "node_read_mean", "node_read_stddev", "node_read_max"]

    def get_short_data(self):
        """
        Returns a shortend version of the item data.

        :returns: data of the item
        :rtype: str
        """
        data_dict = self.get_latest_data()
        if data_dict["window_stop"] == Time(0):
            return "No data yet"
        elif (Time.now() - data_dict["window_stop"]) > Duration(MAXIMUM_OFFLINE_TIME):
            # last entry was more than MAXIMUM_OFFLINE_TIME ago, it could be offline!
            return "No data since " + str(round((Time.now() - data_dict["window_stop"]).to_sec(),
                                                ROUND_DIGITS)) + " seconds"

        content = ""
        # Fix: the original used `is "error"` which tests object identity,
        # not string equality, and only worked by CPython interning accident.
        if data_dict["state"] == "error":
            content += self.get_erroneous_entries_for_log()
        else:
            content += self._format_attribute("node_cpu_usage_mean",
                                              data_dict["node_cpu_usage_mean"]) + " - "
            content += self._format_attribute("node_ramusage_mean",
                                              data_dict["node_ramusage_mean"]) + " - "
            content += self._format_attribute("node_message_frequency_mean",
                                              data_dict["node_message_frequency_mean"]) + " - "
            content += self._format_attribute("node_bandwidth_mean",
                                              data_dict["node_bandwidth_mean"])

        return content

    def can_execute_actions(self):
        """
        This item can execute actions, so it returns True

        :return: True
        """
        return True

    def get_list_items(self):
        # Attributes whose latest value is a per-core / per-GPU list.
        return ["node_cpu_usage_core_mean", "node_cpu_usage_core_stddev", "node_cpu_usage_core_max",
                "node_gpu_usage_mean", "node_gpu_usage_stddev", "node_gpu_usage_max"]

    def get_time_items(self):
        # No time-valued attributes for nodes.
        return []
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

"""
Provide coverage QC for assembled sequences:
1. plot paired-end reads as curves
2. plot base coverage and mate coverage
3. plot gaps in the sequence (if any)

NOTE(review): this module uses Python-2-only syntax (`print >> fh`,
`fp.next()`); it will not run under Python 3 as-is.
"""

import os.path as op
import sys
import logging

from collections import defaultdict

from jcvi.formats.base import BaseFile, must_open
from jcvi.formats.fasta import gaps
from jcvi.formats.sizes import Sizes
from jcvi.formats.posmap import query, bed
from jcvi.formats.bed import BedLine
from jcvi.apps.base import OptionParser, ActionDispatcher, sh, need_update


class Coverage (BaseFile):
    """
    Three-column .coverage file, often generated by `genomeCoverageBed -d`
    contigID baseID coverage
    """
    def __init__(self, bedfile, sizesfile):
        from jcvi.formats.bed import sort

        # genomeCoverageBed needs sorted input; regenerate the sorted BED
        # only when the source BED is newer (need_update checks mtimes).
        sortedbedfile = bedfile.rsplit(".", 1)[0] + ".sorted.bed"
        if need_update(bedfile, sortedbedfile):
            sort([bedfile])

        bedfile = sortedbedfile
        coveragefile = bedfile + ".coverage"
        if need_update(bedfile, coveragefile):
            cmd = "genomeCoverageBed"
            cmd += " -bg -i {0} -g {1}".format(bedfile, sizesfile)
            sh(cmd, outfile=coveragefile)

        # contig name -> contig length
        self.sizes = Sizes(sizesfile).mapping

        filename = coveragefile
        assert filename.endswith(".coverage")
        super(Coverage, self).__init__(filename)

    def get_plot_data(self, ctg, bins=None):
        """
        Build (bases, coverage) arrays for contig `ctg`, optionally
        down-sampled into `bins` windows via chunk_average.
        """
        import numpy as np
        from jcvi.algorithms.matrix import chunk_average

        fp = open(self.filename)
        size = self.sizes[ctg]
        # NOTE(review): np.int was removed in NumPy >= 1.24; this only works
        # with older NumPy releases.
        data = np.zeros((size, ), dtype=np.int)
        for row in fp:
            seqid, start, end, cov = row.split()
            if seqid != ctg:
                continue

            start, end = int(start), int(end)
            cov = int(cov)
            # BedGraph intervals are 0-based half-open, so slice directly.
            data[start:end] = cov

        bases = np.arange(1, size + 1)
        if bins:
            # Integer division under Python 2; the resulting window is used
            # both as a stride and as the averaging chunk size.
            window = size / bins
            bases = bases[::window]
            data = chunk_average(data, window)

        return bases, data


def main():
    # Dispatch sub-commands by name (jcvi convention).
    actions = (
        ('posmap', 'QC based on indexed posmap file'),
            )
    p = ActionDispatcher(actions)
    p.dispatch(globals())


def clone_name(s, ca=False):
    """
    Strip the read suffix/prefix to recover the clone (mate-pair) name.

    >>> clone_name("120038881639")
    '0038881639'
    >>> clone_name("GW11W6RK01DAJDWa")
    'GW11W6RK01DAJDW'
    """
    if not ca:
        return s[:-1]

    if s[0] == '1':
        return s[2:]

    return s.rstrip('ab')


def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False):
    """
    This converts the bedfile to bedpefile, assuming the reads are from CA.

    Reads are grouped into clones by clone_name(); clones with exactly two
    reads are written as BEDPE records, and (optionally) as single spanning
    intervals to `pairsbedfile` when the span looks like a happy mate pair.
    """
    fp = must_open(bedfile)
    fw = must_open(bedpefile, "w")
    if pairsbedfile:
        fwpairs = must_open(pairsbedfile, "w")

    # clone name -> list of BedLine reads belonging to that clone
    clones = defaultdict(list)
    for row in fp:
        b = BedLine(row)
        name = b.accn
        clonename = clone_name(name, ca=ca)
        clones[clonename].append(b)

    if matesfile:
        fp = open(matesfile)
        libraryline = fp.next()  # 'library bes     37896   126916'
        # NOTE(review): fp.next() is Python-2-only (next(fp) in py3).
        lib, name, smin, smax = libraryline.split()
        assert lib == "library"
        smin, smax = int(smin), int(smax)
        logging.debug("Happy mates for lib {0} fall between {1} - {2}".\
                      format(name, smin, smax))

    nbedpe = 0
    nspan = 0
    for clonename, blines in clones.items():
        if len(blines) == 2:
            a, b = blines
            aseqid, astart, aend = a.seqid, a.start, a.end
            bseqid, bstart, bend = b.seqid, b.start, b.end
            # BEDPE uses 0-based starts, hence the -1.
            print >> fw, "\t".join(str(x) for x in (aseqid, astart - 1, aend,
                                   bseqid, bstart - 1, bend, clonename))
            nbedpe += 1
        else:
            # NOTE(review): `a, = blines` raises ValueError when a clone has
            # more than two reads; only the single-read case is intended.
            a, = blines
            aseqid, astart, aend = a.seqid, a.start, a.end
            bseqid, bstart, bend = 0, 0, 0

        if pairsbedfile:
            start = min(astart, bstart) if bstart > 0 else astart
            end = max(aend, bend) if bend > 0 else aend
            if aseqid != bseqid:
                # Mates on different contigs (or a single read, where
                # bseqid == 0) cannot form a span.
                continue

            span = end - start + 1
            if (not matesfile) or (smin <= span <= smax):
                print >> fwpairs, "\t".join(str(x) for x in \
                        (aseqid, start - 1, end, clonename))
                nspan += 1

    fw.close()
    logging.debug("A total of {0} bedpe written to `{1}`.".\
                  format(nbedpe, bedpefile))
    if pairsbedfile:
        fwpairs.close()
        logging.debug("A total of {0} spans written to `{1}`.".\
                      format(nspan, pairsbedfile))


def posmap(args):
    """
    %prog posmap frgscf.sorted scf.fasta scfID

    Perform QC on the selected scfID, generate multiple BED files for plotting.
    """
    p = OptionParser(posmap.__doc__)

    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(p.print_help())

    frgscffile, fastafile, scf = args

    # fasta: extract just the selected scaffold record
    cmd = "faOneRecord {0} {1}".format(fastafile, scf)
    scffastafile = scf + ".fasta"
    if not op.exists(scffastafile):
        sh(cmd, outfile=scffastafile)

    # sizes
    sizesfile = scffastafile + ".sizes"
    sizes = Sizes(scffastafile).mapping
    scfsize = sizes[scf]
    logging.debug("`{0}` has length of {1}.".format(scf, scfsize))

    # gaps.bed
    gapsbedfile = scf + ".gaps.bed"
    if not op.exists(gapsbedfile):
        args = [scffastafile, "--bed", "--mingap=100"]
        gaps(args)

    # reads frgscf posmap
    posmapfile = scf + ".posmap"
    if not op.exists(posmapfile):
        args = [frgscffile, scf]
        query(args)

    # reads bed
    bedfile = scf + ".bed"
    if not op.exists(bedfile):
        args = [posmapfile]
        bed(args)

    # reads bedpe
    bedpefile = scf + ".bedpe"
    pairsbedfile = scf + ".pairs.bed"
    if not (op.exists(bedpefile) and op.exists(pairsbedfile)):
        bed_to_bedpe(bedfile, bedpefile, pairsbedfile=pairsbedfile, ca=True)

    # base coverage
    Coverage(bedfile, sizesfile)
    Coverage(pairsbedfile, sizesfile)


if __name__ == '__main__':
    main()
import collections
import logging
import os
import subprocess
import sys
from pathlib import Path
from subprocess import check_output, CalledProcessError

import numpy as np
import pandas as pd
from django.db.models import Q
from django.db import close_old_connections

from emgapi import models as emg_models
from emgapianns.management.lib import utils
from emgapianns.management.lib.utils import DownloadFileDatabaseHandler


class StudySummaryGenerator(object):
    """
    Builds study-level summary matrices (taxonomy, GO, InterPro) by merging
    per-analysis-job result files for one study/pipeline, writes them into a
    project-summary directory, registers them as downloads in the EMG
    database and rsyncs them to the public NFS area.
    """

    def __init__(self, accession, pipeline, rootpath, nfs_public_rootpath, database):
        # Study secondary accession (e.g. ERPxxxxxx) and pipeline version.
        self.study_accession = accession
        self.pipeline = pipeline
        self.rootpath = rootpath
        self.nfs_public_rootpath = nfs_public_rootpath
        self.emg_db_name = database
        self.study = emg_models.Study.objects.using(self.emg_db_name).get(secondary_accession=self.study_accession)
        self.study_result_dir = os.path.join(self.rootpath, self.study.result_directory)
        # Set by run() once the project-summary directory is known.
        self.summary_dir = None
        # rna type -> column keyword in the mapseq output header.
        self.MAPSEQ_COLUMN_MAPPER = {'SSU': 'SILVA',
                                     'LSU': 'SILVA',
                                     'unite': 'UNITE',
                                     'itsonedb': 'ITSone'}

    def run(self):
        """Generate all summary files for the study and sync them out."""
        if not os.path.exists(self.study_result_dir):
            sys.exit(
                "Study result directory for {} does not exist:\n{}".format(self.study_accession,
                                                                           self.study_result_dir))

        # Collect completed analysis jobs for this study + pipeline version.
        jobs = emg_models.AnalysisJob.objects.using(self.emg_db_name)
        jobs = jobs.filter(
            Q(study__secondary_accession=self.study_accession) &
            Q(analysis_status__analysis_status='completed') &
            Q(pipeline__release_version=self.pipeline))

        experiment_types = set()
        # input file name -> absolute job result directory
        analysis_jobs = {}
        for job in jobs:
            job_result_directory = os.path.join(self.rootpath, job.result_directory)
            job_input_file_name = job.input_file_name
            analysis_jobs[job_input_file_name] = job_result_directory
            experiment_types.add(job.experiment_type.experiment_type)

        self.summary_dir = os.path.join(self.study_result_dir,
                                        'version_{}/project-summary'.format(self.pipeline))
        self.create_summary_dir()

        # One phylum-level and one full-lineage matrix per rna type.
        for rna_types in self.MAPSEQ_COLUMN_MAPPER.keys():
            self.generate_taxonomy_phylum_summary(analysis_jobs, self.pipeline, '{}'.format(rna_types),
                                                  'phylum_taxonomy_abundances_{}_v{}.tsv'.format(rna_types,
                                                                                                 self.pipeline))

            self.generate_taxonomy_summary(analysis_jobs, '{}'.format(rna_types),
                                           'taxonomy_abundances_{}_v{}.tsv'.format(rna_types, self.pipeline))

        if len(experiment_types) == 1 and 'amplicon' in experiment_types:
            logging.info("AMPLICON datasets only! Skipping the generation of the functional matrix files!")
        else:
            self.generate_ipr_summary(analysis_jobs, 'IPR_abundances_v{}.tsv'.format(self.pipeline),
                                      self.pipeline)

            self.generate_go_summary(analysis_jobs, 'slim', self.pipeline)
            self.generate_go_summary(analysis_jobs, 'full', self.pipeline)

        self.sync_study_summary_files()
        logging.info("Program finished successfully.")

    def sync_study_summary_files(self):
        """Rsync the project-summary directory to the public NFS area."""
        logging.info("Syncing project summary files over to NFS public...")
        _study_result_dir = self.study.result_directory
        nfs_prod_dest = os.path.join(self.rootpath, _study_result_dir,
                                     'version_{}/{}'.format(self.pipeline, 'project-summary'))
        nfs_public_dest = os.path.join(self.nfs_public_rootpath, _study_result_dir,
                                       'version_{}/'.format(self.pipeline))
        logging.info("From: " + nfs_prod_dest)
        logging.info("To: " + nfs_public_dest)
        rsync_options = ['-rtDzv']
        more_rsync_options = ['--no-owner', '--no-perms', '--prune-empty-dirs',
                              '--exclude', '*.lsf', '--delete-excluded',
                              '--chmod=Do-w,Fu+x,Fg+x,Fo+r']
        # Runs as emg_adm so the public copy gets the right ownership.
        rsync_cmd = ["sudo", "-H", "-u", "emg_adm", "rsync"] + rsync_options + more_rsync_options + [
            nfs_prod_dest, nfs_public_dest]
        logging.info(rsync_cmd)
        subprocess.check_call(rsync_cmd)
        logging.info("Synchronisation is done.")

    @staticmethod
    def _get_group_type(rna_type):
        """Map an rna type to the download group label (None if unknown)."""
        group = None
        if rna_type in ['SSU', 'LSU']:
            group = 'Taxonomic analysis {} rRNA'.format(rna_type)
        elif rna_type == 'unite':
            group = 'Taxonomic analysis UNITE'
        elif rna_type == 'itsonedb':
            group = 'Taxonomic analysis ITSoneDB'
        else:
            logging.warning("RNA type {} not supported!".format(rna_type))
        return group

    @staticmethod
    def _get_phylum_file_description(rna_type):
        """Description label for the phylum-level download (None if unknown)."""
        desc = None
        if rna_type in ['SSU', 'LSU', 'unite']:
            desc = 'Phylum level taxonomies {}'.format(rna_type.upper())
        elif rna_type == 'itsonedb':
            desc = 'Phylum level taxonomies ITSoneDB'
        else:
            logging.warning("RNA type {} not supported!".format(rna_type))
        return desc

    @staticmethod
    def _get_abundance_file_description(rna_type):
        """Description label for the full-lineage download (None if unknown)."""
        desc = None
        if rna_type in ['SSU', 'LSU', 'unite']:
            desc = 'Taxonomic assignments {}'.format(rna_type.upper())
        elif rna_type == 'itsonedb':
            desc = 'Taxonomic assignments ITSoneDB'
        else:
            logging.warning("RNA type {} not supported!".format(rna_type))
        return desc

    def generate_taxonomy_phylum_summary(self, analysis_jobs, version, rna_type, filename):
        """Build, write and register the phylum-level taxonomy matrix."""
        study_df = None
        if version == '4.1':
            study_df = self.generate_taxonomy_phylum_summary_v4(analysis_jobs, rna_type)
        elif version == '5.0':
            study_df = self.generate_taxonomy_phylum_summary_v5(analysis_jobs, rna_type)
        else:
            logging.warning("Pipeline version {} not supported yet!".format(version))
            pass

        # NOTE(review): for unsupported versions study_df is still None here,
        # so study_df.empty raises AttributeError instead of skipping.
        if not study_df.empty:
            self.write_results_file(study_df, filename)

            alias = '{}_phylum_taxonomy_abundances_{}_v{}.tsv'.format(self.study_accession, rna_type,
                                                                      self.pipeline)
            description = self._get_phylum_file_description(rna_type)
            group = self._get_group_type(rna_type)
            self.upload_study_file(filename, alias, description, group)

    def generate_taxonomy_phylum_summary_v4(self, analysis_result_dirs, su_type):
        # v4 results ship pre-computed kingdom-counts files; just merge them.
        res_files = self.get_kingdom_counts_files(analysis_result_dirs, su_type)
        study_df = self.merge_dfs(res_files, delimiter='\t',
                                  key=['kingdom', 'phylum'],
                                  raw_cols=['kingdom', 'phylum', 'count', 'ignored'])
        return study_df

    def generate_taxonomy_phylum_summary_v5(self, analysis_jobs, rna_type):
        """Derive phylum counts from raw mapseq output (v5 has no counts file)."""
        job_data_frames = dict()
        # Iterate over each run
        for acc, result_directory in analysis_jobs.items():
            # Define results files and for each result file perform necessary operations
            if rna_type in ['unite', 'itsonedb']:
                sequence_file = self.__get_rna_fasta_file(result_directory, 'ITS_masked.fasta.gz')
            else:  # for SILVA: LSU and SSU
                sequence_file = self.__get_rna_fasta_file(result_directory, '{}.fasta.gz'.format(rna_type))
            if not sequence_file:
                continue
            # Total sequence count is needed to derive the Unassigned bucket.
            num_rna_seqs = self.__count_number_of_seqs(sequence_file)
            #
            mapseq_result_file = self.__get_mapseq_result_file(acc, result_directory, rna_type,
                                                               '.fasta.mseq.gz')
            if not mapseq_result_file:
                continue
            phylum_count_data = self.__parse_phylum_counts_v5(mapseq_result_file, num_rna_seqs, rna_type)
            job_df = self.__build_dataframe(phylum_count_data)
            job_data_frames[acc] = job_df
        study_df = self.merge_dfs_v5(job_data_frames, key=['superkingdom', 'kingdom', 'phylum'])
        return study_df

    def generate_taxonomy_summary(self, analysis_result_dirs, rna_type, filename):
        """Build, write and register the full-lineage taxonomy matrix."""
        res_files = self.get_mapseq_result_files(analysis_result_dirs, rna_type, '.fasta.mseq.tsv')
        raw_cols = ['OTU', 'count', 'lineage']
        if self.pipeline in ['5.0']:
            # v5 mapseq output carries an extra taxid column.
            raw_cols = ['OTU', 'count', 'lineage', 'taxid']
        study_df = self.merge_dfs(res_files, key=['lineage'], delimiter='\t', raw_cols=raw_cols,
                                  skip_rows=2)
        study_df = study_df.rename(columns={'lineage': '#SampleID'})

        if len(study_df.index) > 0:
            self.write_results_file(study_df, filename)

            alias = '{}_taxonomy_abundances_{}_v{}.tsv'.format(self.study_accession, rna_type, self.pipeline)
            description = self._get_abundance_file_description(rna_type)
            group = self._get_group_type(rna_type)
            self.upload_study_file(filename, alias, description, group)

    def get_raw_result_files(self, res_file_re):
        # Glob under the study result dir and resolve to absolute paths.
        paths = list(Path(self.study_result_dir).glob(res_file_re))
        return [str(p.resolve()) for p in paths]

    def merge_dfs_v5(self, dataframes, key):
        """Outer-merge pre-built per-job dataframes on `key` (v5 path)."""
        study_df = pd.DataFrame(columns=key)
        for accession, df in dataframes.items():
            df = df.filter(key + ['count'])
            # Each job contributes one count column named by its accession.
            df = df.rename(columns={'count': accession})
            study_df = study_df.merge(df, on=key, how='outer')
        study_df = study_df.sort_values(by=key)
        study_df = self.clean_summary_df(study_df)
        return study_df

    def merge_dfs(self, filelist, delimiter, key, raw_cols, skip_rows=0):
        """Read per-job TSV/CSV count files and outer-merge them on `key`."""
        study_df = pd.DataFrame(columns=key)
        for f in sorted(filelist):
            accession = utils.get_accession_from_result_dir_path(f)
            df = self.read_count_tsv(f, delimiter, raw_cols, skip_rows)
            df = df.filter(key + ['count'])
            df = df.rename(columns={'count': accession})
            study_df = study_df.merge(df, on=key, how='outer')
        study_df = study_df.sort_values(by=key)
        study_df = self.clean_summary_df(study_df)
        return study_df

    def __parse_phylum_counts_v5(self, mapseq_file, num_rna_seqs, rna_type, delimiter='\t',
                                 compression='gzip', header=1):
        """
        Get phylum counts for v5 results.

        Implementation of the following linux command using pandas dataframe and collections:

        zcat SRR6028649_MERGED_FASTQ_SSU.fasta.mseq.gz | grep -v "^#" | cut -f 14- | cut -d ";" -f 1-3 | sed 's/\t$//' | sed 's/;p__$//' | sed 's/;k__$//' | sort | uniq -c

        :return: dict of row-index -> [superkingdom, kingdom, phylum, count]
        """
        unassigned = 'Unassigned'
        # column header keywords: UNITE, ITSone, SILVA
        column_name = self.MAPSEQ_COLUMN_MAPPER.get(rna_type)
        # Fixme: Replace pandas dataframe read csv function by pure python and introduce buffered reading
        df = pd.read_csv(mapseq_file, compression=compression, header=header, sep=delimiter)
        # Drop sequences without any assignment in the relevant column.
        filtered_df = df.dropna(subset=[column_name])
        taxonomies = list()
        for i, row in filtered_df.iterrows():
            value = filtered_df.at[i, column_name]
            # Truncate the lineage at class level (";c__..."), keeping
            # superkingdom/kingdom/phylum only.
            index = value.find(";c__")
            if index > 0:
                value = value[0:index]
            value = self.normalize_taxa_hierarchy(value)
            taxonomies.append(value)

        counter = 1
        data = dict()
        for phylum, count in collections.Counter(taxonomies).items():
            new_columns = phylum.split(';')
            # Pad missing ranks so every row has 3 taxonomy levels.
            while len(new_columns) < 3:
                new_columns.append(unassigned)
            new_columns.append(count)
            data[counter] = new_columns
            counter += 1

        # Sequences with no assignment at all go into a single
        # Unassigned/Unassigned/Unassigned bucket.
        num_assigned_seqs = filtered_df[column_name].count()
        num_unassigned_seqs = num_rna_seqs - num_assigned_seqs
        if num_unassigned_seqs > 0:
            data[counter] = [unassigned, unassigned, unassigned, num_unassigned_seqs]
        return data

    def generate_go_summary_v4(self, analysis_result_dirs, mode):
        res_files = self.get_go_v4_result_files(analysis_result_dirs, mode)
        study_df = self.merge_dfs(res_files, delimiter=',',
                                  key=['GO', 'description', 'category'],
                                  raw_cols=['GO', 'description', 'category', 'count'])
        # '@' protects embedded commas; underscores become spaces for display.
        study_df['description'] = study_df['description'].str.replace(',', '@')
        study_df['category'] = study_df['category'].str.replace('_', ' ')
        return study_df

    def generate_go_summary_v5(self, analysis_result_dirs, mode):
        # Same post-processing as v4; only the result file layout differs.
        res_files = self.get_go_v5_result_files(analysis_result_dirs, mode)
        study_df = self.merge_dfs(res_files, delimiter=',',
                                  key=['GO', 'description', 'category'],
                                  raw_cols=['GO', 'description', 'category', 'count'])
        study_df['description'] = study_df['description'].str.replace(',', '@')
        study_df['category'] = study_df['category'].str.replace('_', ' ')
        return study_df

    def generate_ips_summary_v4(self, analysis_result_dirs):
        res_files = self.get_ipr_v4_result_files(analysis_result_dirs)
        return self.merge_dfs(res_files, delimiter=',',
                              key=['IPR', 'description'],
                              raw_cols=['IPR', 'description', 'count'])

    def generate_ips_summary_v5(self, analysis_result_dirs):
        # v5 puts the count in the first column, unlike v4.
        res_files = self.get_ipr_v5_result_files(analysis_result_dirs)
        return self.merge_dfs(res_files, delimiter=',',
                              key=['IPR', 'description'],
                              raw_cols=['count', 'IPR', 'description'])

    def generate_ipr_summary(self, analysis_jobs, filename, version):
        """Build, write and register the InterPro abundance matrix."""
        study_df = None
        if version == '4.1':
            study_df = self.generate_ips_summary_v4(analysis_jobs)
        elif version == '5.0':
            study_df = self.generate_ips_summary_v5(analysis_jobs)
        else:
            logging.warning("Pipeline version {} not supported yet!".format(version))

        # NOTE(review): study_df is None for unsupported versions, so .empty
        # raises AttributeError here.
        if not study_df.empty:
            self.write_results_file(study_df, filename)
            alias = '{}_IPR_abundances_v{}.tsv'.format(self.study_accession, self.pipeline)
            description = 'InterPro matches'
            self.upload_study_file(filename, alias, description, 'Functional analysis')

    def generate_go_summary(self, analysis_jobs, mode, version):
        """Build the GO ('full') or GO-slim matrix plus per-category splits."""
        if mode == 'slim':
            sum_file = 'GO-slim'
            description = 'GO slim annotation'
        else:
            sum_file = 'GO'
            description = 'Complete GO annotation'

        study_df = None
        if version == '4.1':
            study_df = self.generate_go_summary_v4(analysis_jobs, mode)
        elif version == '5.0':
            study_df = self.generate_go_summary_v5(analysis_jobs, mode)
        else:
            logging.warning("Pipeline version {} not supported yet!".format(version))

        # NOTE(review): same None/.empty hazard as generate_ipr_summary.
        if not study_df.empty:
            realname = sum_file + '_abundances_v{}.tsv'.format(version)
            self.write_results_file(study_df, realname)

            # One filtered file per GO category; MF is "everything else".
            self.generate_filtered_go_summary(study_df, 'category == "cellular component"',
                                              'CC_{}_abundances_v{}.tsv'.format(sum_file, version))
            self.generate_filtered_go_summary(study_df, 'category == "biological process"',
                                              'BP_{}_abundances_v{}.tsv'.format(sum_file, version))
            self.generate_filtered_go_summary(study_df,
                                              'category not in ["biological process", "cellular component"]',
                                              'MF_{}_abundances_v{}.tsv'.format(sum_file, version))

            alias = '{}_{}_abundances_v{}.tsv'.format(self.study_accession, sum_file, version)
            self.upload_study_file(realname, alias, description, 'Functional analysis')
        return study_df

    def generate_filtered_go_summary(self, df, query, filename):
        # Write the subset of rows matching a pandas query expression.
        df = df.query(query)
        self.write_results_file(df, filename)

    @staticmethod
    def read_count_tsv(filename, delimiter, cols, skip_rows=0):
        df = pd.read_csv(filename, delimiter=delimiter, names=cols, skiprows=skip_rows)
        df = df.astype({'count': 'int'})
        return df

    def write_results_file(self, df, filename):
        # All summary outputs are tab-separated regardless of input format.
        filepath = os.path.join(self.summary_dir, filename)
        df.to_csv(filepath, sep='\t', header=True, index=False)

    @staticmethod
    def clean_summary_df(df):
        # Outer merges leave NaN where a job lacks a row; zero-fill and make
        # every numeric (count) column an int again.
        df = df.fillna(0)
        int_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        df = df.astype({col: 'int' for col in int_cols})
        return df

    def create_summary_dir(self):
        try:
            os.makedirs(self.summary_dir, exist_ok=True)
        except PermissionError:
            # The version dir may be read-only; open it up and retry once.
            version_dir = os.path.join(self.study_result_dir, 'version_{}'.format(self.pipeline))
            logging.warning("Permission issue encountered on folder: {}".format(version_dir))
            os.chmod(version_dir, 0o755)
            os.makedirs(self.summary_dir, exist_ok=True)

    def upload_study_file(self, realname, alias, description, group):
        """Store the study file in the DB
        """
        # Close any obsolete connections to the db for studies with > 100 analysis
        # this is required as django doesn't close/re-open the connection
        # and it will results in django.db.utils.OperationalError: (2006, 'MySQL server has gone away')
        # CONN_MAX_AGE doesn't work for shell commands
        close_old_connections()

        file_config = {
            'alias': alias,
            'compression': False,
            'description_label': description,
            'format_name': 'TSV',
            'group_type': group,
            'real_name': realname,
            'subdir': 'version_{}/project-summary'.format(self.pipeline),
            '_required': True}
        _study_rootpath = self.study_result_dir.replace('version_{}'.format(self.pipeline), '')
        f = utils.StudyDownload(_study_rootpath, file_config, self.pipeline)
        DownloadFileDatabaseHandler(self.emg_db_name).save_study_download_file(f, self.study)

    @staticmethod
    def __get_mapseq_result_file(input_file_name, result_directory, su_type, mapseq_file_extension):
        # Returns the mapseq result path, or implicitly None when missing.
        sub_dir = ''
        if su_type in ['unite', 'itsonedb']:
            sub_dir = 'its'
        res_file_re = os.path.join(result_directory, 'taxonomy-summary', sub_dir, su_type,
                                   '{}_{}{}'.format(input_file_name, su_type, mapseq_file_extension))
        if os.path.exists(res_file_re):
            return res_file_re
        else:
            logging.warning("Result file does not exist:\n{}".format(res_file_re))

    @staticmethod
    def __get_rna_fasta_file(result_directory, file_name):
        # Returns the fasta path, or implicitly None when missing.
        res_file_re = os.path.join(result_directory, 'sequence-categorisation', file_name)
        if os.path.exists(res_file_re):
            return res_file_re
        else:
            logging.warning("Result file does not exist:\n{}".format(res_file_re))

    @staticmethod
    def get_mapseq_result_files(analysis_result_dirs, su_type, mapseq_file_extension):
        # Collect existing mapseq result files across all jobs; missing ones
        # are logged and skipped.
        result = []
        for input_file_name, dir in analysis_result_dirs.items():
            sub_dir = ''
            if su_type in ['unite', 'itsonedb']:
                sub_dir = 'its'
            res_file_re = os.path.join(dir, 'taxonomy-summary', sub_dir, su_type,
                                       '{}_{}{}'.format(input_file_name, su_type, mapseq_file_extension))
            if os.path.exists(res_file_re):
                result.append(res_file_re)
            else:
                logging.warning("Result file does not exist:\n{}".format(res_file_re))
        return result

    @staticmethod
    def get_kingdom_counts_files(analysis_result_dirs, su_type):
        # v4 per-job kingdom-counts files.
        result = []
        for input_file_name, dir in analysis_result_dirs.items():
            res_file_re = os.path.join(dir, 'taxonomy-summary', su_type, 'kingdom-counts.txt')
            if os.path.exists(res_file_re):
                result.append(res_file_re)
            else:
                logging.warning("Result file does not exist:\n{}".format(res_file_re))
        return result

    @staticmethod
    def get_ipr_v4_result_files(analysis_result_dirs):
        result = []
        for input_file_name, dir in analysis_result_dirs.items():
            res_file_re = os.path.join(dir, '{}_summary.ipr'.format(input_file_name))
            if os.path.exists(res_file_re):
                result.append(res_file_re)
            else:
                logging.warning("Result file does not exist:\n{}".format(res_file_re))
        return result

    @staticmethod
    def get_ipr_v5_result_files(analysis_result_dirs):
        result = []
        for input_file_name, dir in analysis_result_dirs.items():
            res_file_re = os.path.join(dir, 'functional-annotation',
                                       '{}.summary.ips'.format(input_file_name))
            if os.path.exists(res_file_re):
                result.append(res_file_re)
            else:
                logging.warning("Result file does not exist:\n{}".format(res_file_re))
        return result

    @staticmethod
    def get_go_v4_result_files(analysis_result_dirs, mode):
        result = []
        for input_file_name, dir in analysis_result_dirs.items():
            file_name = '{}_summary.go' if mode == 'full' else '{}_summary.go_slim'
            res_file_re = os.path.join(dir, file_name.format(input_file_name))
            if os.path.exists(res_file_re):
                result.append(res_file_re)
            else:
                logging.warning("Result file does not exist:\n{}".format(res_file_re))
        return result

    @staticmethod
    def get_go_v5_result_files(analysis_result_dirs, mode):
        result = []
        for input_file_name, dir in analysis_result_dirs.items():
            file_name = '{}.summary.go' if mode == 'full' else '{}.summary.go_slim'
            res_file_re = os.path.join(dir, 'functional-annotation', file_name.format(input_file_name))
            if os.path.exists(res_file_re):
                result.append(res_file_re)
            else:
                logging.warning("Result file does not exist:\n{}".format(res_file_re))
        return result

    @staticmethod
    def __count_number_of_seqs(filepath):
        """
        Counts number of sequences in compressed fasta file.

        :return: int count; 0 when zcat/grep fails (e.g. no matches).
        """
        try:
            count = check_output("zcat {} | grep -c '>'".format(filepath), shell=True).rstrip()
            return int(count)
        except CalledProcessError:
            # grep exits non-zero when there are no matches.
            return 0

    @staticmethod
    def __build_dataframe(data):
        # data: row-index -> [superkingdom, kingdom, phylum, count]
        df = pd.DataFrame.from_dict(data, orient='index',
                                    columns=['superkingdom', 'kingdom', 'phylum', 'count'])
        return df

    @staticmethod
    def normalize_taxa_hierarchy(taxa_str):
        """
        Normalise a 'sk__X;k__Y;p__Z' lineage: empty ranks become
        'Unassigned', rank prefixes are stripped, and the result is padded
        to exactly three ';'-separated levels.
        """
        unassigned = 'Unassigned'
        if taxa_str.endswith('k__'):
            taxa_str = taxa_str.replace(';k__', ';{}'.format(unassigned))
        elif taxa_str.endswith('p__'):
            taxa_str = taxa_str.replace(';p__', ';{}'.format(unassigned))
        result = (lambda x: x.replace('k__;', '{};'.format(unassigned)))(taxa_str)
        result = (lambda y: y.replace('sk__', '').replace('k__', '').replace('p__', ''))(result)
        #
        while result.count(';') < 2:
            result = '{};{}'.format(result, unassigned)
        return result
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_futils
~~~~~~~~~~~

Test Felix utils.
"""
import logging
import mock
import unittest2

import calico.felix.futils as futils

# Logger
log = logging.getLogger(__name__)

# (input, max length, expected output) triples for uniquely_shorten().
# The hashed expectations pin the exact truncation scheme ('_' + hash prefix).
UNIQUE_SHORTEN_TESTS = [
    # Tries to return the input string if it can.
    ("foo", 10, "foo"),
    ("foobarbaz1", 10, "foobarbaz1"),
    # Too long, truncated hash
    ("foobarbaz12", 10, '_d71c1ff3e'),
    ("foobarbaz12", 9, '_94df2800'),
    # Different input, different hash
    ("foobarbaz123", 10, '_438f419f9'),
    # This is OK, it starts with the prefix but it's the wrong length so it
    # can't clash with our output:
    ("_foobar", 10, "_foobar"),
    # But this is not since it's the same length as our output and starts with
    # a _.
    ("_foobar", 7, "_9f4764"),
    ("_78c38617f", 10, '_f13be85cf'),
]


class TestFutils(unittest2.TestCase):
    """Tests for the subprocess-call and string helpers in futils."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_good_check_call(self):
        with mock.patch("calico.felix.futils._call_semaphore",
                        wraps=futils._call_semaphore) as m_sem:
            # Test a command. Result must include "calico" given where it is
            # run from.
            args = ["ls"]
            result = futils.check_call(args)
            self.assertNotEqual(result.stdout, None)
            self.assertNotEqual(result.stderr, None)
            self.assertTrue("calico" in result.stdout)
            self.assertEqual(result.stderr, "")
            # check_call must serialise through the call semaphore.
            self.assertTrue(m_sem.__enter__.called)

    def test_bad_check_call(self):
        # Test an invalid command - must parse but not return anything.
        args = ["ls", "wibble_wobble"]
        try:
            futils.check_call(args)
            # Reaching here means no exception was raised: fail the test.
            self.assertTrue(False)
        except futils.FailedSystemCall as e:
            self.assertNotEqual(e.retcode, 0)
            self.assertEqual(list(e.args), args)
            self.assertNotEqual(e.stdout, None)
            self.assertNotEqual(e.stderr, None)
            self.assertTrue("wibble_wobble" in str(e))

    def test_good_call_silent(self):
        # Test a command. Result must include "calico" given where it is run from.
        args = ["ls"]
        retcode = futils.call_silent(args)
        self.assertEqual(retcode, 0)

    def test_bad_call_silent(self):
        # Test an invalid command - must parse but not return anything.
        args = ["ls", "wibble_wobble"]
        retcode = futils.call_silent(args)
        self.assertNotEqual(retcode, 0)

    def stub_store_calls(self, args):
        # Stand-in for check_call used by test_multi_call: captures the
        # generated bash script's contents into self.data.
        log.debug("Args are : %s", args)
        self.assertEqual(args[0], "bash")
        with open(args[1], 'r') as f:
            self.data = f.read()

    def test_multi_call(self):
        # Test multiple command calls; this just stores the command values.
        ops = [["ls"], ["ls", "calico"]]
        expected = "set -e\n"
        for op in ops:
            cmd = " ".join(op) + "\n"
            expected += "echo Executing : " + cmd + cmd

        with mock.patch('calico.felix.futils.check_call',
                        side_effect=self.stub_store_calls):
            result = futils.multi_call(ops)
        self.assertEqual(expected, self.data)

    def test_uniquely_shorten(self):
        for inp, length, exp in UNIQUE_SHORTEN_TESTS:
            output = futils.uniquely_shorten(inp, length)
            self.assertTrue(len(output) <= length)
            self.assertEqual(exp, output, "Input %r truncated to length %s "
                                          "should have given output "
                                          "%r but got %r" %
                                          (inp, length, exp, output))

    def test_safe_truncate(self):
        self.assert_safe_truncate("foobarbazb", 10, "foobarbazb")
        # Yes, this gets longer, which is silly. However, there's no point
        # making the code complicated to handle this case that should never be
        # hit.
        self.assert_safe_truncate("foobarbazb", 9, "fooba...<snip>...bazb")
        self.assert_safe_truncate(None, 9, None)
        self.assert_safe_truncate(1234, 9, "1234")

    def assert_safe_truncate(self, s, length, expected):
        # Helper assertion, shared by test_safe_truncate.
        result = futils.safe_truncate(s, length)
        self.assertEqual(result, expected,
                         "Expected %r to be truncated as %r but got %r" %
                         (s, expected, result))


class TestStats(unittest2.TestCase):
    """Tests for StatCounter and the diagnostics dump machinery."""

    def setUp(self):
        # Reset module-level registration state so tests are independent.
        futils._registered_diags = []
        self.sc = futils.StatCounter("foo")

    def tearDown(self):
        futils._registered_diags = []

    def test_stats_counter(self):
        # Creating the counter must register its dump hook.
        self.assertTrue(("foo", self.sc._dump) in futils._registered_diags)
        self.sc.increment("bar")
        self.sc.increment("baz")
        self.assertEqual(self.sc.stats["bar"], 1)
        self.sc.increment("bar")
        self.assertEqual(self.sc.stats["bar"], 2)
        self.sc.increment("baz", by=2)
        self.assertEqual(self.sc.stats["baz"], 3)
        m_log = mock.Mock(spec=logging.Logger)
        self.sc._dump(m_log)
        self.assertEqual(m_log.info.mock_calls, [
            mock.call("%s: %s", "bar", 2),
            mock.call("%s: %s", "baz", 3),
        ])

    def test_dump_diags(self):
        with mock.patch("calico.felix.futils.stat_log") as m_log:
            self.sc.increment("bar")
            futils.dump_diags()
            self.assertEqual(m_log.info.mock_calls, [
                mock.call("=== DIAGNOSTICS ==="),
                mock.call("--- %s ---", "foo"),
                mock.call("%s: %s", "bar", 1),
                mock.call("=== END OF DIAGNOSTICS ==="),
            ])

    def test_dump_diags_process(self):
        # (label, rusage field name, stub value) for each stat that
        # register_process_statistics() reports.
        process_results = [
            ('Execution time in user mode (seconds)', 'ru_utime', 1),
            ('Execution time in kernel mode (seconds)', 'ru_stime', 2),
            ('Maximum Resident Set Size (KB)', 'ru_maxrss', 3),
            ('Soft page faults', 'ru_minflt', 4),
            ('Hard page faults', 'ru_majflt', 5),
            ('Input events', 'ru_inblock', 6),
            ('Output events', 'ru_oublock', 7),
            ('Voluntary context switches', 'ru_nvcsw', 8),
            ('Involuntary context switches', 'ru_nivcsw', 9),
        ]
        with mock.patch('calico.felix.futils.stat_log') as m_log:
            with mock.patch('calico.felix.futils.resource') as m_resource:
                # Stub out getrusage() so the dump sees our fixed values.
                res = m_resource.getrusage.return_value
                for _, field, val in process_results:
                    setattr(res, field, val)
                calls = [
                    mock.call.info('=== DIAGNOSTICS ==='),
                    mock.call.info("--- %s ---", "foo"),
                    mock.call.info('--- %s ---', 'Process Statistics'),
                    mock.call.info('=== END OF DIAGNOSTICS ==='),
                ]
                calls.extend(
                    mock.call.info('%s: %s', name, value)
                    for name, _, value in process_results
                )
                futils.register_process_statistics()
                futils.dump_diags()
                m_log.assert_has_calls(calls, any_order=True)

    def test_dump_diags_cover(self):
        # dump_diags() must swallow logging failures rather than propagate.
        with mock.patch("calico.felix.futils.stat_log") as m_log:
            m_log.info.side_effect = Exception()
            m_log.exception.side_effect = Exception()
            futils.dump_diags()
#$Id$

class InvoiceSetting:
    """Value object holding an organisation's invoice settings.

    A plain data container exposing getter/setter accessors (kept for
    backward compatibility with existing callers) plus ``to_json`` to
    serialize the settings for the API.

    Fixes over the previous revision: the duplicated
    ``set_is_open_invoice_editable``/``get_is_open_invoice_editable``
    definitions (the second pair silently shadowed the first) are removed,
    and ``None`` comparison uses ``is not None``.
    """

    def __init__(self):
        """Initialize parameters for Invoice settings.

        ``None`` means "not set"; such flags are omitted from ``to_json``.
        """
        self.auto_generate = None
        self.prefix_string = ''
        self.start_at = 0
        self.next_number = ''
        self.quantity_precision = 0
        self.discount_type = ''
        self.is_discount_before_tax = None
        self.reference_text = ''
        self.default_template_id = ''
        self.notes = ''
        self.terms = ''
        self.is_shipping_charge_required = None
        self.is_adjustment_required = None
        self.is_open_invoice_editable = None
        self.warn_convert_to_open = None
        self.warn_create_creditnotes = None
        self.attach_expense_receipt_to_invoice = ''
        self.invoice_item_type = ''
        self.is_sales_person_required = None
        self.is_show_invoice_setup = None
        self.discount_enabled = None

    def set_discount_enabled(self, discount_enabled):
        """Set discount enabled (str)."""
        self.discount_enabled = discount_enabled

    def get_discount_enabled(self):
        """Return discount enabled (str)."""
        return self.discount_enabled

    def set_default_template_id(self, default_template_id):
        """Set default template id (str)."""
        self.default_template_id = default_template_id

    def get_default_template_id(self):
        """Return default template id (str)."""
        return self.default_template_id

    def set_auto_generate(self, auto_generate):
        """Set auto generate (bool)."""
        self.auto_generate = auto_generate

    def get_auto_generate(self):
        """Return auto generate (bool)."""
        return self.auto_generate

    def set_prefix_string(self, prefix_string):
        """Set prefix string (str)."""
        self.prefix_string = prefix_string

    def get_prefix_string(self):
        """Return prefix string (str)."""
        return self.prefix_string

    def set_start_at(self, start_at):
        """Set start at (int)."""
        self.start_at = start_at

    def get_start_at(self):
        """Return start at (int)."""
        return self.start_at

    def set_next_number(self, next_number):
        """Set next number (str)."""
        self.next_number = next_number

    def get_next_number(self):
        """Return next number (str)."""
        return self.next_number

    def set_quantity_precision(self, quantity_precision):
        """Set quantity precision (int)."""
        self.quantity_precision = quantity_precision

    def get_quantity_precision(self):
        """Return quantity precision (int)."""
        return self.quantity_precision

    def set_discount_type(self, discount_type):
        """Set discount type (str)."""
        self.discount_type = discount_type

    def get_discount_type(self):
        """Return discount type (str)."""
        return self.discount_type

    def set_is_discount_before_tax(self, is_discount_before_tax):
        """Set whether discount is applied before tax (bool)."""
        self.is_discount_before_tax = is_discount_before_tax

    def get_is_discount_before_tax(self):
        """Return whether discount is applied before tax (bool)."""
        return self.is_discount_before_tax

    def set_reference_text(self, reference_text):
        """Set reference text (str)."""
        self.reference_text = reference_text

    def get_reference_text(self):
        """Return reference text (str)."""
        return self.reference_text

    def set_notes(self, notes):
        """Set notes (str)."""
        self.notes = notes

    def get_notes(self):
        """Return notes (str)."""
        return self.notes

    def set_terms(self, terms):
        """Set terms (str)."""
        self.terms = terms

    def get_terms(self):
        """Return terms (str)."""
        return self.terms

    def set_is_shipping_charge_required(self, is_shipping_charge_required):
        """Set whether shipping charge is required (bool)."""
        self.is_shipping_charge_required = is_shipping_charge_required

    def get_is_shipping_charge_required(self):
        """Return whether shipping charge is required (bool)."""
        return self.is_shipping_charge_required

    def set_is_adjustment_required(self, is_adjustment_required):
        """Set whether adjustment is required (bool)."""
        self.is_adjustment_required = is_adjustment_required

    def get_is_adjustment_required(self):
        """Return whether adjustment is required (bool)."""
        return self.is_adjustment_required

    def set_is_open_invoice_editable(self, is_open_invoice_editable):
        """Set whether an open invoice is editable (bool)."""
        self.is_open_invoice_editable = is_open_invoice_editable

    def get_is_open_invoice_editable(self):
        """Return whether an open invoice is editable (bool)."""
        return self.is_open_invoice_editable

    def set_warn_convert_to_open(self, warn_convert_to_open):
        """Set whether to warn when converting to open (bool)."""
        self.warn_convert_to_open = warn_convert_to_open

    def get_warn_convert_to_open(self):
        """Return whether to warn when converting to open (bool)."""
        return self.warn_convert_to_open

    def set_warn_create_creditnotes(self, warn_create_creditnotes):
        """Set whether to warn when creating creditnotes (bool)."""
        self.warn_create_creditnotes = warn_create_creditnotes

    def get_warn_create_creditnotes(self):
        """Return whether to warn when creating creditnotes (bool)."""
        return self.warn_create_creditnotes

    def set_attach_expense_receipt_to_invoice(self,
                                              attach_expense_receipt_to_invoice):
        """Set attach expense receipt to invoice (str)."""
        self.attach_expense_receipt_to_invoice = \
            attach_expense_receipt_to_invoice

    def get_attach_expense_receipt_to_invoice(self):
        """Return attach expense receipt to invoice (str)."""
        return self.attach_expense_receipt_to_invoice

    def set_is_sales_person_required(self, is_sales_person_required):
        """Set whether sales person is required (bool)."""
        self.is_sales_person_required = is_sales_person_required

    def get_is_sales_person_required(self):
        """Return whether sales person is required (bool)."""
        return self.is_sales_person_required

    def set_is_show_invoice_setup(self, is_show_invoice_setup):
        """Set whether to show invoice setup (bool)."""
        self.is_show_invoice_setup = is_show_invoice_setup

    def get_is_show_invoice_setup(self):
        """Return whether to show invoice setup (bool)."""
        return self.is_show_invoice_setup

    def set_invoice_item_type(self, invoice_item_type):
        """Set invoice item type (str)."""
        self.invoice_item_type = invoice_item_type

    def get_invoice_item_type(self):
        """Return invoice item type (str)."""
        return self.invoice_item_type

    def to_json(self):
        """Serialize the settings, omitting unset values.

        Flags that are still ``None``, empty strings, and non-positive
        numeric fields are left out of the payload.

        Returns:
            dict: Dictionary containing json object for invoice settings.
        """
        data = {}
        if self.auto_generate is not None:
            data['auto_generate'] = self.auto_generate
        if self.prefix_string is not None:
            data['prefix_string'] = self.prefix_string
        if self.start_at > 0:
            data['start_at'] = self.start_at
        if self.next_number != '':
            data['next_number'] = self.next_number
        if self.quantity_precision > 0:
            data['quantity_precision'] = self.quantity_precision
        if self.discount_enabled is not None:
            data['discount_enabled'] = self.discount_enabled
        if self.reference_text != '':
            data['reference_text'] = self.reference_text
        if self.default_template_id != '':
            data['default_template_id'] = self.default_template_id
        if self.notes != '':
            data['notes'] = self.notes
        if self.terms != '':
            data['terms'] = self.terms
        if self.is_shipping_charge_required is not None:
            data['is_shipping_charge_required'] = \
                self.is_shipping_charge_required
        if self.is_adjustment_required is not None:
            data['is_adjustment_required'] = self.is_adjustment_required
        if self.invoice_item_type != '':
            data['invoice_item_type'] = self.invoice_item_type
        if self.discount_type != '':
            data['discount_type'] = self.discount_type
        if self.warn_convert_to_open is not None:
            data['warn_convert_to_open'] = self.warn_convert_to_open
        if self.warn_create_creditnotes is not None:
            data['warn_create_creditnotes'] = self.warn_create_creditnotes
        if self.is_open_invoice_editable is not None:
            data['is_open_invoice_editable'] = self.is_open_invoice_editable
        if self.is_sales_person_required is not None:
            data['is_sales_person_required'] = self.is_sales_person_required
        return data
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors # # This module is part of GitDB and is released under # the New BSD License: http://www.opensource.org/licenses/bsd-license.php """Contains PackIndexFile and PackFile implementations""" from git.exc import ( BadObject, UnsupportedOperation, ParseError ) from util import ( zlib, mman, LazyMixin, unpack_from, bin_to_hex, ) from fun import ( create_pack_object_header, pack_object_header_info, is_equal_canonical_sha, type_id_to_type_map, write_object, stream_copy, chunk_size, delta_types, OFS_DELTA, REF_DELTA, msb_size ) try: from _perf import PackIndexFile_sha_to_index except ImportError: pass # END try c module from base import ( # Amazing ! OInfo, OStream, OPackInfo, OPackStream, ODeltaStream, ODeltaPackInfo, ODeltaPackStream, ) from stream import ( DecompressMemMapReader, DeltaApplyReader, Sha1Writer, NullStream, FlexibleSha1Writer ) from struct import ( pack, unpack, ) from binascii import crc32 from itertools import izip import tempfile import array import os import sys __all__ = ('PackIndexFile', 'PackFile', 'PackEntity') #{ Utilities def pack_object_at(cursor, offset, as_stream): """ :return: Tuple(abs_data_offset, PackInfo|PackStream) an object of the correct type according to the type_id of the object. If as_stream is True, the object will contain a stream, allowing the data to be read decompressed. 
:param data: random accessable data containing all required information :parma offset: offset in to the data at which the object information is located :param as_stream: if True, a stream object will be returned that can read the data, otherwise you receive an info object only""" data = cursor.use_region(offset).buffer() type_id, uncomp_size, data_rela_offset = pack_object_header_info(data) total_rela_offset = None # set later, actual offset until data stream begins delta_info = None # OFFSET DELTA if type_id == OFS_DELTA: i = data_rela_offset c = ord(data[i]) i += 1 delta_offset = c & 0x7f while c & 0x80: c = ord(data[i]) i += 1 delta_offset += 1 delta_offset = (delta_offset << 7) + (c & 0x7f) # END character loop delta_info = delta_offset total_rela_offset = i # REF DELTA elif type_id == REF_DELTA: total_rela_offset = data_rela_offset+20 delta_info = data[data_rela_offset:total_rela_offset] # BASE OBJECT else: # assume its a base object total_rela_offset = data_rela_offset # END handle type id abs_data_offset = offset + total_rela_offset if as_stream: stream = DecompressMemMapReader(buffer(data, total_rela_offset), False, uncomp_size) if delta_info is None: return abs_data_offset, OPackStream(offset, type_id, uncomp_size, stream) else: return abs_data_offset, ODeltaPackStream(offset, type_id, uncomp_size, delta_info, stream) else: if delta_info is None: return abs_data_offset, OPackInfo(offset, type_id, uncomp_size) else: return abs_data_offset, ODeltaPackInfo(offset, type_id, uncomp_size, delta_info) # END handle info # END handle stream def write_stream_to_pack(read, write, zstream, base_crc=None): """Copy a stream as read from read function, zip it, and write the result. Count the number of written bytes and return it :param base_crc: if not None, the crc will be the base for all compressed data we consecutively write and generate a crc32 from. 
If None, no crc will be generated :return: tuple(no bytes read, no bytes written, crc32) crc might be 0 if base_crc was false""" br = 0 # bytes read bw = 0 # bytes written want_crc = base_crc is not None crc = 0 if want_crc: crc = base_crc #END initialize crc while True: chunk = read(chunk_size) br += len(chunk) compressed = zstream.compress(chunk) bw += len(compressed) write(compressed) # cannot assume return value if want_crc: crc = crc32(compressed, crc) #END handle crc if len(chunk) != chunk_size: break #END copy loop compressed = zstream.flush() bw += len(compressed) write(compressed) if want_crc: crc = crc32(compressed, crc) #END handle crc return (br, bw, crc) #} END utilities class IndexWriter(object): """Utility to cache index information, allowing to write all information later in one go to the given stream :note: currently only writes v2 indices""" __slots__ = '_objs' def __init__(self): self._objs = list() def append(self, binsha, crc, offset): """Append one piece of object information""" self._objs.append((binsha, crc, offset)) def write(self, pack_sha, write): """Write the index file using the given write method :param pack_sha: binary sha over the whole pack that we index :return: sha1 binary sha over all index file contents""" # sort for sha1 hash self._objs.sort(key=lambda o: o[0]) sha_writer = FlexibleSha1Writer(write) sha_write = sha_writer.write sha_write(PackIndexFile.index_v2_signature) sha_write(pack(">L", PackIndexFile.index_version_default)) # fanout tmplist = list((0,)*256) # fanout or list with 64 bit offsets for t in self._objs: tmplist[ord(t[0][0])] += 1 #END prepare fanout for i in xrange(255): v = tmplist[i] sha_write(pack('>L', v)) tmplist[i+1] += v #END write each fanout entry sha_write(pack('>L', tmplist[255])) # sha1 ordered # save calls, that is push them into c sha_write(''.join(t[0] for t in self._objs)) # crc32 for t in self._objs: sha_write(pack('>L', t[1]&0xffffffff)) #END for each crc tmplist = list() # offset 32 for t in 
self._objs: ofs = t[2] if ofs > 0x7fffffff: tmplist.append(ofs) ofs = 0x80000000 + len(tmplist)-1 #END hande 64 bit offsets sha_write(pack('>L', ofs&0xffffffff)) #END for each offset # offset 64 for ofs in tmplist: sha_write(pack(">Q", ofs)) #END for each offset # trailer assert(len(pack_sha) == 20) sha_write(pack_sha) sha = sha_writer.sha(as_hex=False) write(sha) return sha class PackIndexFile(LazyMixin): """A pack index provides offsets into the corresponding pack, allowing to find locations for offsets faster.""" # Dont use slots as we dynamically bind functions for each version, need a dict for this # The slots you see here are just to keep track of our instance variables # __slots__ = ('_indexpath', '_fanout_table', '_cursor', '_version', # '_sha_list_offset', '_crc_list_offset', '_pack_offset', '_pack_64_offset') # used in v2 indices _sha_list_offset = 8 + 1024 index_v2_signature = '\377tOc' index_version_default = 2 def __init__(self, indexpath): super(PackIndexFile, self).__init__() self._indexpath = indexpath def _set_cache_(self, attr): if attr == "_packfile_checksum": self._packfile_checksum = self._cursor.map()[-40:-20] elif attr == "_packfile_checksum": self._packfile_checksum = self._cursor.map()[-20:] elif attr == "_cursor": # Note: We don't lock the file when reading as we cannot be sure # that we can actually write to the location - it could be a read-only # alternate for instance self._cursor = mman.make_cursor(self._indexpath).use_region() # We will assume that the index will always fully fit into memory ! if mman.window_size() > 0 and self._cursor.file_size() > mman.window_size(): raise AssertionError("The index file at %s is too large to fit into a mapped window (%i > %i). 
This is a limitation of the implementation" % (self._indexpath, self._cursor.file_size(), mman.window_size())) #END assert window size else: # now its time to initialize everything - if we are here, someone wants # to access the fanout table or related properties # CHECK VERSION mmap = self._cursor.map() self._version = (mmap[:4] == self.index_v2_signature and 2) or 1 if self._version == 2: version_id = unpack_from(">L", mmap, 4)[0] assert version_id == self._version, "Unsupported index version: %i" % version_id # END assert version # SETUP FUNCTIONS # setup our functions according to the actual version for fname in ('entry', 'offset', 'sha', 'crc'): setattr(self, fname, getattr(self, "_%s_v%i" % (fname, self._version))) # END for each function to initialize # INITIALIZE DATA # byte offset is 8 if version is 2, 0 otherwise self._initialize() # END handle attributes #{ Access V1 def _entry_v1(self, i): """:return: tuple(offset, binsha, 0)""" return unpack_from(">L20s", self._cursor.map(), 1024 + i*24) + (0, ) def _offset_v1(self, i): """see ``_offset_v2``""" return unpack_from(">L", self._cursor.map(), 1024 + i*24)[0] def _sha_v1(self, i): """see ``_sha_v2``""" base = 1024 + (i*24)+4 return self._cursor.map()[base:base+20] def _crc_v1(self, i): """unsupported""" return 0 #} END access V1 #{ Access V2 def _entry_v2(self, i): """:return: tuple(offset, binsha, crc)""" return (self._offset_v2(i), self._sha_v2(i), self._crc_v2(i)) def _offset_v2(self, i): """:return: 32 or 64 byte offset into pack files. 64 byte offsets will only be returned if the pack is larger than 4 GiB, or 2^32""" offset = unpack_from(">L", self._cursor.map(), self._pack_offset + i * 4)[0] # if the high-bit is set, this indicates that we have to lookup the offset # in the 64 bit region of the file. 
The current offset ( lower 31 bits ) # are the index into it if offset & 0x80000000: offset = unpack_from(">Q", self._cursor.map(), self._pack_64_offset + (offset & ~0x80000000) * 8)[0] # END handle 64 bit offset return offset def _sha_v2(self, i): """:return: sha at the given index of this file index instance""" base = self._sha_list_offset + i * 20 return self._cursor.map()[base:base+20] def _crc_v2(self, i): """:return: 4 bytes crc for the object at index i""" return unpack_from(">L", self._cursor.map(), self._crc_list_offset + i * 4)[0] #} END access V2 #{ Initialization def _initialize(self): """initialize base data""" self._fanout_table = self._read_fanout((self._version == 2) * 8) if self._version == 2: self._crc_list_offset = self._sha_list_offset + self.size() * 20 self._pack_offset = self._crc_list_offset + self.size() * 4 self._pack_64_offset = self._pack_offset + self.size() * 4 # END setup base def _read_fanout(self, byte_offset): """Generate a fanout table from our data""" d = self._cursor.map() out = list() append = out.append for i in range(256): append(unpack_from('>L', d, byte_offset + i*4)[0]) # END for each entry return out #} END initialization #{ Properties def version(self): return self._version def size(self): """:return: amount of objects referred to by this index""" return self._fanout_table[255] def path(self): """:return: path to the packindexfile""" return self._indexpath def packfile_checksum(self): """:return: 20 byte sha representing the sha1 hash of the pack file""" return self._cursor.map()[-40:-20] def indexfile_checksum(self): """:return: 20 byte sha representing the sha1 hash of this index file""" return self._cursor.map()[-20:] def offsets(self): """:return: sequence of all offsets in the order in which they were written :note: return value can be random accessed, but may be immmutable""" if self._version == 2: # read stream to array, convert to tuple a = array.array('I') # 4 byte unsigned int, long are 8 byte on 64 bit it 
appears a.fromstring(buffer(self._cursor.map(), self._pack_offset, self._pack_64_offset - self._pack_offset)) # networkbyteorder to something array likes more if sys.byteorder == 'little': a.byteswap() return a else: return tuple(self.offset(index) for index in xrange(self.size())) # END handle version def sha_to_index(self, sha): """ :return: index usable with the ``offset`` or ``entry`` method, or None if the sha was not found in this pack index :param sha: 20 byte sha to lookup""" first_byte = ord(sha[0]) get_sha = self.sha lo = 0 # lower index, the left bound of the bisection if first_byte != 0: lo = self._fanout_table[first_byte-1] hi = self._fanout_table[first_byte] # the upper, right bound of the bisection # bisect until we have the sha while lo < hi: mid = (lo + hi) / 2 c = cmp(sha, get_sha(mid)) if c < 0: hi = mid elif not c: return mid else: lo = mid + 1 # END handle midpoint # END bisect return None def partial_sha_to_index(self, partial_bin_sha, canonical_length): """ :return: index as in `sha_to_index` or None if the sha was not found in this index file :param partial_bin_sha: an at least two bytes of a partial binary sha :param canonical_length: lenght of the original hexadecimal representation of the given partial binary sha :raise AmbiguousObjectName:""" if len(partial_bin_sha) < 2: raise ValueError("Require at least 2 bytes of partial sha") first_byte = ord(partial_bin_sha[0]) get_sha = self.sha lo = 0 # lower index, the left bound of the bisection if first_byte != 0: lo = self._fanout_table[first_byte-1] hi = self._fanout_table[first_byte] # the upper, right bound of the bisection # fill the partial to full 20 bytes filled_sha = partial_bin_sha + '\0'*(20 - len(partial_bin_sha)) # find lowest while lo < hi: mid = (lo + hi) / 2 c = cmp(filled_sha, get_sha(mid)) if c < 0: hi = mid elif not c: # perfect match lo = mid break else: lo = mid + 1 # END handle midpoint # END bisect if lo < self.size(): cur_sha = get_sha(lo) if 
        # NOTE(review): the lines below are the tail of a partial-sha lookup
        # method of PackIndexFile whose `def` line lies before this chunk;
        # they are reproduced unchanged.
        is_equal_canonical_sha(canonical_length, partial_bin_sha, cur_sha):
            next_sha = None
            if lo+1 < self.size():
                next_sha = get_sha(lo+1)
            # a second consecutive entry with the same prefix means the
            # abbreviation is not unique
            if next_sha and next_sha == cur_sha:
                raise AmbiguousObjectName(partial_bin_sha)
            return lo
        # END if we have a match
        # END if we found something
        return None

    if 'PackIndexFile_sha_to_index' in globals():
        # NOTE: Its just about 25% faster, the major bottleneck might be the attr
        # accesses
        def sha_to_index(self, sha):
            # delegate to the C implementation when it could be imported
            return PackIndexFile_sha_to_index(self, sha)
        # END redefine heavy-hitter with c version

    #} END properties


class PackFile(LazyMixin):

    """A pack is a file written according to the Version 2 for git packs

    As we currently use memory maps, it could be assumed that the maximum size of
    packs therefore is 32 bit on 32 bit systems. On 64 bit systems, this should be
    fine though.

    :note: at some point, this might be implemented using streams as well, or
        streams are an alternate path in the case memory maps cannot be created
        for some reason - one clearly doesn't want to read 10GB at once in that
        case"""

    __slots__ = ('_packpath', '_cursor', '_size', '_version')

    pack_signature = 0x5041434b         # 'PACK' in big-endian ASCII
    pack_version_default = 2
    # offset into our data at which the first object starts
    first_object_offset = 3*4           # header bytes: signature, version, count
    footer_size = 20                    # final sha

    def __init__(self, packpath):
        # path to the on-disk .pack file; parsing is deferred to _set_cache_
        self._packpath = packpath

    def _set_cache_(self, attr):
        # we fill the whole cache, whichever attribute gets queried first
        self._cursor = mman.make_cursor(self._packpath).use_region()

        # read the header information
        type_id, self._version, self._size = unpack_from(">LLL", self._cursor.map(), 0)

        # TODO: figure out whether we should better keep the lock, or maybe
        # add a .keep file instead ?
        if type_id != self.pack_signature:
            raise ParseError("Invalid pack signature: %i" % type_id)

    def _iter_objects(self, start_offset, as_stream=True):
        """Handle the actual iteration of objects within this pack"""
        c = self._cursor
        content_size = c.file_size() - self.footer_size
        cur_offset = start_offset or self.first_object_offset
        null = NullStream()
        while cur_offset < content_size:
            data_offset, ostream = pack_object_at(c, cur_offset, True)
            # scrub the stream to the end - this decompresses the object, but yields
            # the amount of compressed bytes we need to get to the next offset
            stream_copy(ostream.read, null.write, ostream.size, chunk_size)
            cur_offset += (data_offset - ostream.pack_offset) + ostream.stream.compressed_bytes_read()

            # if a stream is requested, reset it beforehand
            # Otherwise return the Stream object directly, its derived from the
            # info object
            if as_stream:
                ostream.stream.seek(0)
            yield ostream
        # END until we have read everything

    #{ Pack Information

    def size(self):
        """:return: The amount of objects stored in this pack"""
        return self._size

    def version(self):
        """:return: the version of this pack"""
        return self._version

    def data(self):
        """
        :return: read-only data of this pack. It provides random access and
            usually is a memory map.
        :note: This method is unsafe as it returns a window into a file which
            might be larger than the actual window size"""
        # can use map as we are starting at offset 0. Otherwise we would have to use buffer()
        return self._cursor.use_region().map()

    def checksum(self):
        """:return: 20 byte sha1 hash on all object sha's contained in this file"""
        # the trailing 20 bytes of the pack are its footer sha
        return self._cursor.use_region(self._cursor.file_size()-20).buffer()[:]

    def path(self):
        """:return: path to the packfile"""
        return self._packpath
    #} END pack information

    #{ Pack Specific

    def collect_streams(self, offset):
        """
        :return: list of pack streams which are required to build the object
            at the given offset. The first entry of the list is the object at offset,
            the last one is either a full object, or a REF_Delta stream. The latter
            type needs its reference object to be looked up in an ODB to form a
            valid delta chain.
            If the object at offset is no delta, the size of the list is 1.
        :param offset: specifies the first byte of the object within this pack"""
        out = list()
        c = self._cursor
        while True:
            ostream = pack_object_at(c, offset, True)[1]
            out.append(ostream)
            if ostream.type_id == OFS_DELTA:
                # follow the relative offset back to the delta base
                offset = ostream.pack_offset - ostream.delta_info
            else:
                # the only thing we can lookup are OFFSET deltas. Everything
                # else is either an object, or a ref delta, in the latter
                # case someone else has to find it
                break
            # END handle type
        # END while chaining streams
        return out

    #} END pack specific

    #{ Read-Database like Interface

    def info(self, offset):
        """Retrieve information about the object at the given file-absolute offset

        :param offset: byte offset
        :return: OPackInfo instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, False)[1]

    def stream(self, offset):
        """Retrieve an object at the given file-relative offset as stream along with its information

        :param offset: byte offset
        :return: OPackStream instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, True)[1]

    def stream_iter(self, start_offset=0):
        """
        :return: iterator yielding OPackStream compatible instances, allowing
            to access the data in the pack directly.
        :param start_offset: offset to the first object to iterate. If 0, iteration
            starts at the very first object in the pack.
        :note: Iterating a pack directly is costly as the datastream has to be
            decompressed to determine the bounds between the objects"""
        return self._iter_objects(start_offset, as_stream=True)

    #} END Read-Database like Interface


class PackEntity(LazyMixin):

    """Combines the PackIndexFile and the PackFile into one, allowing the
    actual objects to be resolved and iterated"""

    __slots__ = ('_index',          # our index file
                 '_pack',           # our pack file
                 '_offset_map'      # on demand dict mapping one offset to the next consecutive one
                 )

    IndexFileCls = PackIndexFile
    PackFileCls = PackFile

    def __init__(self, pack_or_index_path):
        """Initialize ourselves with the path to the respective pack or index file"""
        basename, ext = os.path.splitext(pack_or_index_path)
        self._index = self.IndexFileCls("%s.idx" % basename)        # PackIndexFile instance
        self._pack = self.PackFileCls("%s.pack" % basename)         # corresponding PackFile instance

    def _set_cache_(self, attr):
        # currently this can only be _offset_map
        # TODO: make this a simple sorted offset array which can be bisected
        # to find the respective entry, from which we can take a +1 easily
        # This might be slower, but should also be much lighter in memory !
        offsets_sorted = sorted(self._index.offsets())
        last_offset = len(self._pack.data()) - self._pack.footer_size
        assert offsets_sorted, "Cannot handle empty indices"

        offset_map = None
        if len(offsets_sorted) == 1:
            offset_map = {offsets_sorted[0]: last_offset}
        else:
            # pair each offset with its successor to know where each entry ends
            # NOTE(review): `.next()`, `izip` and `xrange` below are
            # Python-2-only spellings; this module targets Python 2.
            iter_offsets = iter(offsets_sorted)
            iter_offsets_plus_one = iter(offsets_sorted)
            iter_offsets_plus_one.next()
            consecutive = izip(iter_offsets, iter_offsets_plus_one)

            offset_map = dict(consecutive)

            # the last offset is not yet set
            offset_map[offsets_sorted[-1]] = last_offset
        # END handle offset amount
        self._offset_map = offset_map

    def _sha_to_index(self, sha):
        """:return: index for the given sha, or raise"""
        index = self._index.sha_to_index(sha)
        if index is None:
            raise BadObject(sha)
        return index

    def _iter_objects(self, as_stream):
        """Iterate over all objects in our index and yield their OInfo
        or OStream instances"""
        _sha = self._index.sha
        _object = self._object
        for index in xrange(self._index.size()):
            yield _object(_sha(index), as_stream, index)
        # END for each index

    def _object(self, sha, as_stream, index=-1):
        """:return: OInfo or OStream object providing information about the given sha
        :param index: if not -1, its assumed to be the sha's index in the IndexFile"""
        # its a little bit redundant here, but it needs to be efficient
        if index < 0:
            index = self._sha_to_index(sha)
        if sha is None:
            sha = self._index.sha(index)
        # END assure sha is present ( in output )
        offset = self._index.offset(index)
        type_id, uncomp_size, data_rela_offset = pack_object_header_info(self._pack._cursor.use_region(offset).buffer())
        if as_stream:
            if type_id not in delta_types:
                packstream = self._pack.stream(offset)
                return OStream(sha, packstream.type, packstream.size, packstream.stream)
            # END handle non-deltas

            # produce a delta stream containing all info
            # To prevent it from applying the deltas when querying the size,
            # we extract it from the delta stream ourselves
            streams = self.collect_streams_at_offset(offset)
            dstream = DeltaApplyReader.new(streams)
            return ODeltaStream(sha, dstream.type, None, dstream)
        else:
            if type_id not in delta_types:
                return OInfo(sha, type_id_to_type_map[type_id], uncomp_size)
            # END handle non-deltas

            # deltas are a little tougher - unpack the first bytes to obtain
            # the actual target size, as opposed to the size of the delta data
            streams = self.collect_streams_at_offset(offset)
            buf = streams[0].read(512)
            offset, src_size = msb_size(buf)
            offset, target_size = msb_size(buf, offset)

            # collect the streams to obtain the actual object type
            if streams[-1].type_id in delta_types:
                raise BadObject(sha, "Could not resolve delta object")
            return OInfo(sha, streams[-1].type, target_size)
        # END handle stream

    #{ Read-Database like Interface

    def info(self, sha):
        """Retrieve information about the object identified by the given sha

        :param sha: 20 byte sha1
        :raise BadObject:
        :return: OInfo instance, with 20 byte sha"""
        return self._object(sha, False)

    def stream(self, sha):
        """Retrieve an object stream along with its information as identified by the given sha

        :param sha: 20 byte sha1
        :raise BadObject:
        :return: OStream instance, with 20 byte sha"""
        return self._object(sha, True)

    def info_at_index(self, index):
        """As ``info``, but uses a PackIndexFile compatible index to refer to the object"""
        return self._object(None, False, index)

    def stream_at_index(self, index):
        """As ``stream``, but uses a PackIndexFile compatible index to refer to the object"""
        return self._object(None, True, index)

    #} END Read-Database like Interface

    #{ Interface

    def pack(self):
        """:return: the underlying pack file instance"""
        return self._pack

    def index(self):
        """:return: the underlying pack index file instance"""
        return self._index

    def is_valid_stream(self, sha, use_crc=False):
        """
        Verify that the stream at the given sha is valid.

        :param use_crc: if True, the index' crc is run over the compressed stream of
            the object, which is much faster than checking the sha1.
            It is also more prone to unnoticed corruption or manipulation.
        :param sha: 20 byte sha1 of the object whose stream to verify
            whether the compressed stream of the object is valid. If it is
            a delta, this only verifies that the delta's data is valid, not the
            data of the actual undeltified object, as it depends on more than
            just this stream.
            If False, the object will be decompressed and the sha generated. It must
            match the given sha
        :return: True if the stream is valid
        :raise UnsupportedOperation: If the index is version 1 only
        :raise BadObject: sha was not found"""
        if use_crc:
            if self._index.version() < 2:
                raise UnsupportedOperation("Version 1 indices do not contain crc's, verify by sha instead")
            # END handle index version

            index = self._sha_to_index(sha)
            offset = self._index.offset(index)
            next_offset = self._offset_map[offset]
            crc_value = self._index.crc(index)

            # create the current crc value, on the compressed object data
            # Read it in chunks, without copying the data
            crc_update = zlib.crc32
            pack_data = self._pack.data()
            cur_pos = offset
            this_crc_value = 0
            while cur_pos < next_offset:
                rbound = min(cur_pos + chunk_size, next_offset)
                size = rbound - cur_pos
                this_crc_value = crc_update(buffer(pack_data, cur_pos, size), this_crc_value)
                cur_pos += size
            # END window size loop

            # crc returns signed 32 bit numbers, the AND op forces it into unsigned
            # mode ... wow, sneaky, from dulwich.
            return (this_crc_value & 0xffffffff) == crc_value
        else:
            shawriter = Sha1Writer()
            stream = self._object(sha, as_stream=True)
            # write a loose object, which is the basis for the sha
            write_object(stream.type, stream.size, stream.read, shawriter.write)

            # NOTE(review): the assert below makes a False result unreachable -
            # a mismatching sha raises AssertionError instead of returning
            # False, and the trailing `return True` can never be reached.
            assert shawriter.sha(as_hex=False) == sha
            return shawriter.sha(as_hex=False) == sha
        # END handle crc/sha verification
        return True

    def info_iter(self):
        """
        :return: Iterator over all objects in this pack. The iterator yields
            OInfo instances"""
        return self._iter_objects(as_stream=False)

    def stream_iter(self):
        """
        :return: iterator over all objects in this pack. The iterator yields
            OStream instances"""
        return self._iter_objects(as_stream=True)

    def collect_streams_at_offset(self, offset):
        """
        As the version in the PackFile, but can resolve REF deltas within this pack
        For more info, see ``collect_streams``

        :param offset: offset into the pack file at which the object can be found"""
        streams = self._pack.collect_streams(offset)

        # try to resolve the last one if needed. It is assumed to be either
        # a REF delta, or a base object, as OFFSET deltas are resolved by the pack
        if streams[-1].type_id == REF_DELTA:
            stream = streams[-1]
            while stream.type_id in delta_types:
                if stream.type_id == REF_DELTA:
                    # if the base object is not in this pack, leave the chain
                    # unresolved - the caller has to look it up elsewhere
                    sindex = self._index.sha_to_index(stream.delta_info)
                    if sindex is None:
                        break
                    stream = self._pack.stream(self._index.offset(sindex))
                    streams.append(stream)
                else:
                    # must be another OFS DELTA - this could happen if a REF
                    # delta we resolve previously points to an OFS delta. Who
                    # would do that ;) ? We can handle it though
                    stream = self._pack.stream(stream.delta_info)
                    streams.append(stream)
                # END handle ref delta
            # END resolve ref streams
        # END resolve streams

        return streams

    def collect_streams(self, sha):
        """
        As ``PackFile.collect_streams``, but takes a sha instead of an offset.
        Additionally, ref_delta streams will be resolved within this pack.
        If this is not possible, the stream will be left alone, hence it is advised
        to check for unresolved ref-deltas and resolve them before attempting to
        construct a delta stream.

        :param sha: 20 byte sha1 specifying the object whose related streams you want to collect
        :return: list of streams, first being the actual object delta, the last being
            a possibly unresolved base object.
        :raise BadObject:"""
        return self.collect_streams_at_offset(self._index.offset(self._sha_to_index(sha)))

    @classmethod
    def write_pack(cls, object_iter, pack_write, index_write=None,
                   object_count = None, zlib_compression = zlib.Z_BEST_SPEED):
        """
        Create a new pack by putting all objects obtained by the object_iterator
        into a pack which is written using the pack_write method.
        The respective index is produced as well if index_write is not None.

        :param object_iter: iterator yielding odb output objects
        :param pack_write: function to receive strings to write into the pack stream
        :param index_write: if not None, the function writes the index file corresponding
            to the pack.
        :param object_count: if you can provide the amount of objects in your iteration,
            this would be the place to put it. Otherwise we have to pre-iterate and store
            all items into a list to get the number, which uses more memory than necessary.
        :param zlib_compression: the zlib compression level to use
        :return: tuple(pack_sha, index_binsha) binary sha over all the contents of the pack
            and over all contents of the index. If index_write was None, index_binsha will be None
        :note: The destination of the write functions is up to the user.
            It could be a socket, or a file for instance
        :note: writes only undeltified objects"""
        objs = object_iter
        if not object_count:
            # no count given - we must materialize the iterator to count it
            if not isinstance(object_iter, (tuple, list)):
                objs = list(object_iter)
            #END handle list type
            object_count = len(objs)
        #END handle object

        pack_writer = FlexibleSha1Writer(pack_write)
        pwrite = pack_writer.write
        ofs = 0                                 # current offset into the pack file
        index = None
        wants_index = index_write is not None

        # write header
        pwrite(pack('>LLL', PackFile.pack_signature, PackFile.pack_version_default, object_count))
        ofs += 12

        if wants_index:
            index = IndexWriter()
        #END handle index header

        actual_count = 0
        for obj in objs:
            actual_count += 1
            crc = 0

            # object header
            hdr = create_pack_object_header(obj.type_id, obj.size)
            # the crc over header and compressed data only matters for the index
            if index_write:
                crc = crc32(hdr)
            else:
                crc = None
            #END handle crc
            pwrite(hdr)

            # data stream
            zstream = zlib.compressobj(zlib_compression)
            ostream = obj.stream
            br, bw, crc = write_stream_to_pack(ostream.read, pwrite, zstream, base_crc = crc)
            assert(br == obj.size)
            if wants_index:
                index.append(obj.binsha, crc, ofs)
            #END handle index

            ofs += len(hdr) + bw
            if actual_count == object_count:
                break
            #END abort once we are done
        #END for each object

        if actual_count != object_count:
            raise ValueError("Expected to write %i objects into pack, but received only %i from iterators" % (object_count, actual_count))
        #END count assertion

        # write footer
        pack_sha = pack_writer.sha(as_hex = False)
        assert len(pack_sha) == 20
        pack_write(pack_sha)
        ofs += len(pack_sha)                            # just for completeness ;)

        index_sha = None
        if wants_index:
            index_sha = index.write(pack_sha, index_write)
        #END handle index

        return pack_sha, index_sha

    @classmethod
    def create(cls, object_iter, base_dir, object_count = None, zlib_compression = zlib.Z_BEST_SPEED):
        """Create a new on-disk entity comprised of a properly named pack file
        and a properly named and corresponding index file. The pack contains
        all OStream objects contained in object iter.

        :param base_dir: directory which is to contain the files
        :return: PackEntity instance initialized with the new pack
        :note: for more information on the other parameters see the write_pack method"""
        pack_fd, pack_path = tempfile.mkstemp('', 'pack', base_dir)
        index_fd, index_path = tempfile.mkstemp('', 'index', base_dir)
        pack_write = lambda d: os.write(pack_fd, d)
        index_write = lambda d: os.write(index_fd, d)

        pack_binsha, index_binsha = cls.write_pack(object_iter, pack_write, index_write, object_count, zlib_compression)
        # NOTE(review): the descriptors leak if write_pack raises - a
        # try/finally around the close calls would be safer; verify intent.
        os.close(pack_fd)
        os.close(index_fd)

        # rename the temporary files to their final, sha-derived names
        fmt = "pack-%s.%s"
        new_pack_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'pack'))
        new_index_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'idx'))
        os.rename(pack_path, new_pack_path)
        os.rename(index_path, new_index_path)

        return cls(new_pack_path)

    #} END interface
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#
#    Copyright 2012 Nicira, Inc
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import contextlib

from lxml import etree
import mock
from oslo.config import cfg

from nova.compute import flavors
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova.openstack.common import processutils
from nova import test
from nova.tests.virt.libvirt import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif

CONF = cfg.CONF


class LibvirtVifTestCase(test.TestCase):
    # Each fixture below models a network/VIF combination; the tests render
    # them through the generic VIF driver and assert on the libvirt XML.

    gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
    dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
    ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
    subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
                                           dns=[dns_bridge_4],
                                           gateway=gateway_bridge_4,
                                           routes=None,
                                           dhcp_server='191.168.1.1')

    gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
    subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
                                           dns=None,
                                           gateway=gateway_bridge_6,
                                           ips=None,
                                           routes=None)

    network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
                                           bridge='br0',
                                           label=None,
                                           subnets=[subnet_bridge_4,
                                                    subnet_bridge_6],
                                           bridge_interface='eth0',
                                           vlan=99)

    vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
                                   address='ca:fe:de:ad:be:ef',
                                   network=network_bridge,
                                   type=network_model.VIF_TYPE_BRIDGE,
                                   devname='tap-xxx-yyy-zzz',
                                   ovs_interfaceid=None)

    network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
                                                   bridge=None,
                                                   label=None,
                                                   subnets=[subnet_bridge_4,
                                                            subnet_bridge_6],
                                                   bridge_interface='eth0',
                                                   vlan=99)

    vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
                                           address='ca:fe:de:ad:be:ef',
                                           network=network_bridge_neutron,
                                           type=None,
                                           devname='tap-xxx-yyy-zzz',
                                           ovs_interfaceid='aaa-bbb-ccc')

    network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
                                        bridge='br0',
                                        label=None,
                                        subnets=[subnet_bridge_4,
                                                 subnet_bridge_6],
                                        bridge_interface=None,
                                        vlan=99)

    network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
                                        bridge='br0',
                                        label=None,
                                        subnets=[subnet_bridge_4,
                                                 subnet_bridge_6],
                                        bridge_interface=None,
                                        vlan=99)

    vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
                                address='ca:fe:de:ad:be:ef',
                                network=network_ovs,
                                type=network_model.VIF_TYPE_OVS,
                                devname='tap-xxx-yyy-zzz',
                                ovs_interfaceid='aaa-bbb-ccc')

    vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
                                       address='ca:fe:de:ad:be:ef',
                                       network=network_ovs,
                                       type=None,
                                       devname=None,
                                       ovs_interfaceid=None)

    vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
                                address='ca:fe:de:ad:be:ef',
                                network=network_ivs,
                                type=network_model.VIF_TYPE_IVS,
                                devname='tap-xxx-yyy-zzz',
                                ovs_interfaceid='aaa-bbb-ccc')

    vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
                                       address='ca:fe:de:ad:be:ef',
                                       network=network_ovs,
                                       type=None,
                                       devname=None,
                                       ovs_interfaceid='aaa')

    vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
                                 address='ca:fe:de:ad:be:ef',
                                 network=network_bridge,
                                 type=None,
                                 devname='tap-xxx-yyy-zzz',
                                 ovs_interfaceid=None)

    network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
                                         bridge=None,
                                         label=None,
                                         subnets=[subnet_bridge_4,
                                                  subnet_bridge_6],
                                         interface='eth0',
                                         vlan=99)

    vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
                                    address='ca:fe:de:ad:be:ef',
                                    network=network_8021,
                                    type=network_model.VIF_TYPE_802_QBH,
                                    devname='tap-xxx-yyy-zzz',
                                    ovs_interfaceid=None,
                                    qbh_params=network_model.VIF8021QbhParams(
                                        profileid="xxx-yyy-zzz"))

    vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
                                    address='ca:fe:de:ad:be:ef',
                                    network=network_8021,
                                    type=network_model.VIF_TYPE_802_QBG,
                                    devname='tap-xxx-yyy-zzz',
                                    ovs_interfaceid=None,
                                    qbg_params=network_model.VIF8021QbgParams(
                                        managerid="xxx-yyy-zzz",
                                        typeid="aaa-bbb-ccc",
                                        typeidversion="1",
                                        instanceid="ddd-eee-fff"))

    network_mlnx = network_model.Network(id='network-id-xxx-yyy-zzz',
                                         label=None,
                                         bridge=None,
                                         subnets=[subnet_bridge_4,
                                                  subnet_bridge_6],
                                         interface='eth0')

    network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
                                            label=None,
                                            bridge=None,
                                            subnets=[subnet_bridge_4],
                                            interface='eth0')

    vif_mlnx = network_model.VIF(id='vif-xxx-yyy-zzz',
                                 address='ca:fe:de:ad:be:ef',
                                 network=network_mlnx,
                                 type=network_model.VIF_TYPE_MLNX_DIRECT,
                                 devname='tap-xxx-yyy-zzz')

    vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
                                    address='ca:fe:de:ad:be:ef',
                                    network=network_midonet,
                                    type=network_model.VIF_TYPE_MIDONET,
                                    devname='tap-xxx-yyy-zzz')

    vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
                                    address='ca:fe:de:ad:be:ef',
                                    network=network_bridge,
                                    type=network_model.VIF_TYPE_IOVISOR,
                                    devname='tap-xxx-yyy-zzz',
                                    ovs_interfaceid=None)

    instance = {
        'name': 'instance-name',
        'uuid': 'instance-uuid'
    }

    bandwidth = {
        'quota:vif_inbound_peak': '200',
        'quota:vif_outbound_peak': '20',
        'quota:vif_inbound_average': '100',
        'quota:vif_outbound_average': '10',
        'quota:vif_inbound_burst': '300',
        'quota:vif_outbound_burst': '30'
    }

    def setUp(self):
        super(LibvirtVifTestCase, self).setUp()
        self.flags(allow_same_net_traffic=True)
        self.executes = []

        def fake_execute(*cmd, **kwargs):
            # record every command line so tests can assert on it
            self.executes.append(cmd)
            return None, None

        self.stubs.Set(utils, 'execute', fake_execute)

    def _get_conn(self, uri="qemu:///session", ver=None):
        # factory returning a lazily created fake libvirt connection
        def __inner():
            if ver is None:
                return fakelibvirt.Connection(uri, False)
            else:
                return fakelibvirt.Connection(uri, False, ver)
        return __inner

    def _get_node(self, xml):
        # return the single <interface> element of the given domain XML
        doc = etree.fromstring(xml)
        ret = doc.findall('./devices/interface')
        self.assertEqual(len(ret), 1)
        return ret[0]

    def _assertMacEquals(self, node, vif):
        # the <mac address="..."/> element must carry the VIF's address
        mac = node.find("mac").get("address")
        self.assertEqual(mac, vif['address'])

    def _assertTypeEquals(self, node, type, attr, source, br_want,
                          prefix=None):
        self.assertEqual(node.get("type"), type)
        br_name = node.find(attr).get(source)
        if prefix is None:
            self.assertEqual(br_name, br_want)
        else:
            self.assertTrue(br_name.startswith(prefix))

    def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
                                br_want=None, size=0, prefix=None):
        # `size` is the expected number of <filterref> children
        ret = node.findall("filterref")
        self.assertEqual(len(ret), size)
        self._assertTypeEquals(node, type, attr, source, br_want,
                               prefix)
        self._assertMacEquals(node, vif)

    def _assertModel(self, xml, model_want=None, driver_want=None):
        # None means "the element must be absent"
        node = self._get_node(xml)
        if model_want is None:
            ret = node.findall("model")
            self.assertEqual(len(ret), 0)
        else:
            model = node.find("model").get("type")
            self.assertEqual(model, model_want)
        if driver_want is None:
            ret = node.findall("driver")
            self.assertEqual(len(ret), 0)
        else:
            driver = node.find("driver").get("name")
            self.assertEqual(driver, driver_want)

    def _get_conf(self):
        # minimal guest config that the NIC devices get attached to
        conf = vconfig.LibvirtConfigGuest()
        conf.virt_type = "qemu"
        conf.name = "fake-name"
        conf.uuid = "fake-uuid"
        conf.memory = 100 * 1024
        conf.vcpus = 4
        return conf

    def _get_instance_xml(self, driver, vif, image_meta=None):
        # merge the bandwidth quotas into the default flavor's extra specs so
        # the generated XML contains <bandwidth> limits
        default_inst_type = flavors.get_default_flavor()
        extra_specs = default_inst_type['extra_specs'].items()
        quota_bandwidth = self.bandwidth.items()
        default_inst_type['extra_specs'] = dict(extra_specs +
                                                quota_bandwidth)
        conf = self._get_conf()
        nic = driver.get_config(self.instance, vif, image_meta,
                                default_inst_type)
        conf.add_device(nic)
        return conf.to_xml()

    def test_multiple_nics(self):
        conf = self._get_conf()
        # Tests multiple nic configuration and that target_dev is
        # set for each
        nics = [{'net_type': 'bridge',
                 'mac_addr': '00:00:00:00:00:0b',
                 'source_dev': 'b_source_dev',
                 'target_dev': 'b_target_dev'},
                {'net_type': 'ethernet',
                 'mac_addr': '00:00:00:00:00:0e',
                 'source_dev': 'e_source_dev',
                 'target_dev': 'e_target_dev'},
                {'net_type': 'direct',
                 'mac_addr': '00:00:00:00:00:0d',
                 'source_dev': 'd_source_dev',
                 'target_dev': 'd_target_dev'}]

        for nic in nics:
            nic_conf = vconfig.LibvirtConfigGuestInterface()
            nic_conf.net_type = nic['net_type']
            nic_conf.target_dev = nic['target_dev']
            nic_conf.mac_addr = nic['mac_addr']
            nic_conf.source_dev = nic['source_dev']
            conf.add_device(nic_conf)

        xml = conf.to_xml()
        doc = etree.fromstring(xml)
        for nic in nics:
            path = "./devices/interface/[@type='%s']" % nic['net_type']
            node = doc.find(path)
            self.assertEqual(nic['net_type'], node.get("type"))
            self.assertEqual(nic['mac_addr'],
                             node.find("mac").get("address"))
            self.assertEqual(nic['target_dev'],
                             node.find("target").get("dev"))

    def test_model_novirtio(self):
        self.flags(use_virtio_for_bridges=False,
                   virt_type='kvm',
                   group='libvirt')

        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        xml = self._get_instance_xml(d, self.vif_bridge)
        self._assertModel(xml)

    def test_model_kvm(self):
        self.flags(use_virtio_for_bridges=True,
                   virt_type='kvm',
                   group='libvirt')

        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        xml = self._get_instance_xml(d, self.vif_bridge)
        self._assertModel(xml, "virtio")

    def test_model_kvm_custom(self):
        self.flags(use_virtio_for_bridges=True,
                   virt_type='kvm',
                   group='libvirt')

        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        # the image can override the NIC model via its properties
        image_meta = {'properties': {'hw_vif_model': 'e1000'}}
        xml = self._get_instance_xml(d, self.vif_bridge,
                                     image_meta)
        self._assertModel(xml, "e1000")

    def test_model_kvm_bogus(self):
        self.flags(use_virtio_for_bridges=True,
                   virt_type='kvm',
                   group='libvirt')

        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        # an unknown model name must be rejected
        image_meta = {'properties': {'hw_vif_model': 'acme'}}
        self.assertRaises(exception.UnsupportedHardware,
                          self._get_instance_xml,
                          d,
                          self.vif_bridge,
                          image_meta)

    def _test_model_qemu(self, *vif_objs, **kw):
        libvirt_version = kw.get('libvirt_version')
        self.flags(use_virtio_for_bridges=True,
                   virt_type='qemu',
                   group='libvirt')

        for vif_obj in vif_objs:
            d = vif.LibvirtGenericVIFDriver(self._get_conn())
            if libvirt_version is not None:
                d.libvirt_version = libvirt_version

            xml = self._get_instance_xml(d, vif_obj)

            doc = etree.fromstring(xml)

            # the flavor's quota extra specs must surface as <bandwidth>
            bandwidth = doc.find('./devices/interface/bandwidth')
            self.assertNotEqual(bandwidth, None)

            inbound = bandwidth.find('inbound')
            self.assertEqual(inbound.get("average"),
                             self.bandwidth['quota:vif_inbound_average'])
            self.assertEqual(inbound.get("peak"),
                             self.bandwidth['quota:vif_inbound_peak'])
            self.assertEqual(inbound.get("burst"),
                             self.bandwidth['quota:vif_inbound_burst'])

            outbound = bandwidth.find('outbound')
            self.assertEqual(outbound.get("average"),
                             self.bandwidth['quota:vif_outbound_average'])
            self.assertEqual(outbound.get("peak"),
                             self.bandwidth['quota:vif_outbound_peak'])
            self.assertEqual(outbound.get("burst"),
                             self.bandwidth['quota:vif_outbound_burst'])

            self._assertModel(xml, "virtio", "qemu")

    def test_model_qemu_no_firewall(self):
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        self._test_model_qemu(
            self.vif_bridge,
            self.vif_8021qbh,
            self.vif_8021qbg,
            self.vif_iovisor,
            self.vif_mlnx,
        )
        self._test_model_qemu(self.vif_ovs,
                              libvirt_version=vif.LIBVIRT_OVS_VPORT_VERSION)

    def test_model_qemu_iptables(self):
        self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
        self._test_model_qemu(
            self.vif_bridge,
            self.vif_ovs,
            self.vif_ivs,
            self.vif_8021qbh,
            self.vif_8021qbg,
            self.vif_iovisor,
            self.vif_mlnx,
        )

    def test_model_xen(self):
        self.flags(use_virtio_for_bridges=True,
                   virt_type='xen',
                   group='libvirt')

        d = vif.LibvirtGenericVIFDriver(self._get_conn("xen:///system"))
        xml = self._get_instance_xml(d, self.vif_bridge)
        self._assertModel(xml)

    def test_generic_driver_none(self):
        # a VIF without a type must be rejected by the generic driver
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        self.assertRaises(exception.NovaException,
                          self._get_instance_xml,
                          d,
                          self.vif_none)

    def _check_bridge_driver(self, d, vif, br_want):
        xml = self._get_instance_xml(d, vif)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
                                     self.vif_bridge, br_want, 1)

    def test_generic_driver_bridge(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        self._check_bridge_driver(d,
                                  self.vif_bridge,
                                  self.vif_bridge['network']['bridge'])

    def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        xml = self._get_instance_xml(d, vif)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
                                     self.vif_ivs, prefix=dev_prefix)
        # ethernet mode carries an empty script path
        script = node.find("script").get("path")
        self.assertEqual(script, "")

    def _check_ovs_ethernet_driver(self, d, vif, dev_prefix):
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        xml = self._get_instance_xml(d, vif)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
                                     self.vif_ovs, prefix=dev_prefix)
        script = node.find("script").get("path")
        self.assertEqual(script, "")

    def test_ovs_ethernet_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
        self._check_ovs_ethernet_driver(d,
                                        self.vif_ovs,
                                        "tap")

    def test_unplug_ivs_ethernet(self):
        # unplug must swallow command failures rather than raise
        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
        with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
            delete.side_effect = processutils.ProcessExecutionError
            d.unplug_ivs_ethernet(None, self.vif_ovs)

    def test_unplug_ovs_hybrid(self):
        calls = {
            'device_exists': [mock.call('qbrvif-xxx-yyy'),
                              mock.call('qvovif-xxx-yyy')],
            'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
                                  'qvbvif-xxx-yyy', run_as_root=True),
                        mock.call('ip', 'link', 'set',
                                  'qbrvif-xxx-yyy', 'down', run_as_root=True),
                        mock.call('brctl', 'delbr',
                                  'qbrvif-xxx-yyy', run_as_root=True),
                        mock.call('ovs-vsctl', '--timeout=120', 'del-port',
                                  'br0', 'qvovif-xxx-yyy', run_as_root=True),
                        mock.call('ip', 'link', 'delete', 'qvovif-xxx-yyy',
                                  run_as_root=True,
                                  check_exit_code=[0, 2, 254])]
        }
        with contextlib.nested(
                mock.patch.object(linux_net, 'device_exists',
                                  return_value=True),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(linux_net, 'delete_ivs_vif_port')
        ) as (device_exists, execute, delete_ivs_vif_port):
            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
            d.unplug_ovs_hybrid(None, self.vif_ovs)
            device_exists.assert_has_calls(calls['device_exists'])
            execute.assert_has_calls(calls['execute'])
            delete_ivs_vif_port.assert_has_calls([])

    def test_unplug_ovs_hybrid_bridge_does_not_exist(self):
        # when the qbr bridge is gone, only the OVS port removal runs
        calls = {
            'device_exists': [mock.call('qbrvif-xxx-yyy'),
                              mock.call('qvovif-xxx-yyy')],
            'execute': [mock.call('ovs-vsctl', '--timeout=120', 'del-port',
                                  'br0', 'qvovif-xxx-yyy', run_as_root=True)]
        }
        with contextlib.nested(
                mock.patch.object(linux_net, 'device_exists',
                                  return_value=False),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(linux_net, 'delete_ivs_vif_port')
        ) as (device_exists, execute, delete_ivs_vif_port):
            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
            d.unplug_ovs_hybrid(None, self.vif_ovs)
            device_exists.assert_has_calls(calls['device_exists'])
            execute.assert_has_calls(calls['execute'])
            delete_ivs_vif_port.assert_has_calls([])

    def test_unplug_ivs_hybrid(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
        with mock.patch.object(utils, 'execute') as execute:
            execute.side_effect = processutils.ProcessExecutionError
            d.unplug_ivs_hybrid(None, self.vif_ivs)

    def test_unplug_iovisor(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
        with mock.patch.object(utils, 'execute') as execute:
            execute.side_effect = processutils.ProcessExecutionError
            mynetwork = network_model.Network(id='network-id-xxx-yyy-zzz',
                                              label='mylabel')
            myvif = network_model.VIF(id='vif-xxx-yyy-zzz',
                                      address='ca:fe:de:ad:be:ef',
                                      network=mynetwork)
            d.unplug_iovisor(None, myvif)

    @mock.patch('nova.network.linux_net.device_exists')
    def test_plug_iovisor(self, device_exists):
        device_exists.return_value = True
        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
        with mock.patch.object(utils, 'execute') as execute:
            execute.side_effect = processutils.ProcessExecutionError
            instance = {
                'name': 'instance-name',
                'uuid': 'instance-uuid',
                'project_id': 'myproject'
            }
            # NOTE(review): exercises plug_iovisor with the IVS fixture -
            # confirm whether vif_iovisor was intended here
            d.plug_iovisor(instance, self.vif_ivs)

    def test_ivs_ethernet_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
        self._check_ivs_ethernet_driver(d,
                                        self.vif_ivs,
                                        "tap")

    def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
        # NOTE(review): want_iface_id is accepted but never used here
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        xml = self._get_instance_xml(d, vif)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
                                     vif, vif['devname'])

    def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        xml = self._get_instance_xml(d, vif)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
                                     vif, "br0")
        vp = node.find("virtualport")
        self.assertEqual(vp.get("type"), "openvswitch")
        iface_id_found = False
        for p_elem in vp.findall("parameters"):
            iface_id = p_elem.get("interfaceid", None)
            if iface_id:
                self.assertEqual(iface_id, want_iface_id)
                iface_id_found = True
        self.assertTrue(iface_id_found)

    def test_generic_ovs_virtualport_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
        want_iface_id = self.vif_ovs['ovs_interfaceid']
        self._check_ovs_virtualport_driver(d,
                                           self.vif_ovs,
                                           want_iface_id)

    def test_generic_ivs_virtualport_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
        want_iface_id = self.vif_ivs['ovs_interfaceid']
        self._check_ivs_virtualport_driver(d,
                                           self.vif_ivs,
                                           want_iface_id)

    def _check_neutron_hybrid_driver(self, d, vif, br_want):
        self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
        xml = self._get_instance_xml(d, vif)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
                                     vif, br_want, 1)

    def test_generic_hybrid_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        br_want = "qbr" + self.vif_ovs['id']
        br_want = br_want[:network_model.NIC_NAME_LEN]
        self._check_neutron_hybrid_driver(d,
                                          self.vif_ovs,
                                          br_want)

    def test_ivs_hybrid_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        br_want = "qbr" + self.vif_ivs['id']
        br_want = br_want[:network_model.NIC_NAME_LEN]
        self._check_neutron_hybrid_driver(d,
                                          self.vif_ivs,
                                          br_want)

    def test_mlnx_direct_vif_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        xml = self._get_instance_xml(d,
                                     self.vif_mlnx)
        node = self._get_node(xml)
        self.assertEqual(node.get("type"), "direct")
        self._assertTypeEquals(node, "direct", "source",
                               "dev", "eth-xxx-yyy-zzz")
        self._assertTypeEquals(node, "direct", "source",
                               "mode", "passthrough")
        self._assertMacEquals(node, self.vif_mlnx)
        self._assertModel(xml, "virtio")

    def test_midonet_ethernet_vif_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        br_want = self.vif_midonet['devname']
        xml = self._get_instance_xml(d, self.vif_midonet)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
                                     self.vif_midonet, br_want)

    def test_generic_8021qbh_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        xml = self._get_instance_xml(d, self.vif_8021qbh)
        node = self._get_node(xml)
        self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
        self._assertMacEquals(node, self.vif_8021qbh)
        vp = node.find("virtualport")
        self.assertEqual(vp.get("type"), "802.1Qbh")
        profile_id_found = False
        for p_elem in vp.findall("parameters"):
            wantparams = self.vif_8021qbh['qbh_params']
            profile_id = p_elem.get("profileid", None)
            if profile_id:
                self.assertEqual(profile_id,
                                 wantparams['profileid'])
                profile_id_found = True
        self.assertTrue(profile_id_found)

    def test_generic_iovisor_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        # NOTE(review): despite its name this test renders the IVS fixture,
        # not vif_iovisor - confirm intent
        br_want = self.vif_ivs['devname']
        xml = self._get_instance_xml(d, self.vif_ivs)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
                                     self.vif_ivs, br_want)

    def test_generic_8021qbg_driver(self):
        d = vif.LibvirtGenericVIFDriver(self._get_conn())
        xml = self._get_instance_xml(d, self.vif_8021qbg)
        node = self._get_node(xml)
        self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
        self._assertMacEquals(node, self.vif_8021qbg)
        vp = node.find("virtualport")
        self.assertEqual(vp.get("type"), "802.1Qbg")
        manager_id_found = False
        type_id_found = False
        typeversion_id_found = False
        instance_id_found = False
        for p_elem in vp.findall("parameters"):
            wantparams = self.vif_8021qbg['qbg_params']
            manager_id = p_elem.get("managerid", None)
            type_id = p_elem.get("typeid", None)
            typeversion_id = p_elem.get("typeidversion", None)
            instance_id = p_elem.get("instanceid", None)
            if manager_id:
                self.assertEqual(manager_id,
                                 wantparams['managerid'])
                manager_id_found = True
            if type_id:
                self.assertEqual(type_id,
                                 wantparams['typeid'])
                type_id_found = True
            if typeversion_id:
                self.assertEqual(typeversion_id,
                                 wantparams['typeidversion'])
                typeversion_id_found = True
            if instance_id:
                self.assertEqual(instance_id,
                                 wantparams['instanceid'])
                instance_id_found = True
        self.assertTrue(manager_id_found)
        self.assertTrue(type_id_found)
        self.assertTrue(typeversion_id_found)
        self.assertTrue(instance_id_found)
""" Creates the BitFlag and BitFlagType object, which enables efficient storage of the boolean array for each Handle catch points. """ from collections import OrderedDict as ODict from sqlalchemy.types import TypeDecorator, Integer from sqlalchemy.ext.mutable import Mutable class BitFlag(Mutable, object): """ An object used to encode and decode a boolean array as an an integer representing bitwise logic-flags. There are 7 hardcoded flags: - raise - warn - email - dblog - txtlog - stdout - report Each can be set to True or False, with convenience either at instantiation, or key-base set operations. Example of instatiation, setting the email and stdout flag to True:: BitFlag(['email','stdout']) Example of instatiation, setting the email then, later setting stdout flag to True:: bf = BitFlag(['email']) bf['stdout'] = True After either running either of these, the BitFlag will have a value of:: >>> bf.val == 36 True >>> print bf raise warn EMAIL dblog txtlog STDOUT report >>> print bf.bin_str 00100100 >>> print bf.email True ...because the 3rd and 6th bit are set. .. warning:: Flag state can be read from the accessors named after the flags, however, they can't be written to. """ flags = ['raise', 'warn', 'email', 'dblog', 'txtlog', 'stdout', 'report'] def __init__(self, obj, defaultflags=None): """ :param obj, (int, dict): either the decimal form of the bitwise array, or a dictionary (complete or otherwise) of the form {flag : bool, flag : bool, ...} :param defaultflags, dict: a dictionary representing the default for one or more flags. Only applicable when a dictionary is passed to obj. It's ignored when obj is an integer. """ # calculate an msb-like number, which is actually # the msb * 2 to get one more digit than # the number of flags self.msb = 2 ** (len(BitFlag.flags) + 1) # if an integer was passed... 
# convert it to a boolean array if isinstance(obj, int): self.val = obj % self.msb tmp = self.val + self.msb self.bools = [] while tmp != 0: self.bools.append(tmp % 2 == 1) tmp >>= 1 self.bools = self.bools[:-1] for i, key in enumerate(BitFlag.flags): setattr(self, key, self.bools[i]) # a dict of the form {flags : bool, } was passed... # convert it to the boolean array just the same. elif isinstance(obj, (dict, list)): if isinstance(obj, list): obj = dict(zip(obj, [True] * len(obj))) # if there are defaultflags, use them, otherwise assume all flags # are unset if defaultflags: defaults = defaultflags else: defaults = zip(BitFlag.flags, [False] * len(BitFlag.flags)) defaults = ODict(defaults) self.bools = [] self.val = 0 for i, key in enumerate(BitFlag.flags): if key in obj: val = obj[key] else: val = defaults[key] setattr(self, key, val) self.bools.append(val) if val: self.val += 2 ** i # coerce is required to complete the SQLA mutability contract. @classmethod def coerce(cls, key, value): if not isinstance(value, BitFlag): if isinstance(value, int): return BitFlag(value) return Mutable.coerce(key, value) else: return value @property def bin(self): """ the binary equivalent """ return bin(self.val) @property def bin_str(self): """ the binary equivalent, as a string """ return str(bin(self.val + self.msb))[3:] def asdict(self): """ convert the flags to a dictionary, with keys as flags. 
""" return {bit: self[bit] for bit in BitFlag.flags} def flagged(self): return [b.upper() if self[b] else b for b in BitFlag.flags] def __str__(self): tmp = self.flagged() return " ".join(tmp) def __repr__(self): return "BitFlag({})".format(self.val) def __getitem__(self, key): if isinstance(key, int): return self.bools[key] else: return self.__getattribute__(key) def __setitem__(self, key, value): if isinstance(key, int): setattr(self, BitFlag.flags[key], value) self.bools[key] = value else: setattr(self, key, value) self.bools[BitFlag.flags.index(key)] = value # recalculate the value from scratch... self.val = 0 for i, val in enumerate(self.bools): if val: self.val += 2 ** i self.changed() def __call__(self): """ Calling a BitFlag object returns it's integer value. :return: int """ return self.val def __and__(self, other): """ :param other: int, BitFlag BitFlag and integers work with the and operator using bitwise logic. :return: BitFlag """ if isinstance(other, BitFlag): return BitFlag(other() & self()) elif isinstance(other, int): return BitFlag(other & self()) def __or__(self, other): """ :param other: int, BitFlag BitFlag and integers work with the or operator using bitwise logic. :return: BitFlag """ if isinstance(other, BitFlag): return BitFlag(other() | self()) elif isinstance(other, int): return BitFlag(other | self()) class BitFlagType(TypeDecorator): """ SQLAlchemy type definition for the BitFlag implementation. A BitFlag is a python object that wraps bitwise logic for hardcoded flags into a single integer value for quick database access and use.""" impl = Integer def __init__(self, *args, **kwargs): super(BitFlagType, self).__init__(*args, **kwargs) def process_bind_param(self, value, dialect): """ When SQLAlchemy binds a BitFlag, it converts it to an integer for storage in the database. 
""" if value is not None: value = value.val return value def process_result_value(self, value, dialect): """ When SQLAlchemy gets an integer from a BitFlagType column, it converts it to a BitFlag object. """ if value is not None: value = BitFlag(value) return value def copy(self): return BitFlagType()
import unittest import mock import logging import tests.utils as utils from tests.utils import compare_json_return_errormessage as error_message # Import of code to be tested: import esgfpid.assistant.publish from esgfpid.exceptions import ArgumentError # Logging LOGGER = logging.getLogger(__name__) LOGGER.addHandler(logging.NullHandler()) # Test resources: from resources.TESTVALUES import * import resources.TESTVALUES as TESTHELPERS from resources.TESTVALUES import TEST_RABBIT_CREDS_OPEN from resources.TESTVALUES import TEST_RABBIT_CREDS_TRUSTED # Some tests rely on open nodes import tests.globalvar import globalvar if globalvar.RABBIT_OPEN_NOT_ALLOWED: print('Skipping tests that need open RabbitMQ nodes in module "%s".' % __name__) class ConnectorTestCase(unittest.TestCase): def setUp(self): LOGGER.info('######## Next test (%s) ##########', __name__) def tearDown(self): LOGGER.info('#############################') # # Init # ''' Test the constructor, with trusted (and open) node. If open nodes are allowed, this should work fine. Otherwise, we expect an exception. ''' @unittest.skipIf(not(globalvar.RABBIT_OPEN_NOT_ALLOWED), '(this test cannot cope with open rabbit nodes)') def test_init_trusted_and_open_ok_1(self): # Preparations: Connector args. # Use trusted and open node: rabbit_creds = [copy.deepcopy(TEST_RABBIT_CREDS_TRUSTED), copy.deepcopy(TEST_RABBIT_CREDS_OPEN)] args = TESTHELPERS.get_connector_args( messaging_service_credentials = rabbit_creds ) # Run code to be tested and check exception: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Missing password', str(e.exception)) ''' Test the constructor, with trusted (and open) node. If open nodes are allowed, this should work fine. Otherwise, we expect an exception. 
''' @unittest.skipIf(globalvar.RABBIT_OPEN_NOT_ALLOWED, '(this test uses open rabbit nodes)') def test_init_trusted_and_open_ok_2(self): # Preparations: Connector args. # Use trusted and open node: rabbit_creds = [copy.deepcopy(TEST_RABBIT_CREDS_TRUSTED), copy.deepcopy(TEST_RABBIT_CREDS_OPEN)] args = TESTHELPERS.get_connector_args( messaging_service_credentials = rabbit_creds ) # Run code to be tested: Connector constructior testconnector = esgfpid.Connector(**args) # Check result: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) # Check result: Did the module get the right number of # trusted and open rabbit nodes? node_manager = testconnector._Connector__coupler._Coupler__rabbit_message_sender._RabbitMessageSender__node_manager self.assertEqual(node_manager.get_num_left_trusted(), 1) self.assertEqual(node_manager.get_num_left_open(),1) def test_init_no_prefix(self): # Preparations: Connector args. rabbit_creds = [copy.deepcopy(TEST_RABBIT_CREDS_TRUSTED)] args = TESTHELPERS.get_connector_args( messaging_service_credentials = rabbit_creds, handle_prefix = None ) # Run code to be tested and check exception: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Missing handle prefix', str(e.exception)) def test_init_wrong_prefix(self): # Preparations: Connector args. rabbit_creds = [copy.deepcopy(TEST_RABBIT_CREDS_TRUSTED)] args = TESTHELPERS.get_connector_args( messaging_service_credentials = rabbit_creds, handle_prefix = '987654321' ) # Run code to be tested and check exception: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('is not a valid prefix', str(e.exception)) def test_init_no_rabbit_url(self): # Preparations: Connector args. 
rabbit_creds = dict( user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested and check exception: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Missing url for', str(e.exception)) def test_init_no_rabbit_user(self): # Preparations: Connector args. rabbit_creds = dict( url = RABBIT_URL_TRUSTED, password = RABBIT_PASSWORD ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested and check exception: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Missing user', str(e.exception)) ''' Test the constructor, with trusted node. ''' def test_init_trusted_only_ok(self): # Preparation: Connector args. # Use trusted node: rabbit_creds = [copy.deepcopy(TEST_RABBIT_CREDS_TRUSTED)] args = TESTHELPERS.get_connector_args( messaging_service_credentials = rabbit_creds ) # Run code to be tested: Connector constructor testconnector = esgfpid.Connector(**args) # Check results: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) # Check results: Did the module get the right number of # trusted and open rabbit nodes? node_manager = testconnector._Connector__coupler._Coupler__rabbit_message_sender._RabbitMessageSender__node_manager self.assertEqual(node_manager.get_num_left_trusted(), 1) self.assertEqual(node_manager.get_num_left_open(),0) ''' Test the constructor, with trusted node. ''' def test_init_trusted_only_more_args_ok(self): # Preparation: Connector args. 
# Use trusted node: rabbit_creds = TESTHELPERS.get_rabbit_credentials(vhost='foo', port=666, ssl_enabled=True) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: Connector constructor testconnector = esgfpid.Connector(**args) # Check results: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) # Check results: Did the module get the right number of # trusted and open rabbit nodes? node_manager = testconnector._Connector__coupler._Coupler__rabbit_message_sender._RabbitMessageSender__node_manager self.assertEqual(node_manager.get_num_left_trusted(), 1) self.assertEqual(node_manager.get_num_left_open(),0) # Check: Were the right values passed? node_manager.set_next_host() curr = node_manager._NodeManager__current_node self.assertEqual(curr['vhost'],'foo') self.assertEqual(curr['port'],666) self.assertEqual(curr['ssl_enabled'],True) ''' Test the constructor, with only open nodes. ''' @unittest.skipIf(not(globalvar.RABBIT_OPEN_NOT_ALLOWED), '(this test cannot deal with open rabbit nodes)') def test_init_open_ok(self): # Preparation: Connector args. # Use open node: rabbit_creds = [copy.deepcopy(TEST_RABBIT_CREDS_OPEN)] args = TESTHELPERS.get_connector_args( messaging_service_credentials = rabbit_creds ) # Run code to be tested and check exception: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Missing password', str(e.exception)) ''' Test the constructor, with only open nodes. ''' @unittest.skipIf(globalvar.RABBIT_OPEN_NOT_ALLOWED, '(this test uses open rabbit nodes)') def test_init_open_ok_2(self): # Preparation: Connector args. # Use open node: rabbit_creds = [copy.deepcopy(TEST_RABBIT_CREDS_OPEN)] args = TESTHELPERS.get_connector_args( messaging_service_credentials = rabbit_creds ) # Run code to be tested: Connector constructor testconnector = esgfpid.Connector(**args) # Check results: Did init work? 
self.assertIsInstance(testconnector, esgfpid.Connector) # Check results: Did the module get the right number of # trusted and open rabbit nodes? node_manager = testconnector._Connector__coupler._Coupler__rabbit_message_sender._RabbitMessageSender__node_manager self.assertEqual(node_manager.get_num_left_trusted(), 0) self.assertEqual(node_manager.get_num_left_open(),1) def test_init_rabbit_user_as_list(self): # Preparations: Connector args. rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = [RABBIT_USER_TRUSTED], password = RABBIT_PASSWORD ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: testconnector = esgfpid.Connector(**args) # Check result: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) def test_init_too_many_rabbit_users(self): # Preparations: Connector args. rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = [RABBIT_USER_TRUSTED, 'johndoe', 'alicedoe'], password = RABBIT_PASSWORD ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Wrong type', str(e.exception)) def test_init_vhost_no_string(self): # Preparations: Connector args. rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD, vhost = 123 ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Wrong type', str(e.exception)) def test_init_sslenabled_no_bool(self): # Preparations: Connector args. 
rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD, ssl_enabled = 123 ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Wrong type', str(e.exception)) def test_init_sslenabled_string_bool_true(self): # Preparations: Connector args. rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD, ssl_enabled = 'tRuE' ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: testconnector = esgfpid.Connector(**args) # Check result: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) # Check: Were the right values passed? node_manager = testconnector._Connector__coupler._Coupler__rabbit_message_sender._RabbitMessageSender__node_manager node_manager.set_next_host() curr = node_manager._NodeManager__current_node self.assertEqual(curr['ssl_enabled'],True) def test_init_sslenabled_string_bool_false(self): # Preparations: Connector args. rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD, ssl_enabled = 'fAlSe' ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: testconnector = esgfpid.Connector(**args) # Check result: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) # Check: Were the right values passed? node_manager = testconnector._Connector__coupler._Coupler__rabbit_message_sender._RabbitMessageSender__node_manager node_manager.set_next_host() curr = node_manager._NodeManager__current_node self.assertEqual(curr['ssl_enabled'],False) def test_init_sslenabled_string_bool_other(self): # Preparations: Connector args. 
rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD, ssl_enabled = 'yes' ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Wrong type', str(e.exception)) def test_init_sslenabled_string_bool_other(self): # Preparations: Connector args. rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD, ssl_enabled = '', vhost = '', port = '' ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: testconnector = esgfpid.Connector(**args) # Check result: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) # Check: Were the right values passed? node_manager = testconnector._Connector__coupler._Coupler__rabbit_message_sender._RabbitMessageSender__node_manager node_manager.set_next_host() curr = node_manager._NodeManager__current_node self.assertEqual(curr['ssl_enabled'],None) self.assertEqual(curr['vhost'],None) self.assertEqual(curr['port'],None) def test_init_port_no_int(self): # Preparations: Connector args. rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD, port = 'foo' ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: with self.assertRaises(ArgumentError) as e: testconnector = esgfpid.Connector(**args) # Check result: Error message ok? self.assertIn('Wrong type', str(e.exception)) def test_init_port_string_int(self): # Preparations: Connector args. 
rabbit_creds = dict( url = RABBIT_URL_TRUSTED, user = RABBIT_USER_TRUSTED, password = RABBIT_PASSWORD, port = '123' ) args = TESTHELPERS.get_connector_args( messaging_service_credentials = [rabbit_creds] ) # Run code to be tested: testconnector = esgfpid.Connector(**args) # Check result: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) # Check: Were the right values passed? node_manager = testconnector._Connector__coupler._Coupler__rabbit_message_sender._RabbitMessageSender__node_manager node_manager.set_next_host() curr = node_manager._NodeManager__current_node self.assertEqual(curr['ssl_enabled'],None) self.assertEqual(curr['vhost'],None) self.assertEqual(curr['port'],123) ''' Test if the solr URL is passed to the consumer in the message. (The client wants to tell the consumer which solr instance to use for looking up dataset versions. Consumer has a default, if it is not passed. Consumer does not necessarily use solr, it might also use the handle database.) ''' def test_passing_consumer_solr_url_ok(self): # Run code to be tested: Create connector with consumer solr url: # (And with the two necessary args for dataset publication) args = TESTHELPERS.get_connector_args( consumer_solr_url='fake_solr_whatever', thredds_service_path='foo', data_node='bar' ) testconnector = esgfpid.Connector(**args) # Run code to be tested: Init dataset wizard wizard = testconnector.create_publication_assistant( drs_id='baz', version_number=2016, is_replica=False ) # Check result: Did init work? self.assertIsInstance(testconnector, esgfpid.Connector) self.assertIsInstance(wizard, esgfpid.assistant.publish.DatasetPublicationAssistant) # Check result: Was the consumer solr url passed? self.assertIsNotNone(testconnector._Connector__consumer_solr_url) self.assertIsNotNone(wizard._DatasetPublicationAssistant__consumer_solr_url) ''' Test whether the correct defaults are set if the connector is initialized with the minimum arguments. 
''' def test_init_default_args_to_coupler(self): # Preparations: Minimum args for connector args = TESTHELPERS.get_connector_args() # Run code to be tested: Connector constructor testconnector = esgfpid.Connector(**args) # Check results: Check if the correct defaults # were set (i.e. passed to coupler.) coupler_args = testconnector._Connector__coupler.args self.assertEqual(coupler_args['data_node'],None) self.assertEqual(coupler_args['thredds_service_path'],None) self.assertEqual(coupler_args['test_publication'],False) self.assertEqual(coupler_args['solr_url'],None) self.assertEqual(coupler_args['solr_switched_off'],True) self.assertEqual(coupler_args['solr_https_verify'],False) self.assertEqual(coupler_args['disable_insecure_request_warning'],False) self.assertEqual(coupler_args['message_service_synchronous'],False) self.assertEqual(coupler_args['consumer_solr_url'],None) ''' Test whether the correct defaults are set if the connector is initialized with the minimum arguments. ''' def test_init_not_default_args_to_coupler(self): # Run code to be tested: Make a connector # with many non-default args: args = TESTHELPERS.get_connector_args( data_node=DATA_NODE, thredds_service_path=THREDDS, test_publication=True, solr_url=SOLR_URL_LIBRARY, solr_https_verify=True, disable_insecure_request_warning=True, message_service_synchronous=True, consumer_solr_url=SOLR_URL_CONSUMER ) testconnector = esgfpid.Connector(**args) # Check results: Check if the correct defaults # were set (i.e. passed to coupler.) 
coupler_args = testconnector._Connector__coupler.args self.assertEqual(coupler_args['data_node'],DATA_NODE) self.assertEqual(coupler_args['thredds_service_path'],THREDDS) self.assertEqual(coupler_args['test_publication'],True) self.assertEqual(coupler_args['solr_url'],SOLR_URL_LIBRARY) self.assertEqual(coupler_args['solr_switched_off'],False) self.assertEqual(coupler_args['solr_https_verify'],True) self.assertEqual(coupler_args['disable_insecure_request_warning'],True) self.assertEqual(coupler_args['message_service_synchronous'],True) self.assertEqual(coupler_args['consumer_solr_url'],SOLR_URL_CONSUMER) ''' Check if solr is not switched off if an URL given. ''' def test_init_solr_not_off(self): # Run code to be tested: Make connector with solr url args = TESTHELPERS.get_connector_args(solr_url='foo') testconnector = esgfpid.Connector(**args) # Check results: Check if the correct defaults # were set (i.e. passed to coupler.) coupler_args = testconnector._Connector__coupler.args self.assertEqual(coupler_args['solr_url'],'foo') self.assertEqual(coupler_args['solr_switched_off'],False) ''' Check if solr is not switched off if an URL given. ''' def test_init_solr_off(self): # Preparations: Minimum args for connector args = TESTHELPERS.get_connector_args() # Run code to be tested: Connector constructor testconnector = esgfpid.Connector(**args) # Check results: Check if the correct defaults # were set (i.e. passed to coupler.) coupler_args = testconnector._Connector__coupler.args self.assertEqual(coupler_args['solr_url'],None) self.assertEqual(coupler_args['solr_switched_off'],True) # # Publication # ''' This passes the correct arguments. 
''' def test_create_publication_assistant_ok(self): # Preparations: Create connector with data node and thredds: args = TESTHELPERS.get_connector_args( thredds_service_path='foo', data_node='bar' ) testconnector = esgfpid.Connector(**args) # Run code to be tested: Init dataset wizard wizard = testconnector.create_publication_assistant( drs_id='baz', version_number=2016, is_replica=False ) # Check result: self.assertIsInstance(wizard, esgfpid.assistant.publish.DatasetPublicationAssistant) ''' If we want to publish a dataset, "data_node" and "thredds_service_path" have to be specified in the beginning! ''' def test_create_publication_assistant_missing_thredds(self): # Preparations: Make connector without thredds: args = TESTHELPERS.get_connector_args(data_node = 'foo') testconnector = esgfpid.Connector(**args) # Run code to be tested and check exception: Init dataset wizard with self.assertRaises(esgfpid.exceptions.ArgumentError) as e: wizard = testconnector.create_publication_assistant( drs_id='bar', version_number=2016, is_replica=False ) # Check result: Error message ok? self.assertIn('No thredds_service_path given', str(e.exception)) ''' If we want to publish a dataset, "data_node" and "thredds_service_path" have to be specified in the beginning! ''' def test_create_publication_assistant_missing_datanode(self): # Preparations: Make connector without thredds: args = TESTHELPERS.get_connector_args(thredds_service_path = 'foo') testconnector = esgfpid.Connector(**args) # Run code to be tested and check exception: Init dataset wizard with self.assertRaises(esgfpid.exceptions.ArgumentError) as e: wizard = testconnector.create_publication_assistant( drs_id='bar', version_number=2016, is_replica=False ) # Check result: Error message ok? self.assertIn('No data_node given', str(e.exception)) # # Unpublication # ''' If we want to unpublish a dataset, "data_node" has to be specified in the beginning! 
''' def test_unpublish_all_versions_missing_data_node(self): # Preparations: Make patched connector without the # necessary data node (needed for unpublish) testconnector = TESTHELPERS.get_connector() TESTHELPERS.patch_with_rabbit_mock(testconnector) # Run code to be tested: Unpublish with self.assertRaises(esgfpid.exceptions.ArgumentError) as e: testconnector.unpublish_all_versions(drs_id=DRS_ID) # Check result: Error message ok? self.assertIn('No data_node given', str(e.exception)) ''' This passes the correct args. ''' def test_unpublish_one_version_ok(self): # Preparations: Make patched connector with data node (needed for unpublish) testconnector = TESTHELPERS.get_connector(data_node=DATA_NODE) TESTHELPERS.patch_with_rabbit_mock(testconnector) # Run code to be tested: Unpublish testconnector.unpublish_one_version( drs_id=DRS_ID, version_number=DS_VERSION ) # Check result: expected_rabbit_task = { "handle": DATASETHANDLE_HDL, "operation": "unpublish_one_version", "message_timestamp":"anydate", "aggregation_level":"dataset", "data_node": DATA_NODE, "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.unpubli-onevers', "drs_id":DRS_ID, "version_number": int(DS_VERSION) } received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector) is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg) self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg)) ''' This passes the correct args. 
    '''

    # NOTE(review): TESTHELPERS, DATA_NODE, DRS_ID, DS_VERSION, ERRATA,
    # ERRATA_SEVERAL, SOLR_URL_*, PREFIX_* and LOGGER are module-level test
    # fixtures defined earlier in this file (not visible in this chunk).

    def test_unpublish_one_version_by_handle_ok(self):
        # Preparations: Make patched connector with data node (needed for unpublish)
        testconnector = TESTHELPERS.get_connector(data_node=DATA_NODE)
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested: Unpublish
        # Here the dataset is identified directly by its handle, so no
        # drs_id/version_number is needed.
        testconnector.unpublish_one_version(
            dataset_handle = DATASETHANDLE_HDL
        )

        # Check result:
        # "drs_id" is None because the connector only knows the handle here.
        expected_rabbit_task = {
            "handle": DATASETHANDLE_HDL,
            "operation": "unpublish_one_version",
            "message_timestamp":"anydate",
            "aggregation_level":"dataset",
            "data_node": DATA_NODE,
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.unpubli-onevers',
            "drs_id": None,
        }
        received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector)
        is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg)
        self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg))

    '''
    We unpublish one version. For this, no solr url is needed, as
    the version is already specified. So the consumer_solr_url is
    not passed on.
    '''
    def test_unpublish_one_version_with_consumer_url_ok(self):

        # Preparations: Make connector, but without
        # solr mock. This is to see that with consumer_solr_url,
        # the library does not try to access solr.
        testconnector = TESTHELPERS.get_connector(
            consumer_solr_url=SOLR_URL_CONSUMER,
            data_node=DATA_NODE
        )
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested: Unpublish one version
        testconnector.unpublish_one_version(
            drs_id=DRS_ID,
            version_number=DS_VERSION
        )

        # Check result:
        # We don't get the consumer_solr_url, because it is only
        # needed for unpublishing all versions.
        expected_rabbit_task = {
            "handle": DATASETHANDLE_HDL,
            "operation": "unpublish_one_version",
            "message_timestamp":"anydate",
            "aggregation_level":"dataset",
            "data_node": DATA_NODE,
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.unpubli-onevers',
            "drs_id":DRS_ID,
            "version_number": int(DS_VERSION)
        }
        received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector)
        is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg)
        self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg))

    '''
    Test unpublishing all versions. For this, the library would
    call solr to find handles of all versions. We make our mock
    solr return None, so the library must tell the consumer to
    look for the versions itself.
    This time, we tell the consumer to use its default solr url.
    '''
    @mock.patch('esgfpid.coupling.Coupler.retrieve_datasethandles_or_versionnumbers_of_allversions')
    def test_unpublish_all_versions_nosolr_ok(self, solr_asker_patch):

        # Patch: Make the coupler return None when we ask it
        # to find dataset versions...
        mydict = dict(dataset_handles=None, version_numbers=None)
        solr_asker_patch.return_value = mydict

        # Preparations: Create patched connector
        testconnector = TESTHELPERS.get_connector(data_node=DATA_NODE)
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested:
        testconnector.unpublish_all_versions(drs_id=DRS_ID)

        # Check result:
        # No "consumer_solr_url" key: consumer falls back to its default solr.
        expected_rabbit_task = {
            "operation": "unpublish_all_versions",
            "message_timestamp": "anydate",
            "data_node": DATA_NODE,
            "aggregation_level":"dataset",
            "drs_id":DRS_ID,
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.unpubli-allvers',
        }
        received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector)
        is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg)
        self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg))

    def test_unpublish_all_versions_solr_off(self):

        # Preparations: Create patched connector
        # (solr_switched_off=True: library must not even try to contact solr)
        testconnector = TESTHELPERS.get_connector(
            data_node=DATA_NODE,
            solr_switched_off=True
        )
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested:
        testconnector.unpublish_all_versions(drs_id=DRS_ID)

        # Check result:
        expected_rabbit_task = {
            "operation": "unpublish_all_versions",
            "message_timestamp": "anydate",
            "data_node": DATA_NODE,
            "aggregation_level":"dataset",
            "drs_id":DRS_ID,
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.unpubli-allvers',
        }
        received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector)
        is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg)
        self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg))

    '''
    Test unpublishing all versions. For this, the library would
    call solr to find handles of all versions. We make our mock
    solr return None, so the library must tell the consumer to
    look for the versions itself.
    This time, we tell the consumer to use the solr url we
    provide.
    '''
    @mock.patch('esgfpid.coupling.Coupler.retrieve_datasethandles_or_versionnumbers_of_allversions')
    def test_unpublish_all_versions_nosolr__butconsumersolr_ok(self, solr_asker_patch):

        # Patch coupler
        mydict = dict(dataset_handles=None, version_numbers=None)
        solr_asker_patch.return_value = mydict

        # Preparations: Create patched connector
        testconnector = TESTHELPERS.get_connector(
            data_node = DATA_NODE,
            solr_url = SOLR_URL_LIBRARY,
            consumer_solr_url=SOLR_URL_CONSUMER
        )
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested:
        testconnector.unpublish_all_versions(
            drs_id=DRS_ID,
            data_node=DATA_NODE)

        # Check result:
        # This time "consumer_solr_url" IS forwarded in the message.
        expected_rabbit_task = {
            "operation": "unpublish_all_versions",
            "message_timestamp": "anydate",
            "data_node": DATA_NODE,
            "aggregation_level":"dataset",
            "drs_id":DRS_ID,
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.unpubli-allvers',
            "consumer_solr_url":SOLR_URL_CONSUMER
        }
        received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector)
        is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg)
        self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg))

    #
    # Errata
    #

    def test_add_errata_id_several_ok(self):

        # Preparations: Create patched connector
        # (Patched to avoid that message be sent, and to retrieve the created message)
        testconnector = TESTHELPERS.get_connector()
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested: Add errata
        testconnector.add_errata_ids(
            drs_id=DRS_ID,
            version_number=DS_VERSION,
            errata_ids=ERRATA_SEVERAL
        )

        # Check result: Was correct errata message created?
        expected_rabbit_task = {
            "handle": DATASETHANDLE_HDL,
            "operation": "add_errata_ids",
            "message_timestamp":"anydate",
            "errata_ids":ERRATA_SEVERAL,
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.errata-add',
            "drs_id":DRS_ID,
            "version_number":DS_VERSION
        }
        received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector)
        is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg)
        self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg))

    def test_add_errata_id_one_ok(self):

        # Preparations: Create patched connector
        # (Patched to avoid that message be sent, and to retrieve the created message)
        testconnector = TESTHELPERS.get_connector()
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested: Add errata
        testconnector.add_errata_ids(
            drs_id=DRS_ID,
            version_number=DS_VERSION,
            errata_ids=ERRATA
        )

        # Check result: Was correct errata message created?
        # A single errata id is wrapped into a list in the message.
        expected_rabbit_task = {
            "handle": DATASETHANDLE_HDL,
            "operation": "add_errata_ids",
            "message_timestamp":"anydate",
            "errata_ids":[ERRATA],
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.errata-add',
            "drs_id":DRS_ID,
            "version_number":DS_VERSION
        }
        received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector)
        is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg)
        self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg))

    def test_remove_errata_id_one_ok(self):

        # Preparations: Create patched connector
        # (Patched to avoid that message be sent, and to retrieve the created message)
        testconnector = TESTHELPERS.get_connector()
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested: Add errata
        testconnector.remove_errata_ids(
            drs_id=DRS_ID,
            version_number=DS_VERSION,
            errata_ids=ERRATA
        )

        # Check result: Was correct errata message created?
        expected_rabbit_task = {
            "handle": DATASETHANDLE_HDL,
            "operation": "remove_errata_ids",
            "message_timestamp":"anydate",
            "errata_ids":[ERRATA],
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.errata-rem',
            "drs_id":DRS_ID,
            "version_number":DS_VERSION
        }
        received_rabbit_msg = TESTHELPERS.get_received_message_from_rabbitmock(testconnector)
        is_same = utils.is_json_same(expected_rabbit_task, received_rabbit_msg)
        self.assertTrue(is_same, utils.compare_json_return_errormessage(expected_rabbit_task, received_rabbit_msg))

    #
    # Data Cart
    #

    def test_make_data_cart_pid(self):

        # Test variables
        # content2 differs from content1 only by a None value; both must
        # map to the same (content-independent) cart handle.
        content1 = {'foo':'foo', 'bar':'bar'}
        content2 = {'foo':'foo', 'bar': None}

        # Preparations: Create patched connector
        # (Patched to avoid that message be sent, and to retrieve the created message)
        testconnector = TESTHELPERS.get_connector()
        TESTHELPERS.patch_with_rabbit_mock(testconnector)

        # Run code to be tested: Create data cart PIDs
        # And retrieve the messages
        pid1 = testconnector.create_data_cart_pid(content1)
        pid2 = testconnector.create_data_cart_pid(content2)

        # Check result: Were the correct messages created?
        # NOTE(review): the UUID below is presumably derived from the cart's
        # dataset keys only (not values) — both contents share the keys.
        expected_handle_both_cases = PREFIX_WITH_HDL+"/b597a79e-1dc7-3d3f-b689-75ac5a78167f"
        expected_rabbit_task1 = {
            "handle": expected_handle_both_cases,
            "operation": "shopping_cart",
            "message_timestamp":"anydate",
            "data_cart_content":content1,
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.datacart'
        }
        expected_rabbit_task2 = {
            "handle": expected_handle_both_cases,
            "operation": "shopping_cart",
            "message_timestamp":"anydate",
            "data_cart_content":content2,
            "ROUTING_KEY":PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.datacart'
        }
        received_rabbit_msg1 = TESTHELPERS.get_received_message_from_rabbitmock(testconnector, 0)
        received_rabbit_msg2 = TESTHELPERS.get_received_message_from_rabbitmock(testconnector, 1)
        same1 = utils.is_json_same(expected_rabbit_task1, received_rabbit_msg1)
        same2 = utils.is_json_same(expected_rabbit_task2, received_rabbit_msg2)
        self.assertTrue(same1, error_message(expected_rabbit_task1, received_rabbit_msg1))
        self.assertTrue(same2, error_message(expected_rabbit_task2, received_rabbit_msg2))
        self.assertTrue(pid1==pid2, 'Both pids are not the same.')

    #
    # Threads
    #

    def test_start_messaging_thread(self):
        LOGGER.debug('Thread test')
        testconnector = TESTHELPERS.get_connector()
        rabbitmock = TESTHELPERS.patch_with_rabbit_mock(testconnector, mock.MagicMock())
        testconnector.start_messaging_thread()
        # The connector must delegate to the rabbit module's start().
        rabbitmock.start.assert_called_with()

    def test_finish_messaging_thread(self):
        LOGGER.debug('Thread test')
        testconnector = TESTHELPERS.get_connector()
        rabbitmock = TESTHELPERS.patch_with_rabbit_mock(testconnector, mock.MagicMock())
        testconnector.finish_messaging_thread()
        rabbitmock.finish.assert_called_with()

    def test_force_finish_messaging_thread(self):
        LOGGER.debug('Thread test')
        testconnector = TESTHELPERS.get_connector()
        rabbitmock = TESTHELPERS.patch_with_rabbit_mock(testconnector, mock.MagicMock())
        testconnector.force_finish_messaging_thread()
        rabbitmock.force_finish.assert_called_with()

    def test_make_handle(self):
        # The handle built from drs_id + version must equal the fixture handle.
        testconnector = TESTHELPERS.get_connector()
        received_handle = testconnector.make_handle_from_drsid_and_versionnumber(
            drs_id=DRS_ID,
            version_number=DS_VERSION
        )
        expected_handle = DATASETHANDLE_HDL
        self.assertEqual(received_handle, expected_handle)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib

import mock
from oslo_concurrency import processutils as putils
import six

from cinder import context
from cinder import exception
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
from cinder.volume.targets import iet


class TestIetAdmDriver(tf.TargetDriverFixture):
    """Unit tests for the IET (iSCSI Enterprise Target) admin driver.

    NOTE(review): this module patches '__builtin__.open' and raises
    StandardError, both of which exist only on Python 2 — confirm the
    supported interpreter before porting.
    """

    def setUp(self):
        super(TestIetAdmDriver, self).setUp()
        self.target = iet.IetAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)

    def test_get_target(self):
        """_get_target parses the tid out of a /proc-style session dump."""
        tmp_file = six.StringIO()
        tmp_file.write(
            'tid:1 name:iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n'  # noqa
            ' sid:844427031282176 initiator:iqn.1994-05.com.redhat:5a6894679665\n'  # noqa
            ' cid:0 ip:10.9.8.7 state:active hd:none dd:none')
        tmp_file.seek(0)
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual('1',
                             self.target._get_target(
                                 'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45'  # noqa
                             ))

            # Test the failure case: Failed to handle the config file
            mock_open.side_effect = StandardError()
            self.assertRaises(StandardError,
                              self.target._get_target,
                              '')

    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('cinder.utils.temporary_chown')
    def test_get_target_chap_auth(self, mock_chown, mock_exists):
        """CHAP username/password are read back from the target config."""
        tmp_file = six.StringIO()
        tmp_file.write(
            'Target iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n'  # noqa
            ' IncomingUser otzLy2UYbYfnP4zXLG5z 234Zweo38VGBBvrpK9nt\n'
            ' Lun 0 Path=/dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45,Type=fileio\n'  # noqa
        )
        tmp_file.seek(0)
        expected = ('otzLy2UYbYfnP4zXLG5z', '234Zweo38VGBBvrpK9nt')
        with mock.patch('__builtin__.open') as mock_open:
            ictx = context.get_admin_context()
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual(expected,
                             self.target._get_target_chap_auth(ictx,
                                                               self.test_vol))
            self.assertTrue(mock_open.called)

            # Test the failure case: Failed to handle the config file
            mock_open.side_effect = StandardError()
            self.assertRaises(StandardError,
                              self.target._get_target_chap_auth,
                              ictx,
                              self.test_vol)

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=0)
    @mock.patch('cinder.utils.execute')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('cinder.utils.temporary_chown')
    def test_create_iscsi_target(self, mock_chown, mock_exists,
                                 mock_execute, mock_get_targ):
        """create_iscsi_target returns the tid and handles config errors."""
        mock_execute.return_value = ('', '')
        tmp_file = six.StringIO()
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual(
                0,
                self.target.create_iscsi_target(
                    self.test_vol,
                    0,
                    0,
                    self.fake_volumes_dir))
            self.assertTrue(mock_execute.called)
            self.assertTrue(mock_open.called)
            self.assertTrue(mock_get_targ.called)

            # Test the failure case: Failed to chown the config file
            mock_open.side_effect = putils.ProcessExecutionError
            self.assertRaises(exception.ISCSITargetCreateFailed,
                              self.target.create_iscsi_target,
                              self.test_vol,
                              0,
                              0,
                              self.fake_volumes_dir)

            # Test the failure case: Failed to set new auth
            mock_execute.side_effect = putils.ProcessExecutionError
            self.assertRaises(exception.ISCSITargetCreateFailed,
                              self.target.create_iscsi_target,
                              self.test_vol,
                              0,
                              0,
                              self.fake_volumes_dir)

    @mock.patch('cinder.utils.execute')
    @mock.patch('os.path.exists', return_value=True)
    def test_update_config_file_failure(self, mock_exists, mock_execute):
        """update_config_file raises when the conf file is missing."""
        # Test the failure case: conf file does not exist
        mock_exists.return_value = False
        mock_execute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetCreateFailed,
                          self.target.update_config_file,
                          self.test_vol,
                          0,
                          self.fake_volumes_dir,
                          "foo bar")

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    @mock.patch('cinder.utils.execute')
    def test_create_iscsi_target_already_exists(self, mock_execute,
                                                mock_get_targ):
        """An existing target is reused rather than recreated."""
        mock_execute.return_value = ('fake out', 'fake err')
        self.assertEqual(
            1,
            self.target.create_iscsi_target(
                self.test_vol,
                1,
                0,
                self.fake_volumes_dir))
        self.assertTrue(mock_get_targ.called)
        self.assertTrue(mock_execute.called)

    @mock.patch('cinder.volume.targets.iet.IetAdm._find_sid_cid_for_target',
                return_value=None)
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('cinder.utils.execute')
    def test_remove_iscsi_target(self, mock_execute, mock_exists, mock_find):
        """remove_iscsi_target deletes via ietadm and wraps exec failures."""
        # Test the normal case
        self.target.remove_iscsi_target(1,
                                        0,
                                        self.testvol['id'],
                                        self.testvol['name'])
        # BUG FIX: was mock_execute.assert_any_calls(...) — a nonexistent
        # mock method, which silently returned a child Mock and asserted
        # nothing. assert_any_call() actually verifies the ietadm delete.
        mock_execute.assert_any_call('ietadm',
                                     '--op',
                                     'delete',
                                     '--tid=1',
                                     run_as_root=True)

        # Test the failure case: putils.ProcessExecutionError
        mock_execute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetRemoveFailed,
                          self.target.remove_iscsi_target,
                          1,
                          0,
                          self.testvol['id'],
                          self.testvol['name'])

    def test_find_sid_cid_for_target(self):
        """sid/cid are extracted from the session dump for a given tid."""
        tmp_file = six.StringIO()
        tmp_file.write(
            'tid:1 name:iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n'  # noqa
            ' sid:844427031282176 initiator:iqn.1994-05.com.redhat:5a6894679665\n'  # noqa
            ' cid:0 ip:10.9.8.7 state:active hd:none dd:none')
        tmp_file.seek(0)
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual(('844427031282176', '0'),
                             self.target._find_sid_cid_for_target(
                                 '1',
                                 'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45',  # noqa
                                 'volume-83c2e877-feed-46be-8435-77884fe55b45'  # noqa
                             ))

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    @mock.patch('cinder.utils.execute')
    @mock.patch.object(iet.IetAdm, '_get_target_chap_auth')
    def test_create_export(self, mock_get_chap, mock_execute,
                           mock_get_targ):
        """create_export returns the portal location and CHAP auth string."""
        mock_execute.return_value = ('', '')
        mock_get_chap.return_value = ('QZJbisGmn9AL954FNF4D',
                                      'P68eE7u9eFqDGexd28DQ')
        expected_result = {'location': '10.9.8.7:3260,1 '
                           'iqn.2010-10.org.openstack:testvol 0',
                           'auth': 'CHAP '
                           'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'}
        ctxt = context.get_admin_context()
        self.assertEqual(expected_result,
                         self.target.create_export(ctxt,
                                                   self.testvol,
                                                   self.fake_volumes_dir))
        self.assertTrue(mock_execute.called)

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target_chap_auth',
                return_value=None)
    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    def test_ensure_export(self, mock_get_targetm, mock_get_chap):
        """ensure_export delegates to create_iscsi_target with portal info."""
        ctxt = context.get_admin_context()
        with mock.patch.object(self.target, 'create_iscsi_target'):
            self.target.ensure_export(ctxt,
                                      self.testvol,
                                      self.fake_volumes_dir)
            self.target.create_iscsi_target.assert_called_once_with(
                'iqn.2010-10.org.openstack:testvol',
                1, 0, self.fake_volumes_dir, None,
                portals_ips=[self.configuration.iscsi_ip_address],
                portals_port=int(self.configuration.iscsi_port),
                check_exit_code=False,
                old_name=None)
# Copyright ClusterHQ Inc. See LICENSE file for details.

"""
The command-line certificate authority tool.
"""

import os
import sys
import textwrap

from twisted.internet.defer import maybeDeferred, succeed
from twisted.python.filepath import FilePath
from twisted.python.usage import Options, UsageError

from zope.interface import implementer

from ..common.script import (flocker_standard_options, ICommandLineScript,
                             FlockerScriptRunner)
from ._ca import (RootCredential, ControlCredential, NodeCredential,
                  UserCredential, CertificateAlreadyExistsError,
                  KeyAlreadyExistsError, PathError)


class PrettyOptions(Options):
    """
    Base class with improved output formatting for help text over
    ``twisted.python.usage.Options``.

    Includes ``self.helptext`` attribute in the wrapped help output of a CLI.
    Use ``self.helptext`` in place of ``self.longdesc``.

    A similar solution can be upstreamed to Twisted to fix ``self.longdesc``.
    https://twistedmatrix.com/trac/ticket/7864
    """

    def __str__(self):
        # Append the (wrapped to 80 columns) helptext to Twisted's output.
        base = super(PrettyOptions, self).__str__()
        helptext = self.helptext if getattr(self, "helptext", None) else ""
        helptext_list = helptext.splitlines()
        description_list = []
        for line in helptext_list:
            description_list.append('\n'.join(textwrap.wrap(line, 80)).strip())
        description = '\n'.join(description_list)
        return base + description

    def getSynopsis(self):
        """
        Modified from ``twisted.python.usage.Options.getSynopsis``.
        Does not include the parent synopsis if inside a subcommand.
        This allows separate synopses to be defined for each subcommand
        and the parent command, facilitating a prettier output, e.g.

            flocker-ca --help
            Usage: flocker-ca <command> [options]

            flocker-ca initialize --help
            Usage: flocker-ca initialize <name>

        Instead of:

            flocker-ca --help
            Usage: flocker-ca <command> [options]

            flocker-ca initialize --help
            Usage: flocker-ca <command> [options] initialize <name>

        Returns a string containing a description of these options and
        how to pass them to the executed file.
        """
        # FIX: removed a dead initial assignment of ``default`` — both
        # branches below always reassign it.
        if self.parent is None:
            default = "Usage: %s%s" % (os.path.basename(sys.argv[0]),
                                       (self.longOpt and " [options]") or '')
        else:
            default = '%s' % ((self.longOpt and "[options]") or '')
        synopsis = getattr(self, "synopsis", default)
        synopsis = synopsis.rstrip()
        if self.parent is not None:
            commandName = getattr(
                self.parent, "command_name",
                os.path.basename(sys.argv[0]))
            synopsis = "Usage: %s %s" % (
                commandName, ' '.join((self.parent.subCommand, synopsis)))
        return synopsis

    def getUsage(self, width=None):
        # For the top-level command, point users at per-subcommand help.
        base = super(PrettyOptions, self).getUsage(width)
        usage = base
        if self.subCommand is not None:
            subUsage = (
                "Run flocker-ca " + self.subCommand
                + " --help for command usage and help."
            )
            usage = usage + "\n\n" + subUsage + "\n\n"
        return usage


@flocker_standard_options
class UserCertificateOptions(PrettyOptions):
    """
    Command line options for ``flocker-ca create-api-certificate``.
    """
    helptext = """Create a new certificate for an API end user.

    Creates a certificate signed by a previously generated certificate
    authority (see flocker-ca initialize command for more information).

    Usage: flocker-ca create-api-certificate <username>

    Parameters:

    * username: A username for which the certificate should be created.
    """

    synopsis = "<name> [options]"

    # FIX: added the missing space between sentences (was
    # "certificate.Defaults..."), matching the sibling option classes.
    optParameters = [
        ['inputpath', 'i', None,
         ('Path to directory containing root certificate. '
          'Defaults to current working directory.')],
        ['outputpath', 'o', None,
         ('Path to directory to write control service certificate. '
          'Defaults to current working directory.')],
    ]

    def parseArgs(self, name):
        self["name"] = name

    def run(self):
        """
        Create a new API user certificate signed by the root and write it
        out to the current directory.

        :raise PathError: When the root certificate and key cannot be found.
        """
        if self["inputpath"] is None:
            self["inputpath"] = os.getcwd()
        if self["outputpath"] is None:
            self["outputpath"] = os.getcwd()
        self["inputpath"] = FilePath(self["inputpath"])
        self["outputpath"] = FilePath(self["outputpath"])
        try:
            try:
                # Usernames may be non-ASCII; normalize to unicode first.
                self["name"] = self["name"].decode("utf-8")
                ca = RootCredential.from_path(self["inputpath"])
                uc = UserCredential.initialize(
                    self["outputpath"], ca, self["name"])
                self._sys_module.stdout.write(
                    u"Created {user}.crt. You can now give it to your "
                    u"API enduser so they can access the control service "
                    u"API.\n".format(user=uc.username).encode("utf-8")
                )
            except PathError as e:
                raise UsageError(str(e))
        except (UnicodeEncodeError, UnicodeDecodeError):
            raise UsageError(
                u"Invalid username: Could not be converted to UTF-8")
        except UsageError as e:
            raise SystemExit(u"Error: {error}".format(error=str(e)))
        return succeed(None)


@flocker_standard_options
class NodeCertificateOptions(PrettyOptions):
    """
    Command line options for ``flocker-ca create-node-certificate``.
    """
    helptext = """Create a new certificate for a node agent.

    Creates a certificate signed by a previously generated certificate
    authority (see flocker-ca initialize command for more information).
    """

    synopsis = "[options]"

    optParameters = [
        ['inputpath', 'i', None,
         ('Path to directory containing root certificate. '
          'Defaults to current working directory.')],
        ['outputpath', 'o', None,
         ('Path to directory to write control service certificate. '
          'Defaults to current working directory.')],
    ]

    def run(self):
        """
        Check if root key and certificate files (either default or as
        specified on the command line) exist in the path and error out if
        they do not. If there are no path errors, create a new node
        certificate signed by the root and write it out to the current
        directory.
        """
        if self["inputpath"] is None:
            self["inputpath"] = os.getcwd()
        if self["outputpath"] is None:
            self["outputpath"] = os.getcwd()
        self["inputpath"] = FilePath(self["inputpath"])
        self["outputpath"] = FilePath(self["outputpath"])
        try:
            try:
                ca = RootCredential.from_path(self["inputpath"])
                nc = NodeCredential.initialize(self["outputpath"], ca)
                self._sys_module.stdout.write(
                    u"Created {uuid}.crt. Copy it over to "
                    u"/etc/flocker/node.crt on your node "
                    u"machine and make sure to chmod 0600 it.\n".format(
                        uuid=nc.uuid
                    ).encode("utf-8")
                )
            except PathError as e:
                raise UsageError(str(e))
        except UsageError as e:
            raise SystemExit(u"Error: {error}".format(error=str(e)))
        return succeed(None)


@flocker_standard_options
class ControlCertificateOptions(PrettyOptions):
    """
    Command line options for ``flocker-ca create-control-certificate``.

    Might want to support more than one domain name:
    https://clusterhq.atlassian.net/browse/FLOC-1977
    """
    helptext = """Create a new certificate for the control service.

    Creates a certificate signed by a previously generated certificate
    authority (see flocker-ca initialize command for more information).

    The hostname will be encoded into the generated certificate for
    validation by HTTPS clients, so it should match the external domain
    name of the machine the control service will run on.

    The generated files will be stored in the specified output directory
    (defaults to current working directory) with the names
    "control-<hostname>.crt" and "control-<hostname>.key".

    Usage: flocker-ca create-control-certificate <hostname>

    Parameters:

    * hostname: The hostname that this control service's API will run on.
      e.g. "example.org".
    """

    synopsis = "[options] <hostname>"

    optParameters = [
        ['inputpath', 'i', None,
         ('Path to directory containing root certificate. '
          'Defaults to current working directory.')],
        ['outputpath', 'o', None,
         ('Path to directory to write control service certificate. '
          'Defaults to current working directory.')],
    ]

    def parseArgs(self, hostname):
        self["hostname"] = hostname

    def run(self):
        """
        Check if control service certificate already exist in current
        directory. If it does, error out. Also check if root key and
        certificate files (either default or as specified on the command
        line) exist in the path and error out if they do not. If there are
        no path errors, create a new control service certificate signed by
        the root and write it out to the current directory.
        """
        if self["inputpath"] is None:
            self["inputpath"] = os.getcwd()
        if self["outputpath"] is None:
            self["outputpath"] = os.getcwd()
        self["inputpath"] = FilePath(self["inputpath"])
        self["outputpath"] = FilePath(self["outputpath"])
        try:
            try:
                ca = RootCredential.from_path(self["inputpath"])
                ControlCredential.initialize(
                    self["outputpath"], ca, self["hostname"])
                # NOTE(review): hostname is assumed to be a byte string
                # here (Python 2 CLI argument) — confirm before porting.
                output_certificate_filename = (
                    b"control-" + self["hostname"] + b".crt").decode("utf-8")
                output_key_filename = (
                    b"control-" + self["hostname"] + b".key").decode("utf-8")
                success_message = (
                    u"Created {certificate} and {key}\n"
                    u"Copy these files to the directory /etc/flocker on your "
                    u"control service machine.\nRename the files to "
                    u"control-service.crt and control-service.key and "
                    u"set the correct permissions by running chmod 0600 on "
                    u"both files."
                    u"\n".format(
                        certificate=output_certificate_filename,
                        key=output_key_filename
                    ).encode("utf-8")
                )
                self._sys_module.stdout.write(success_message)
            except (
                CertificateAlreadyExistsError,
                KeyAlreadyExistsError,
                PathError
            ) as e:
                raise UsageError(str(e))
        except UsageError as e:
            raise SystemExit(u"Error: {error}".format(error=str(e)))
        return succeed(None)


@flocker_standard_options
class InitializeOptions(PrettyOptions):
    """
    Command line options for ``flocker-ca initialize``.
    """
    helptext = """Create a new certificate authority.

    Creates a private/public key pair and self-signs the public key to
    produce a new certificate authority root certificate. These are stored
    in the current working directory. Once this has been done other
    ``flocker-ca`` commands can be run in this directory to create
    certificates signed by this particular certificate authority.

    Usage: flocker-ca initialize <name>

    Parameters:

    * name: Will be used as the name of the certificate authority,
      e.g. "mycluster".
    """

    synopsis = "<name>"

    def parseArgs(self, name):
        self["name"] = name
        self["path"] = FilePath(os.getcwd())

    def run(self):
        """
        Check if files already exist in current directory. If they do,
        error out. Otherwise calling APIs on CertificateAuthority, create
        new private/public key pair, self-sign, write out to files locally.
        """
        try:
            try:
                RootCredential.initialize(self["path"], self["name"])
                self._sys_module.stdout.write(
                    b"Created cluster.key and cluster.crt. "
                    b"Please keep cluster.key secret, as anyone who can "
                    b"access it will be able to control your cluster.\n"
                )
            except (
                KeyAlreadyExistsError,
                CertificateAlreadyExistsError,
                PathError
            ) as e:
                raise UsageError(str(e))
        except UsageError as e:
            raise SystemExit(u"Error: {error}".format(error=str(e)))
        return succeed(None)


@flocker_standard_options
class CAOptions(PrettyOptions):
    """
    Command line options for ``flocker-ca``.
    """
    helptext = """flocker-ca is used to create TLS certificates.

    The certificates are used to identify the control service, nodes and
    API clients within a Flocker cluster.
    """

    synopsis = "Usage: flocker-ca <command> [options]"

    subCommands = [
        ["initialize", None, InitializeOptions,
         ("Initialize a certificate authority in the "
          "current working directory.")],
        ["create-control-certificate", None, ControlCertificateOptions,
         "Create a certificate for the control service."],
        ["create-node-certificate", None, NodeCertificateOptions,
         "Create a certificate for a node agent."],
        ["create-api-certificate", None, UserCertificateOptions,
         "Create a certificate for an API user."],
    ]


@implementer(ICommandLineScript)
class CAScript(object):
    """
    Command-line script for ``flocker-ca``.
    """
    def main(self, reactor, options):
        # Dispatch to the chosen subcommand, or print help if none given.
        if options.subCommand is not None:
            return maybeDeferred(options.subOptions.run)
        else:
            return options.opt_help()


def flocker_ca_main():
    """Entry point for the ``flocker-ca`` executable."""
    return FlockerScriptRunner(
        CAScript(), CAOptions(), logging=False).main()
# -*- coding: utf-8 -*-
"""some measures for evaluation of prediction, tests and model selection

Created on Tue Nov 08 15:23:20 2011

Author: Josef Perktold
License: BSD-3
"""

import numpy as np


def mse(x1, x2, axis=0):
    '''mean squared error of x1 relative to x2, along `axis`'''
    return np.mean((x1 - x2)**2, axis=axis)


def rmse(x1, x2, axis=0):
    '''root mean squared error'''
    return np.sqrt(mse(x1, x2, axis=axis))


def maxabs(x1, x2, axis=0):
    '''maximum absolute error'''
    return np.max(np.abs(x1 - x2), axis=axis)


def meanabs(x1, x2, axis=0):
    '''mean absolute error'''
    return np.mean(np.abs(x1 - x2), axis=axis)


def medianabs(x1, x2, axis=0):
    '''median absolute error'''
    return np.median(np.abs(x1 - x2), axis=axis)


def bias(x1, x2, axis=0):
    '''bias, mean error'''
    return np.mean(x1 - x2, axis=axis)


def medianbias(x1, x2, axis=0):
    '''median bias, median error'''
    return np.median(x1 - x2, axis=axis)


def vare(x1, x2, ddof=0, axis=0):
    '''variance of error

    BUG FIX: ``ddof`` was previously hard-coded to 0 in the ``np.var``
    call, silently ignoring the caller's argument; it is now forwarded.
    '''
    return np.var(x1 - x2, ddof=ddof, axis=axis)


def stde(x1, x2, ddof=0, axis=0):
    '''standard deviation of error

    BUG FIX: as in ``vare``, ``ddof`` is now forwarded instead of being
    hard-coded to 0. (Docstring previously said "variance of error".)
    '''
    return np.std(x1 - x2, ddof=ddof, axis=axis)


def iqr(x1, x2, axis=0):
    '''interquartile range of error

    rounded index, no interpolations
    this could use newer numpy percentile functions instead
    '''
    if axis is None:
        x1 = np.ravel(x1)
        x2 = np.ravel(x2)
        axis = 0
    xdiff = np.sort(x1 - x2)
    nobs = x1.shape[axis]
    idx = np.round((nobs - 1) * np.array([0.25, 0.75])).astype(int)
    sl = [slice(None)] * xdiff.ndim
    sl[axis] = idx
    # BUG FIX: index with a tuple — indexing an ndarray with a *list* of
    # slices is deprecated and removed in modern numpy.
    res = np.diff(xdiff[tuple(sl)], axis=axis)
    res = np.squeeze(res)  # drop reduced dimension
    return res


# Information Criteria
# ---------------------

def aic(llf, nobs, df_modelwc):
    '''Akaike information criterion

    Parameters
    ----------
    llf : float
        value of the loglikelihood
    nobs : int
        number of observations (unused; kept for a uniform signature)
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    aic : float
        information criterion

    References
    ----------
    http://en.wikipedia.org/wiki/Akaike_information_criterion
    '''
    return -2. * llf + 2. * df_modelwc


def aicc(llf, nobs, df_modelwc):
    '''Akaike information criterion (AIC) with small sample correction

    Parameters
    ----------
    llf : float
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    aicc : float
        information criterion

    References
    ----------
    http://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
    '''
    return -2. * llf + 2. * df_modelwc * nobs / (nobs - df_modelwc - 1.)  # float division


def bic(llf, nobs, df_modelwc):
    '''Bayesian information criterion (BIC) or Schwarz criterion

    Parameters
    ----------
    llf : float
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    bic : float
        information criterion

    References
    ----------
    http://en.wikipedia.org/wiki/Bayesian_information_criterion
    '''
    return -2. * llf + np.log(nobs) * df_modelwc


def hqic(llf, nobs, df_modelwc):
    '''Hannan-Quinn information criterion (HQC)

    Parameters
    ----------
    llf : float
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    hqic : float
        information criterion

    References
    ----------
    http://en.wikipedia.org/wiki/Hannan-Quinn_information_criterion
    '''
    return -2. * llf + 2 * np.log(np.log(nobs)) * df_modelwc


# IC based on residual sigma

def aic_sigma(sigma2, nobs, df_modelwc, islog=False):
    '''Akaike information criterion, sigma-based form

    Parameters
    ----------
    sigma2 : float
        estimate of the residual variance or determinant of Sigma_hat in
        the multivariate case. If islog is true, then it is assumed that
        sigma is already log-ed, for example logdetSigma.
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant
    islog : bool
        whether ``sigma2`` is already on the log scale

    Returns
    -------
    aic : float
        information criterion

    Notes
    -----
    A constant has been dropped in comparison to the loglikelihood base
    information criteria. The information criteria should be used to
    compare only comparable models.

    For example, AIC is defined in terms of the loglikelihood as

    :math:`-2 llf + 2 k`

    in terms of :math:`\\hat{\\sigma}^2`

    :math:`log(\\hat{\\sigma}^2) + 2 k / n`

    in terms of the determinant of :math:`\\hat{\\Sigma}`

    :math:`log(|\\hat{\\Sigma}|) + 2 k / n`

    Note: In our definition we do not divide by n in the log-likelihood
    version.

    TODO: Latex math reference for example lecture notes by Herman Bierens

    References
    ----------
    http://en.wikipedia.org/wiki/Akaike_information_criterion
    '''
    if not islog:
        sigma2 = np.log(sigma2)
    return sigma2 + aic(0, nobs, df_modelwc) / nobs


def aicc_sigma(sigma2, nobs, df_modelwc, islog=False):
    '''Akaike information criterion (AIC) with small sample correction,
    sigma-based form

    Parameters
    ----------
    sigma2 : float
        estimate of the residual variance or determinant of Sigma_hat in
        the multivariate case. If islog is true, then it is assumed that
        sigma is already log-ed, for example logdetSigma.
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant
    islog : bool
        whether ``sigma2`` is already on the log scale

    Returns
    -------
    aicc : float
        information criterion

    Notes
    -----
    A constant has been dropped in comparison to the loglikelihood base
    information criteria. These should be used to compare for comparable
    models.

    References
    ----------
    http://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
    '''
    if not islog:
        sigma2 = np.log(sigma2)
    return sigma2 + aicc(0, nobs, df_modelwc) / nobs  # float division


def bic_sigma(sigma2, nobs, df_modelwc, islog=False):
    '''Bayesian information criterion (BIC) or Schwarz criterion,
    sigma-based form

    Parameters
    ----------
    sigma2 : float
        estimate of the residual variance or determinant of Sigma_hat in
        the multivariate case. If islog is true, then it is assumed that
        sigma is already log-ed, for example logdetSigma.
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant
    islog : bool
        whether ``sigma2`` is already on the log scale

    Returns
    -------
    bic : float
        information criterion

    Notes
    -----
    A constant has been dropped in comparison to the loglikelihood base
    information criteria. These should be used to compare for comparable
    models.

    References
    ----------
    http://en.wikipedia.org/wiki/Bayesian_information_criterion
    '''
    if not islog:
        sigma2 = np.log(sigma2)
    return sigma2 + bic(0, nobs, df_modelwc) / nobs


def hqic_sigma(sigma2, nobs, df_modelwc, islog=False):
    '''Hannan-Quinn information criterion (HQC), sigma-based form

    Parameters
    ----------
    sigma2 : float
        estimate of the residual variance or determinant of Sigma_hat in
        the multivariate case. If islog is true, then it is assumed that
        sigma is already log-ed, for example logdetSigma.
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant
    islog : bool
        whether ``sigma2`` is already on the log scale

    Returns
    -------
    hqic : float
        information criterion

    Notes
    -----
    A constant has been dropped in comparison to the loglikelihood base
    information criteria. These should be used to compare for comparable
    models.
    '''
    if not islog:
        sigma2 = np.log(sigma2)
    return sigma2 + hqic(0, nobs, df_modelwc) / nobs


# from var_model.py, VAR only? separates neqs and k_vars per equation
# def fpe_sigma():
#     ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)


# BUG FIX: __all__ must contain *strings*, not function objects —
# otherwise ``from module import *`` raises TypeError.
__all__ = ['maxabs', 'meanabs', 'medianabs', 'medianbias', 'mse', 'rmse',
           'stde', 'vare', 'aic', 'aic_sigma', 'aicc', 'aicc_sigma',
           'bias', 'bic', 'bic_sigma', 'hqic', 'hqic_sigma', 'iqr']
"""The tests for the DirecTV Media player platform.""" from datetime import datetime, timedelta from typing import Optional from pytest import fixture from homeassistant.components.directv.media_player import ( ATTR_MEDIA_CURRENTLY_RECORDING, ATTR_MEDIA_RATING, ATTR_MEDIA_RECORDED, ATTR_MEDIA_START_TIME, ) from homeassistant.components.media_player.const import ( ATTR_INPUT_SOURCE, ATTR_MEDIA_ALBUM_NAME, ATTR_MEDIA_ARTIST, ATTR_MEDIA_CHANNEL, ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_DURATION, ATTR_MEDIA_ENQUEUE, ATTR_MEDIA_POSITION, ATTR_MEDIA_POSITION_UPDATED_AT, ATTR_MEDIA_SERIES_TITLE, ATTR_MEDIA_TITLE, DOMAIN as MP_DOMAIN, MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, SERVICE_PLAY_MEDIA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, ) from homeassistant.const import ( ATTR_ENTITY_ID, SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_MEDIA_STOP, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_PAUSED, STATE_PLAYING, STATE_UNAVAILABLE, ) from homeassistant.helpers.typing import HomeAssistantType from homeassistant.util import dt as dt_util from tests.async_mock import patch from tests.components.directv import setup_integration from tests.test_util.aiohttp import AiohttpClientMocker ATTR_UNIQUE_ID = "unique_id" CLIENT_ENTITY_ID = f"{MP_DOMAIN}.client" MAIN_ENTITY_ID = f"{MP_DOMAIN}.host" MUSIC_ENTITY_ID = f"{MP_DOMAIN}.music_client" RESTRICTED_ENTITY_ID = f"{MP_DOMAIN}.restricted_client" STANDBY_ENTITY_ID = f"{MP_DOMAIN}.standby_client" UNAVAILABLE_ENTITY_ID = f"{MP_DOMAIN}.unavailable_client" # pylint: disable=redefined-outer-name @fixture def mock_now() -> datetime: """Fixture for dtutil.now.""" return dt_util.utcnow() async def async_turn_on( hass: HomeAssistantType, entity_id: Optional[str] = None ) -> None: """Turn on specified media player or all.""" data = {ATTR_ENTITY_ID: 
entity_id} if entity_id else {} await hass.services.async_call(MP_DOMAIN, SERVICE_TURN_ON, data) async def async_turn_off( hass: HomeAssistantType, entity_id: Optional[str] = None ) -> None: """Turn off specified media player or all.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} await hass.services.async_call(MP_DOMAIN, SERVICE_TURN_OFF, data) async def async_media_pause( hass: HomeAssistantType, entity_id: Optional[str] = None ) -> None: """Send the media player the command for pause.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PAUSE, data) async def async_media_play( hass: HomeAssistantType, entity_id: Optional[str] = None ) -> None: """Send the media player the command for play/pause.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PLAY, data) async def async_media_stop( hass: HomeAssistantType, entity_id: Optional[str] = None ) -> None: """Send the media player the command for stop.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_STOP, data) async def async_media_next_track( hass: HomeAssistantType, entity_id: Optional[str] = None ) -> None: """Send the media player the command for next track.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data) async def async_media_previous_track( hass: HomeAssistantType, entity_id: Optional[str] = None ) -> None: """Send the media player the command for prev track.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data) async def async_play_media( hass: HomeAssistantType, media_type: str, media_id: str, entity_id: Optional[str] = None, enqueue: Optional[str] = None, ) -> None: """Send the media player the command for playing media.""" data = 
{ATTR_MEDIA_CONTENT_TYPE: media_type, ATTR_MEDIA_CONTENT_ID: media_id} if entity_id: data[ATTR_ENTITY_ID] = entity_id if enqueue: data[ATTR_MEDIA_ENQUEUE] = enqueue await hass.services.async_call(MP_DOMAIN, SERVICE_PLAY_MEDIA, data) async def test_setup( hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker ) -> None: """Test setup with basic config.""" await setup_integration(hass, aioclient_mock) assert hass.states.get(MAIN_ENTITY_ID) assert hass.states.get(CLIENT_ENTITY_ID) assert hass.states.get(UNAVAILABLE_ENTITY_ID) async def test_unique_id( hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker ) -> None: """Test unique id.""" await setup_integration(hass, aioclient_mock) entity_registry = await hass.helpers.entity_registry.async_get_registry() main = entity_registry.async_get(MAIN_ENTITY_ID) assert main.unique_id == "028877455858" client = entity_registry.async_get(CLIENT_ENTITY_ID) assert client.unique_id == "2CA17D1CD30X" unavailable_client = entity_registry.async_get(UNAVAILABLE_ENTITY_ID) assert unavailable_client.unique_id == "9XXXXXXXXXX9" async def test_supported_features( hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker ) -> None: """Test supported features.""" await setup_integration(hass, aioclient_mock) # Features supported for main DVR state = hass.states.get(MAIN_ENTITY_ID) assert ( SUPPORT_PAUSE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK | SUPPORT_PLAY == state.attributes.get("supported_features") ) # Feature supported for clients. 
state = hass.states.get(CLIENT_ENTITY_ID) assert ( SUPPORT_PAUSE | SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK | SUPPORT_PLAY == state.attributes.get("supported_features") ) async def test_check_attributes( hass: HomeAssistantType, mock_now: dt_util.dt.datetime, aioclient_mock: AiohttpClientMocker, ) -> None: """Test attributes.""" await setup_integration(hass, aioclient_mock) state = hass.states.get(MAIN_ENTITY_ID) assert state.state == STATE_PLAYING assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "17016356" assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_MOVIE assert state.attributes.get(ATTR_MEDIA_DURATION) == 7200 assert state.attributes.get(ATTR_MEDIA_POSITION) == 4437 assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) assert state.attributes.get(ATTR_MEDIA_TITLE) == "Snow Bride" assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("HALLHD", "312") assert state.attributes.get(ATTR_INPUT_SOURCE) == "312" assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING) assert state.attributes.get(ATTR_MEDIA_RATING) == "TV-G" assert not state.attributes.get(ATTR_MEDIA_RECORDED) assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime( 2020, 3, 21, 13, 0, tzinfo=dt_util.UTC ) state = hass.states.get(CLIENT_ENTITY_ID) assert state.state == STATE_PLAYING assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "4405732" assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_TVSHOW assert state.attributes.get(ATTR_MEDIA_DURATION) == 1791 assert state.attributes.get(ATTR_MEDIA_POSITION) == 263 assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) assert state.attributes.get(ATTR_MEDIA_TITLE) == "Tyler's Ultimate" assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) == "Spaghetti and Clam Sauce" assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("FOODHD", "231") assert 
state.attributes.get(ATTR_INPUT_SOURCE) == "231" assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING) assert state.attributes.get(ATTR_MEDIA_RATING) == "No Rating" assert state.attributes.get(ATTR_MEDIA_RECORDED) assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime( 2010, 7, 5, 15, 0, 8, tzinfo=dt_util.UTC ) state = hass.states.get(MUSIC_ENTITY_ID) assert state.state == STATE_PLAYING assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "76917562" assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_MUSIC assert state.attributes.get(ATTR_MEDIA_DURATION) == 86400 assert state.attributes.get(ATTR_MEDIA_POSITION) == 15050 assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) assert state.attributes.get(ATTR_MEDIA_TITLE) == "Sparkle In Your Eyes" assert state.attributes.get(ATTR_MEDIA_ARTIST) == "Gerald Albright" assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) == "Slam Dunk (2014)" assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("MCSJ", "851") assert state.attributes.get(ATTR_INPUT_SOURCE) == "851" assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING) assert state.attributes.get(ATTR_MEDIA_RATING) == "TV-PG" assert not state.attributes.get(ATTR_MEDIA_RECORDED) assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime( 2020, 3, 21, 10, 0, 0, tzinfo=dt_util.UTC ) state = hass.states.get(STANDBY_ENTITY_ID) assert state.state == STATE_OFF assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) is None assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None assert state.attributes.get(ATTR_MEDIA_DURATION) is None assert state.attributes.get(ATTR_MEDIA_POSITION) is None assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) is None assert state.attributes.get(ATTR_MEDIA_TITLE) is None assert state.attributes.get(ATTR_MEDIA_ARTIST) is None assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) is None assert 
state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None assert state.attributes.get(ATTR_MEDIA_CHANNEL) is None assert state.attributes.get(ATTR_INPUT_SOURCE) is None assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING) assert state.attributes.get(ATTR_MEDIA_RATING) is None assert not state.attributes.get(ATTR_MEDIA_RECORDED) state = hass.states.get(RESTRICTED_ENTITY_ID) assert state.state == STATE_PLAYING assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) is None assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None assert state.attributes.get(ATTR_MEDIA_DURATION) is None assert state.attributes.get(ATTR_MEDIA_POSITION) is None assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) is None assert state.attributes.get(ATTR_MEDIA_TITLE) is None assert state.attributes.get(ATTR_MEDIA_ARTIST) is None assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) is None assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None assert state.attributes.get(ATTR_MEDIA_CHANNEL) is None assert state.attributes.get(ATTR_INPUT_SOURCE) is None assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING) assert state.attributes.get(ATTR_MEDIA_RATING) is None assert not state.attributes.get(ATTR_MEDIA_RECORDED) state = hass.states.get(UNAVAILABLE_ENTITY_ID) assert state.state == STATE_UNAVAILABLE async def test_attributes_paused( hass: HomeAssistantType, mock_now: dt_util.dt.datetime, aioclient_mock: AiohttpClientMocker, ): """Test attributes while paused.""" await setup_integration(hass, aioclient_mock) state = hass.states.get(CLIENT_ENTITY_ID) last_updated = state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) # Test to make sure that ATTR_MEDIA_POSITION_UPDATED_AT is not # updated if TV is paused. 
with patch( "homeassistant.util.dt.utcnow", return_value=mock_now + timedelta(minutes=5) ): await async_media_pause(hass, CLIENT_ENTITY_ID) await hass.async_block_till_done() state = hass.states.get(CLIENT_ENTITY_ID) assert state.state == STATE_PAUSED assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) == last_updated async def test_main_services( hass: HomeAssistantType, mock_now: dt_util.dt.datetime, aioclient_mock: AiohttpClientMocker, ) -> None: """Test the different services.""" await setup_integration(hass, aioclient_mock) with patch("directv.DIRECTV.remote") as remote_mock: await async_turn_off(hass, MAIN_ENTITY_ID) await hass.async_block_till_done() remote_mock.assert_called_once_with("poweroff", "0") with patch("directv.DIRECTV.remote") as remote_mock: await async_turn_on(hass, MAIN_ENTITY_ID) await hass.async_block_till_done() remote_mock.assert_called_once_with("poweron", "0") with patch("directv.DIRECTV.remote") as remote_mock: await async_media_pause(hass, MAIN_ENTITY_ID) await hass.async_block_till_done() remote_mock.assert_called_once_with("pause", "0") with patch("directv.DIRECTV.remote") as remote_mock: await async_media_play(hass, MAIN_ENTITY_ID) await hass.async_block_till_done() remote_mock.assert_called_once_with("play", "0") with patch("directv.DIRECTV.remote") as remote_mock: await async_media_next_track(hass, MAIN_ENTITY_ID) await hass.async_block_till_done() remote_mock.assert_called_once_with("ffwd", "0") with patch("directv.DIRECTV.remote") as remote_mock: await async_media_previous_track(hass, MAIN_ENTITY_ID) await hass.async_block_till_done() remote_mock.assert_called_once_with("rew", "0") with patch("directv.DIRECTV.remote") as remote_mock: await async_media_stop(hass, MAIN_ENTITY_ID) await hass.async_block_till_done() remote_mock.assert_called_once_with("stop", "0") with patch("directv.DIRECTV.tune") as tune_mock: await async_play_media(hass, "channel", 312, MAIN_ENTITY_ID) await hass.async_block_till_done() 
tune_mock.assert_called_once_with("312", "0")
import warnings
from datetime import datetime, timedelta
import datetime as pydt

import numpy as np
from dateutil.relativedelta import relativedelta

import matplotlib.units as units
import matplotlib.dates as dates
from matplotlib.ticker import Formatter, AutoLocator, Locator
from matplotlib.transforms import nonsingular

from pandas.core.dtypes.common import (
    is_float, is_integer,
    is_integer_dtype,
    is_float_dtype,
    is_datetime64_ns_dtype,
    is_period_arraylike,
    is_nested_list_like
)
from pandas.core.dtypes.generic import ABCSeries

from pandas.compat import lrange
import pandas.compat as compat

from pandas._libs import tslib
import pandas.core.common as com
from pandas.core.index import Index
from pandas.core.indexes.datetimes import date_range
import pandas.core.tools.datetimes as tools
from pandas._libs.tslibs import resolution
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import FreqGroup
from pandas.core.indexes.period import Period, PeriodIndex

from pandas.plotting._compat import _mpl_le_2_0_0

# constants
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.

SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY

MUSEC_PER_DAY = 1e6 * SEC_PER_DAY

_WARN = True  # Global for whether pandas has registered the units explicitly
_mpl_units = {}  # Cache for units overwritten by us


def get_pairs():
    """Return the (type, converter class) pairs that pandas registers."""
    pairs = [
        (tslib.Timestamp, DatetimeConverter),
        (Period, PeriodConverter),
        (pydt.datetime, DatetimeConverter),
        (pydt.date, DatetimeConverter),
        (pydt.time, TimeConverter),
        (np.datetime64, DatetimeConverter),
    ]
    return pairs


def register(explicit=True):
    """Register Pandas Formatters and Converters with matplotlib

    This function modifies the global ``matplotlib.units.registry``
    dictionary. Pandas adds custom converters for

    * pd.Timestamp
    * pd.Period
    * np.datetime64
    * datetime.datetime
    * datetime.date
    * datetime.time

    See Also
    --------
    deregister_matplotlib_converters
    """
    # Renamed in pandas.plotting.__init__
    global _WARN

    if explicit:
        # An explicit registration opts the user in; suppress the implicit-
        # registration FutureWarning emitted by _check_implicitly_registered.
        _WARN = False

    pairs = get_pairs()
    for type_, cls in pairs:
        converter = cls()
        if type_ in units.registry:
            # Remember whatever was registered before so deregister() can
            # restore it later.
            previous = units.registry[type_]
            _mpl_units[type_] = previous
        units.registry[type_] = converter


def deregister():
    """Remove pandas' formatters and converters

    Removes the custom converters added by :func:`register`. This
    attempts to set the state of the registry back to the state before
    pandas registered its own units. Converters for pandas' own types like
    Timestamp and Period are removed completely. Converters for types
    pandas overwrites, like ``datetime.datetime``, are restored to their
    original value.

    See Also
    --------
    deregister_matplotlib_converters
    """
    # Renamed in pandas.plotting.__init__
    for type_, cls in get_pairs():
        # We use type to catch our classes directly, no inheritance
        if type(units.registry.get(type_)) is cls:
            units.registry.pop(type_)

    # restore the old keys
    for unit, formatter in _mpl_units.items():
        if type(formatter) not in {DatetimeConverter, PeriodConverter,
                                   TimeConverter}:
            # make it idempotent by excluding ours.
            units.registry[unit] = formatter


def _check_implicitly_registered():
    """Warn (once) if pandas' converters are in use without explicit opt-in."""
    global _WARN

    if _WARN:
        msg = ("Using an implicitly registered datetime converter for a "
               "matplotlib plotting method. The converter was registered "
               "by pandas on import. Future versions of pandas will require "
               "you to explicitly register matplotlib converters.\n\n"
               "To register the converters:\n\t"
               ">>> from pandas.plotting import register_matplotlib_converters"
               "\n\t"
               ">>> register_matplotlib_converters()")
        warnings.warn(msg, FutureWarning)
        _WARN = False


def _to_ordinalf(tm):
    """Return a datetime.time as float seconds since midnight."""
    tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +
               float(tm.microsecond / 1e6))
    return tot_sec


def time2num(d):
    """Convert a time-of-day (str or datetime.time) to float seconds.

    Values of other types pass through unchanged.
    """
    if isinstance(d, compat.string_types):
        parsed = tools.to_datetime(d)
        if not isinstance(parsed, datetime):
            raise ValueError('Could not parse time {d}'.format(d=d))
        return _to_ordinalf(parsed.time())
    if isinstance(d, pydt.time):
        return _to_ordinalf(d)
    return d


class TimeConverter(units.ConversionInterface):
    """matplotlib unit converter for time-of-day values."""

    @staticmethod
    def convert(value, unit, axis):
        valid_types = (str, pydt.time)
        if (isinstance(value, valid_types) or is_integer(value) or
                is_float(value)):
            return time2num(value)
        if isinstance(value, Index):
            return value.map(time2num)
        if isinstance(value, (list, tuple, np.ndarray, Index)):
            return [time2num(x) for x in value]
        return value

    @staticmethod
    def axisinfo(unit, axis):
        # Only claim axes whose unit is the literal string 'time'.
        if unit != 'time':
            return None

        majloc = AutoLocator()
        majfmt = TimeFormatter(majloc)
        return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time')

    @staticmethod
    def default_units(x, axis):
        return 'time'


# time formatter
class TimeFormatter(Formatter):
    """Tick formatter rendering float seconds-since-midnight as HH:MM[:SS[.f]]."""

    def __init__(self, locs):
        self.locs = locs

    def __call__(self, x, pos=0):
        """
        Return the time of day as a formatted string.

        Parameters
        ----------
        x : float
            The time of day specified as seconds since 00:00 (midnight),
            with up to microsecond precision.
        pos
            Unused

        Returns
        -------
        str
            A string in HH:MM:SS.mmmuuu format. Microseconds,
            milliseconds and seconds are only displayed if non-zero.
        """
        fmt = '%H:%M:%S.%f'
        s = int(x)
        msus = int(round((x - s) * 1e6))
        ms = msus // 1000
        us = msus % 1000
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
        _, h = divmod(h, 24)
        if us != 0:
            return pydt.time(h, m, s, msus).strftime(fmt)
        elif ms != 0:
            # milliseconds only: format with %f then drop the trailing
            # three (zero) microsecond digits
            return pydt.time(h, m, s, msus).strftime(fmt)[:-3]
        elif s != 0:
            return pydt.time(h, m, s).strftime('%H:%M:%S')

        return pydt.time(h, m).strftime('%H:%M')


# Period Conversion


class PeriodConverter(dates.DateConverter):
    """matplotlib converter mapping period-like values to Period ordinals."""

    @staticmethod
    def convert(values, units, axis):
        if is_nested_list_like(values):
            values = [PeriodConverter._convert_1d(v, units, axis)
                      for v in values]
        else:
            values = PeriodConverter._convert_1d(values, units, axis)
        return values

    @staticmethod
    def _convert_1d(values, units, axis):
        if not hasattr(axis, 'freq'):
            raise TypeError('Axis must have `freq` set to convert to Periods')
        valid_types = (compat.string_types, datetime,
                       Period, pydt.date, pydt.time, np.datetime64)
        if (isinstance(values, valid_types) or is_integer(values) or
                is_float(values)):
            return get_datevalue(values, axis.freq)
        if isinstance(values, PeriodIndex):
            return values.asfreq(axis.freq)._ndarray_values
        if isinstance(values, Index):
            return values.map(lambda x: get_datevalue(x, axis.freq))
        if is_period_arraylike(values):
            return PeriodIndex(values, freq=axis.freq)._ndarray_values
        if isinstance(values, (list, tuple, np.ndarray, Index)):
            return [get_datevalue(x, axis.freq) for x in values]
        return values


def get_datevalue(date, freq):
    """Return the Period ordinal of *date* at *freq* (numbers pass through)."""
    if isinstance(date, Period):
        return date.asfreq(freq).ordinal
    elif isinstance(date, (compat.string_types, datetime,
                           pydt.date, pydt.time, np.datetime64)):
        return Period(date, freq).ordinal
    elif (is_integer(date) or is_float(date) or
            (isinstance(date, (np.ndarray, Index)) and (date.size == 1))):
        return date
    elif date is None:
        return None
    raise ValueError("Unrecognizable date '{date}'".format(date=date))


def _dt_to_float_ordinal(dt):
    """
    Convert :mod:`datetime` to the Gregorian date as UTC float days,
    preserving hours, minutes, seconds and microseconds.  Return value
    is a :func:`float`.
    """
    if (isinstance(dt, (np.ndarray, Index, ABCSeries)
                   ) and is_datetime64_ns_dtype(dt)):
        # bulk path: go through epoch seconds for datetime64[ns] containers
        base = dates.epoch2num(dt.asi8 / 1.0E9)
    else:
        base = dates.date2num(dt)
    return base


# Datetime Conversion
class DatetimeConverter(dates.DateConverter):
    """matplotlib converter mapping datetime-like values to float ordinals."""

    @staticmethod
    def convert(values, unit, axis):
        # values might be a 1-d array, or a list-like of arrays.
        _check_implicitly_registered()
        if is_nested_list_like(values):
            values = [DatetimeConverter._convert_1d(v, unit, axis)
                      for v in values]
        else:
            values = DatetimeConverter._convert_1d(values, unit, axis)
        return values

    @staticmethod
    def _convert_1d(values, unit, axis):
        def try_parse(values):
            # best-effort: unparseable strings are returned unchanged
            try:
                return _dt_to_float_ordinal(tools.to_datetime(values))
            except Exception:
                return values

        if isinstance(values, (datetime, pydt.date)):
            return _dt_to_float_ordinal(values)
        elif isinstance(values, np.datetime64):
            return _dt_to_float_ordinal(tslib.Timestamp(values))
        elif isinstance(values, pydt.time):
            return dates.date2num(values)
        elif (is_integer(values) or is_float(values)):
            return values
        elif isinstance(values, compat.string_types):
            return try_parse(values)
        elif isinstance(values, (list, tuple, np.ndarray, Index)):
            if isinstance(values, Index):
                values = values.values
            if not isinstance(values, np.ndarray):
                values = com._asarray_tuplesafe(values)

            if is_integer_dtype(values) or is_float_dtype(values):
                return values

            try:
                values = tools.to_datetime(values)
                if isinstance(values, Index):
                    values = _dt_to_float_ordinal(values)
                else:
                    values = [_dt_to_float_ordinal(x) for x in values]
            except Exception:
                values = _dt_to_float_ordinal(values)

        return values

    @staticmethod
    def axisinfo(unit, axis):
        """
        Return the :class:`~matplotlib.units.AxisInfo` for *unit*.

        *unit* is a tzinfo instance or None.
        The *axis* argument is required but not used.
        """
        tz = unit

        majloc = PandasAutoDateLocator(tz=tz)
        majfmt = PandasAutoDateFormatter(majloc, tz=tz)
        datemin = pydt.date(2000, 1, 1)
        datemax = pydt.date(2010, 1, 1)

        return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
                              default_limits=(datemin, datemax))


class PandasAutoDateFormatter(dates.AutoDateFormatter):
    """AutoDateFormatter with pandas-specific tz and sub-second handling."""

    def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
        dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)
        # matplotlib.dates._UTC has no _utcoffset called by pandas
        if self._tz is dates.UTC:
            self._tz._utcoffset = self._tz.utcoffset(None)

        # For mpl > 2.0 the format strings are controlled via rcparams
        # so do not mess with them.  For mpl < 2.0 change the second
        # break point and add a musec break point
        if _mpl_le_2_0_0():
            self.scaled[1. / SEC_PER_DAY] = '%H:%M:%S'
            self.scaled[1. / MUSEC_PER_DAY] = '%H:%M:%S.%f'


class PandasAutoDateLocator(dates.AutoDateLocator):
    """AutoDateLocator that falls back to millisecond ticks for tiny spans."""

    def get_locator(self, dmin, dmax):
        'Pick the best locator based on a distance.'
        _check_implicitly_registered()
        delta = relativedelta(dmax, dmin)

        num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
        num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
        tot_sec = num_days * 86400. + num_sec

        if abs(tot_sec) < self.minticks:
            self._freq = -1
            locator = MilliSecondLocator(self.tz)
            locator.set_axis(self.axis)

            locator.set_view_interval(*self.axis.get_view_interval())
            locator.set_data_interval(*self.axis.get_data_interval())
            return locator

        return dates.AutoDateLocator.get_locator(self, dmin, dmax)

    def _get_unit(self):
        return MilliSecondLocator.get_unit_generic(self._freq)


class MilliSecondLocator(dates.DateLocator):
    """Tick locator placing ticks at millisecond resolution."""

    # one millisecond, expressed in (float) days
    UNIT = 1. / (24 * 3600 * 1000)

    def __init__(self, tz):
        dates.DateLocator.__init__(self, tz)
        self._interval = 1.
def _get_unit(self): return self.get_unit_generic(-1) @staticmethod def get_unit_generic(freq): unit = dates.RRuleLocator.get_unit_generic(freq) if unit < 0: return MilliSecondLocator.UNIT return unit def __call__(self): # if no data have been set, this will tank with a ValueError _check_implicitly_registered() try: dmin, dmax = self.viewlim_to_dt() except ValueError: return [] if dmin > dmax: dmax, dmin = dmin, dmax # We need to cap at the endpoints of valid datetime # TODO(wesm) unused? # delta = relativedelta(dmax, dmin) # try: # start = dmin - delta # except ValueError: # start = _from_ordinal(1.0) # try: # stop = dmax + delta # except ValueError: # # The magic number! # stop = _from_ordinal(3652059.9999999) nmax, nmin = dates.date2num((dmax, dmin)) num = (nmax - nmin) * 86400 * 1000 max_millis_ticks = 6 for interval in [1, 10, 50, 100, 200, 500]: if num <= interval * (max_millis_ticks - 1): self._interval = interval break else: # We went through the whole loop without breaking, default to 1 self._interval = 1000. estimate = (nmax - nmin) / (self._get_unit() * self._get_interval()) if estimate > self.MAXTICKS * 2: raise RuntimeError(('MillisecondLocator estimated to generate ' '{estimate:d} ticks from {dmin} to {dmax}: ' 'exceeds Locator.MAXTICKS' '* 2 ({arg:d}) ').format( estimate=estimate, dmin=dmin, dmax=dmax, arg=self.MAXTICKS * 2)) freq = '%dL' % self._get_interval() tz = self.tz.tzname(None) st = _from_ordinal(dates.date2num(dmin)) # strip tz ed = _from_ordinal(dates.date2num(dmax)) all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object) try: if len(all_dates) > 0: locs = self.raise_if_exceeds(dates.date2num(all_dates)) return locs except Exception: # pragma: no cover pass lims = dates.date2num([dmin, dmax]) return lims def _get_interval(self): return self._interval def autoscale(self): """ Set the view limits to include the data range. 
""" dmin, dmax = self.datalim_to_dt() if dmin > dmax: dmax, dmin = dmin, dmax # We need to cap at the endpoints of valid datetime # TODO(wesm): unused? # delta = relativedelta(dmax, dmin) # try: # start = dmin - delta # except ValueError: # start = _from_ordinal(1.0) # try: # stop = dmax + delta # except ValueError: # # The magic number! # stop = _from_ordinal(3652059.9999999) dmin, dmax = self.datalim_to_dt() vmin = dates.date2num(dmin) vmax = dates.date2num(dmax) return self.nonsingular(vmin, vmax) def _from_ordinal(x, tz=None): ix = int(x) dt = datetime.fromordinal(ix) remainder = float(x) - ix hour, remainder = divmod(24 * remainder, 1) minute, remainder = divmod(60 * remainder, 1) second, remainder = divmod(60 * remainder, 1) microsecond = int(1e6 * remainder) if microsecond < 10: microsecond = 0 # compensate for rounding errors dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute), int(second), microsecond) if tz is not None: dt = dt.astimezone(tz) if microsecond > 999990: # compensate for rounding errors dt += timedelta(microseconds=1e6 - microsecond) return dt # Fixed frequency dynamic tick locators and formatters # ------------------------------------------------------------------------- # --- Locators --- # ------------------------------------------------------------------------- def _get_default_annual_spacing(nyears): """ Returns a default spacing between consecutive ticks for annual data. """ if nyears < 11: (min_spacing, maj_spacing) = (1, 1) elif nyears < 20: (min_spacing, maj_spacing) = (1, 2) elif nyears < 50: (min_spacing, maj_spacing) = (1, 5) elif nyears < 100: (min_spacing, maj_spacing) = (5, 10) elif nyears < 200: (min_spacing, maj_spacing) = (5, 25) elif nyears < 600: (min_spacing, maj_spacing) = (10, 50) else: factor = nyears // 1000 + 1 (min_spacing, maj_spacing) = (factor * 20, factor * 100) return (min_spacing, maj_spacing) def period_break(dates, period): """ Returns the indices where the given period changes. 
Parameters ---------- dates : PeriodIndex Array of intervals to monitor. period : string Name of the period to monitor. """ current = getattr(dates, period) previous = getattr(dates - 1, period) return np.nonzero(current - previous)[0] def has_level_label(label_flags, vmin): """ Returns true if the ``label_flags`` indicate there is at least one label for this level. if the minimum view limit is not an exact integer, then the first tick label won't be shown, so we must adjust for that. """ if label_flags.size == 0 or (label_flags.size == 1 and label_flags[0] == 0 and vmin % 1 > 0.0): return False else: return True def _daily_finder(vmin, vmax, freq): periodsperday = -1 if freq >= FreqGroup.FR_HR: if freq == FreqGroup.FR_NS: periodsperday = 24 * 60 * 60 * 1000000000 elif freq == FreqGroup.FR_US: periodsperday = 24 * 60 * 60 * 1000000 elif freq == FreqGroup.FR_MS: periodsperday = 24 * 60 * 60 * 1000 elif freq == FreqGroup.FR_SEC: periodsperday = 24 * 60 * 60 elif freq == FreqGroup.FR_MIN: periodsperday = 24 * 60 elif freq == FreqGroup.FR_HR: periodsperday = 24 else: # pragma: no cover raise ValueError("unexpected frequency: {freq}".format(freq=freq)) periodsperyear = 365 * periodsperday periodspermonth = 28 * periodsperday elif freq == FreqGroup.FR_BUS: periodsperyear = 261 periodspermonth = 19 elif freq == FreqGroup.FR_DAY: periodsperyear = 365 periodspermonth = 28 elif resolution.get_freq_group(freq) == FreqGroup.FR_WK: periodsperyear = 52 periodspermonth = 3 else: # pragma: no cover raise ValueError("unexpected frequency") # save this for later usage vmin_orig = vmin (vmin, vmax) = (Period(ordinal=int(vmin), freq=freq), Period(ordinal=int(vmax), freq=freq)) span = vmax.ordinal - vmin.ordinal + 1 dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq) # Initialize the output info = np.zeros(span, dtype=[('val', np.int64), ('maj', bool), ('min', bool), ('fmt', '|S20')]) info['val'][:] = dates_._ndarray_values info['fmt'][:] = '' info['maj'][[0, -1]] = True # .. 
# ... and set some shortcuts into the structured tick-info record array
    info_maj = info['maj']
    info_min = info['min']
    info_fmt = info['fmt']

    def first_label(label_flags):
        # Choose which flagged tick carries the "level" label (e.g. the
        # year): skip the very first flagged position when the view starts
        # on a partial period (vmin_orig has a fractional part), so the
        # label lands on a fully visible period.
        if (label_flags[0] == 0) and (label_flags.size > 1) and \
                ((vmin_orig % 1) > 0.0):
            return label_flags[1]
        else:
            return label_flags[0]

    # Case 1. Less than a month
    if span <= periodspermonth:
        day_start = period_break(dates_, 'day')
        month_start = period_break(dates_, 'month')

        def _hour_finder(label_interval, force_year_start):
            # Major ticks on day boundaries; minor ticks (and '%H:%M'
            # labels) every `label_interval` hours.
            _hour = dates_.hour
            _prev_hour = (dates_ - 1).hour
            hour_start = (_hour - _prev_hour) != 0
            info_maj[day_start] = True
            info_min[hour_start & (_hour % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
            info_fmt[day_start] = '%H:%M\n%d-%b'
            info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
            if force_year_start and not has_level_label(year_start, vmin_orig):
                info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'

        def _minute_finder(label_interval):
            # Major ticks on hour boundaries; minor ticks every
            # `label_interval` minutes.
            hour_start = period_break(dates_, 'hour')
            _minute = dates_.minute
            _prev_minute = (dates_ - 1).minute
            minute_start = (_minute - _prev_minute) != 0
            info_maj[hour_start] = True
            info_min[minute_start & (_minute % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            info_fmt = info['fmt']
            info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
            info_fmt[day_start] = '%H:%M\n%d-%b'
            info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'

        def _second_finder(label_interval):
            # Major ticks on minute boundaries; minor ticks every
            # `label_interval` seconds.
            minute_start = period_break(dates_, 'minute')
            _second = dates_.second
            _prev_second = (dates_ - 1).second
            second_start = (_second - _prev_second) != 0
            info['maj'][minute_start] = True
            info['min'][second_start & (_second % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            info_fmt = info['fmt']
            info_fmt[second_start &
                     (_second % label_interval == 0)] = '%H:%M:%S'
            info_fmt[day_start] = '%H:%M:%S\n%d-%b'
            info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'

        # Pick the sub-daily tick density from the visible span (measured
        # in periods); thresholds are fractions/multiples of one day.
        if span < periodsperday / 12000.0:
            _second_finder(1)
        elif span < periodsperday / 6000.0:
            _second_finder(2)
        elif span < periodsperday / 2400.0:
            _second_finder(5)
        elif span < periodsperday / 1200.0:
            _second_finder(10)
        elif span < periodsperday / 800.0:
            _second_finder(15)
        elif span < periodsperday / 400.0:
            _second_finder(30)
        elif span < periodsperday / 150.0:
            _minute_finder(1)
        elif span < periodsperday / 70.0:
            _minute_finder(2)
        elif span < periodsperday / 24.0:
            _minute_finder(5)
        elif span < periodsperday / 12.0:
            _minute_finder(15)
        elif span < periodsperday / 6.0:
            _minute_finder(30)
        elif span < periodsperday / 2.5:
            _hour_finder(1, False)
        elif span < periodsperday / 1.5:
            _hour_finder(2, False)
        elif span < periodsperday * 1.25:
            _hour_finder(3, False)
        elif span < periodsperday * 2.5:
            _hour_finder(6, True)
        elif span < periodsperday * 4:
            _hour_finder(12, True)
        else:
            # More than ~4 days visible: label days, with month/year
            # level labels where the boundary is visible.
            info_maj[month_start] = True
            info_min[day_start] = True
            year_start = period_break(dates_, 'year')
            info_fmt = info['fmt']
            info_fmt[day_start] = '%d'
            info_fmt[month_start] = '%d\n%b'
            info_fmt[year_start] = '%d\n%b\n%Y'
            if not has_level_label(year_start, vmin_orig):
                if not has_level_label(month_start, vmin_orig):
                    info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
                else:
                    info_fmt[first_label(month_start)] = '%d\n%b\n%Y'

    # Case 2. Less than three months
    elif span <= periodsperyear // 4:
        month_start = period_break(dates_, 'month')
        info_maj[month_start] = True
        if freq < FreqGroup.FR_HR:
            info['min'] = True
        else:
            day_start = period_break(dates_, 'day')
            info['min'][day_start] = True
        week_start = period_break(dates_, 'week')
        year_start = period_break(dates_, 'year')
        info_fmt[week_start] = '%d'
        info_fmt[month_start] = '\n\n%b'
        info_fmt[year_start] = '\n\n%b\n%Y'
        if not has_level_label(year_start, vmin_orig):
            if not has_level_label(month_start, vmin_orig):
                info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
            else:
                info_fmt[first_label(month_start)] = '\n\n%b\n%Y'

    # Case 3. Less than 14 months ...............
    elif span <= 1.15 * periodsperyear:
        year_start = period_break(dates_, 'year')
        month_start = period_break(dates_, 'month')
        week_start = period_break(dates_, 'week')
        info_maj[month_start] = True
        info_min[week_start] = True
        info_min[year_start] = False
        info_min[month_start] = False
        info_fmt[month_start] = '%b'
        info_fmt[year_start] = '%b\n%Y'
        if not has_level_label(year_start, vmin_orig):
            info_fmt[first_label(month_start)] = '%b\n%Y'

    # Case 4. Less than 2.5 years ...............
    elif span <= 2.5 * periodsperyear:
        year_start = period_break(dates_, 'year')
        quarter_start = period_break(dates_, 'quarter')
        month_start = period_break(dates_, 'month')
        info_maj[quarter_start] = True
        info_min[month_start] = True
        info_fmt[quarter_start] = '%b'
        info_fmt[year_start] = '%b\n%Y'

    # Case 4. Less than 4 years .................
    elif span <= 4 * periodsperyear:
        year_start = period_break(dates_, 'year')
        month_start = period_break(dates_, 'month')
        info_maj[year_start] = True
        info_min[month_start] = True
        info_min[year_start] = False
        # Label only January and July between the year labels.
        month_break = dates_[month_start].month
        jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
        info_fmt[jan_or_jul] = '%b'
        info_fmt[year_start] = '%b\n%Y'

    # Case 5. Less than 11 years ................
    elif span <= 11 * periodsperyear:
        year_start = period_break(dates_, 'year')
        quarter_start = period_break(dates_, 'quarter')
        info_maj[year_start] = True
        info_min[quarter_start] = True
        info_min[year_start] = False
        info_fmt[year_start] = '%Y'

    # Case 6. More than 12 years ................
    else:
        year_start = period_break(dates_, 'year')
        year_break = dates_[year_start].year
        nyears = span / periodsperyear
        # Thin the year ticks to the standard annual spacing.
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        major_idx = year_start[(year_break % maj_anndef == 0)]
        info_maj[major_idx] = True
        minor_idx = year_start[(year_break % min_anndef == 0)]
        info_min[minor_idx] = True
        info_fmt[major_idx] = '%Y'

    return info


def _monthly_finder(vmin, vmax, freq):
    """
    Tick finder for monthly-frequency axes: one record per month ordinal,
    flagging major/minor ticks and their strftime-style formats.
    """
    periodsperyear = 12

    vmin_orig = vmin
    (vmin, vmax) = (int(vmin), int(vmax))
    span = vmax - vmin + 1

    # Initialize the output
    info = np.zeros(span,
                    dtype=[('val', int), ('maj', bool), ('min', bool),
                           ('fmt', '|S8')])
    info['val'] = np.arange(vmin, vmax + 1)
    dates_ = info['val']
    info['fmt'] = ''
    # Month ordinals divisible by 12 mark January (a year boundary).
    year_start = (dates_ % 12 == 0).nonzero()[0]
    info_maj = info['maj']
    info_fmt = info['fmt']

    if span <= 1.15 * periodsperyear:
        info_maj[year_start] = True
        info['min'] = True

        info_fmt[:] = '%b'
        info_fmt[year_start] = '%b\n%Y'

        if not has_level_label(year_start, vmin_orig):
            if dates_.size > 1:
                idx = 1
            else:
                idx = 0
            info_fmt[idx] = '%b\n%Y'

    elif span <= 2.5 * periodsperyear:
        quarter_start = (dates_ % 3 == 0).nonzero()
        info_maj[year_start] = True
        # TODO: Check the following : is it really info['fmt'] ?
        # NOTE(review): assigning True into a '|S8' string field looks
        # suspicious (likely meant info['min']) -- confirm upstream.
        info['fmt'][quarter_start] = True
        info['min'] = True

        info_fmt[quarter_start] = '%b'
        info_fmt[year_start] = '%b\n%Y'

    elif span <= 4 * periodsperyear:
        info_maj[year_start] = True
        info['min'] = True

        # Label only January (%12==0) and July (%12==6).
        jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
        info_fmt[jan_or_jul] = '%b'
        info_fmt[year_start] = '%b\n%Y'

    elif span <= 11 * periodsperyear:
        quarter_start = (dates_ % 3 == 0).nonzero()
        info_maj[year_start] = True
        info['min'][quarter_start] = True

        info_fmt[year_start] = '%Y'

    else:
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        years = dates_[year_start] // 12 + 1
        major_idx = year_start[(years % maj_anndef == 0)]
        info_maj[major_idx] = True
        info['min'][year_start[(years % min_anndef == 0)]] = True

        info_fmt[major_idx] = '%Y'

    return info


def _quarterly_finder(vmin, vmax, freq):
    """
    Tick finder for quarterly-frequency axes: one record per quarter
    ordinal, flagging major/minor ticks and their formats ('%q'/'%F' are
    Period-specific directives).
    """
    periodsperyear = 4
    vmin_orig = vmin
    (vmin, vmax) = (int(vmin), int(vmax))
    span = vmax - vmin + 1

    info = np.zeros(span,
                    dtype=[('val', int), ('maj', bool), ('min', bool),
                           ('fmt', '|S8')])
    info['val'] = np.arange(vmin, vmax + 1)
    info['fmt'] = ''
    dates_ = info['val']
    info_maj = info['maj']
    info_fmt = info['fmt']
    # Quarter ordinals divisible by 4 mark Q1 (a year boundary).
    year_start = (dates_ % 4 == 0).nonzero()[0]

    if span <= 3.5 * periodsperyear:
        info_maj[year_start] = True
        info['min'] = True

        info_fmt[:] = 'Q%q'
        info_fmt[year_start] = 'Q%q\n%F'

        if not has_level_label(year_start, vmin_orig):
            if dates_.size > 1:
                idx = 1
            else:
                idx = 0
            info_fmt[idx] = 'Q%q\n%F'

    elif span <= 11 * periodsperyear:
        info_maj[year_start] = True
        info['min'] = True
        info_fmt[year_start] = '%F'

    else:
        years = dates_[year_start] // 4 + 1
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        major_idx = year_start[(years % maj_anndef == 0)]
        info_maj[major_idx] = True
        info['min'][year_start[(years % min_anndef == 0)]] = True

        info_fmt[major_idx] = '%F'

    return info


def _annual_finder(vmin, vmax, freq):
    """
    Tick finder for annual-frequency axes: major/minor year ticks thinned
    to the default annual spacing for the visible span.
    """
    (vmin, vmax) = (int(vmin), int(vmax + 1))
    span = vmax - vmin + 1

    info = np.zeros(span,
                    dtype=[('val', int), ('maj', bool), ('min', bool),
                           ('fmt', '|S8')])
    info['val'] = np.arange(vmin, vmax + 1)
    info['fmt'] = ''
    dates_ = info['val']

    (min_anndef, maj_anndef) = _get_default_annual_spacing(span)
    major_idx = dates_ % maj_anndef == 0
    info['maj'][major_idx] = True
    info['min'][(dates_ % min_anndef == 0)] = True
    info['fmt'][major_idx] = '%Y'

    return info


def get_finder(freq):
    """
    Return the tick-finder function matching `freq` (a frequency code or
    string); raises NotImplementedError for unsupported frequencies.
    """
    if isinstance(freq, compat.string_types):
        freq = frequencies.get_freq(freq)
    fgroup = resolution.get_freq_group(freq)

    if fgroup == FreqGroup.FR_ANN:
        return _annual_finder
    elif fgroup == FreqGroup.FR_QTR:
        return _quarterly_finder
    elif freq == FreqGroup.FR_MTH:
        return _monthly_finder
    elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):
        return _daily_finder
    else:  # pragma: no cover
        errmsg = "Unsupported frequency: {freq}".format(freq=freq)
        raise NotImplementedError(errmsg)


class TimeSeries_DateLocator(Locator):
    """
    Locates the ticks along an axis controlled by a :class:`Series`.

    Parameters
    ----------
    freq : {var}
        Valid frequency specifier.
    minor_locator : {False, True}, optional
        Whether the locator is for minor ticks (True) or not.
    dynamic_mode : {True, False}, optional
        Whether the locator should work in dynamic mode.
    base : {int}, optional
    quarter : {int}, optional
    month : {int}, optional
    day : {int}, optional
    """

    def __init__(self, freq, minor_locator=False, dynamic_mode=True,
                 base=1, quarter=1, month=1, day=1, plot_obj=None):
        if isinstance(freq, compat.string_types):
            freq = frequencies.get_freq(freq)
        self.freq = freq
        self.base = base
        (self.quarter, self.month, self.day) = (quarter, month, day)
        self.isminor = minor_locator
        self.isdynamic = dynamic_mode
        self.offset = 0
        self.plot_obj = plot_obj
        self.finder = get_finder(freq)

    def _get_default_locs(self, vmin, vmax):
        "Returns the default locations of ticks."
        # The finder result is cached on the plot object so the major and
        # minor locator/formatter share one computation per view interval.
        if self.plot_obj.date_axis_info is None:
            self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)

        locator = self.plot_obj.date_axis_info

        if self.isminor:
            return np.compress(locator['min'], locator['val'])
        return np.compress(locator['maj'], locator['val'])

    def __call__(self):
        'Return the locations of the ticks.'
        # axis calls Locator.set_axis inside set_m<xxxx>_formatter
        _check_implicitly_registered()
        vi = tuple(self.axis.get_view_interval())
        # Invalidate the cached tick info whenever the view changes.
        if vi != self.plot_obj.view_interval:
            self.plot_obj.date_axis_info = None
        self.plot_obj.view_interval = vi
        vmin, vmax = vi
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        if self.isdynamic:
            locs = self._get_default_locs(vmin, vmax)
        else:  # pragma: no cover
            base = self.base
            (d, m) = divmod(vmin, base)
            vmin = (d + 1) * base
            locs = lrange(vmin, vmax + 1, base)
        return locs

    def autoscale(self):
        """
        Sets the view limits to the nearest multiples of base that contain the
        data.
        """
        # requires matplotlib >= 0.98.0
        (vmin, vmax) = self.axis.get_data_interval()

        locs = self._get_default_locs(vmin, vmax)
        (vmin, vmax) = locs[[0, -1]]
        if vmin == vmax:
            vmin -= 1
            vmax += 1
        return nonsingular(vmin, vmax)


# -------------------------------------------------------------------------
# --- Formatter ---
# -------------------------------------------------------------------------


class TimeSeries_DateFormatter(Formatter):
    """
    Formats the ticks along an axis controlled by a :class:`PeriodIndex`.

    Parameters
    ----------
    freq : {int, string}
        Valid frequency specifier.
    minor_locator : {False, True}
        Whether the current formatter should apply to minor ticks (True) or
        major ticks (False).
    dynamic_mode : {True, False}
        Whether the formatter works in dynamic mode or not.
    """

    def __init__(self, freq, minor_locator=False, dynamic_mode=True,
                 plot_obj=None):
        if isinstance(freq, compat.string_types):
            freq = frequencies.get_freq(freq)
        self.format = None
        self.freq = freq
        self.locs = []
        self.formatdict = None
        self.isminor = minor_locator
        self.isdynamic = dynamic_mode
        self.offset = 0
        self.plot_obj = plot_obj
        self.finder = get_finder(freq)

    def _set_default_format(self, vmin, vmax):
        "Returns the default ticks spacing."
        if self.plot_obj.date_axis_info is None:
            self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
        info = self.plot_obj.date_axis_info

        if self.isminor:
            # Minor formatter only labels ticks that are NOT also major.
            format = np.compress(info['min'] & np.logical_not(info['maj']),
                                 info)
        else:
            format = np.compress(info['maj'], info)
        # Map tick value -> strftime format for __call__.
        self.formatdict = {x: f for (x, _, _, f) in format}
        return self.formatdict

    def set_locs(self, locs):
        'Sets the locations of the ticks'
        # don't actually use the locs. This is just needed to work with
        # matplotlib. Force to use vmin, vmax
        _check_implicitly_registered()

        self.locs = locs

        (vmin, vmax) = vi = tuple(self.axis.get_view_interval())
        if vi != self.plot_obj.view_interval:
            self.plot_obj.date_axis_info = None
        self.plot_obj.view_interval = vi
        if vmax < vmin:
            (vmin, vmax) = (vmax, vmin)
        self._set_default_format(vmin, vmax)

    def __call__(self, x, pos=0):
        _check_implicitly_registered()
        if self.formatdict is None:
            return ''
        else:
            # NOTE(review): pop() consumes the entry, so each tick value is
            # formatted at most once per set_locs -- confirm intended.
            fmt = self.formatdict.pop(x, '')
            return Period(ordinal=int(x), freq=self.freq).strftime(fmt)


class TimeSeries_TimedeltaFormatter(Formatter):
    """
    Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`.
    """

    @staticmethod
    def format_timedelta_ticks(x, pos, n_decimals):
        """
        Convert seconds to 'D days HH:MM:SS.F'
        """
        # x is in nanoseconds; split into seconds + leftover nanoseconds.
        s, ns = divmod(x, 1e9)
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)
        decimals = int(ns * 10**(n_decimals - 9))
        s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))
        if n_decimals > 0:
            s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)
        if d != 0:
            s = '{:d} days '.format(int(d)) + s
        return s

    def __call__(self, x, pos=0):
        _check_implicitly_registered()
        (vmin, vmax) = tuple(self.axis.get_view_interval())
        # Show enough fractional digits for ~1% of the visible range,
        # capped at nanosecond precision.
        n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin))))
        if n_decimals > 9:
            n_decimals = 9
        return self.format_timedelta_ticks(x, pos, n_decimals)
#!/usr/bin/env python

"""
Copyright 2013 The Trustees of Princeton University

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import syndicate.client.common.log as Log
import syndicate.syndicate as c_syndicate
from syndicate.volume import Volume
import singleton

from Crypto.PublicKey import RSA as CryptoKey

import os
import errno
import collections
import json
import random
import binascii
import hashlib
import shutil
import pickle

log = Log.get_logger()

VOLUME_ROOT_DIR = None   # root directory on the Volume; overwritten at runtime
LOCAL_ROOT_DIR = None    # root directory on local disk
CACHE_DIR = ".syndicatemail.cache"   # root directory for cached data; overwritten at runtime

PATH_SALT = None   # loaded at runtime
PATH_SALT_FILENAME = "/config/salt"

# Sentinel default: "resolve the Volume from the session singleton".
GET_FROM_SESSION = True

VOLUME_STORAGE_DIRS = [
    "/config"
]

MY_PUBKEY_PEM = None
MY_PRIVKEY_PEM = None


# -------------------------------------
def path_join(a, *b):
    """
    Join path components, stripping leading and trailing '/' from every
    tail component so absolute-looking tails cannot escape the root `a`.
    """
    b_stripped = []
    for c in b:
        b_stripped.append(c.strip("/"))

    return os.path.join(a, *b_stripped)


# -------------------------------------
def volume_path(a, *b):
    """
    Build an absolute path rooted at VOLUME_ROOT_DIR.

    Raises:
        Exception: if setup_volume_storage() has not run yet.
    """
    if VOLUME_ROOT_DIR is None:
        raise Exception("VOLUME_ROOT_DIR not set")

    parts = [a] + list(b)
    return path_join(VOLUME_ROOT_DIR, *parts)


# -------------------------------------
def local_path(a, *b):
    """
    Build an absolute path rooted at LOCAL_ROOT_DIR.

    Raises:
        Exception: if setup_local_storage() has not run yet.
    """
    if LOCAL_ROOT_DIR is None:
        raise Exception("LOCAL_ROOT_DIR not set")

    parts = [a] + list(b)
    return path_join(LOCAL_ROOT_DIR, *parts)


# -------------------------------------
def tuple_to_dict(tuple_inst):
    """
    Convert a namedtuple instance into a JSON-friendly dict of the form
    {"type": <class name>, "fields": {<field>: <value>, ...}}.
    """
    fields_dict = {}
    for f in tuple_inst._fields:
        fields_dict[f] = getattr(tuple_inst, f)

    tuple_dict = {
        "type": tuple_inst.__class__.__name__,
        "fields": fields_dict
    }

    return tuple_dict


# -------------------------------------
def tuple_to_json(tuple_inst):
    """
    Serialize a namedtuple instance to a JSON string.

    Raises:
        Exception: re-raises any serialization failure after logging it.
    """
    json_dict = tuple_to_dict(tuple_inst)

    try:
        json_str = json.dumps(json_dict)
    except Exception as e:
        log.error("Failed to serialize")
        raise e

    return json_str


# -------------------------------------
def json_to_tuple(tuple_class, json_str):
    """
    Deserialize a JSON string produced by tuple_to_json() back into an
    instance of `tuple_class`.

    Raises:
        Exception/AssertionError: on parse failure, type mismatch, or
        unexpected fields.
    """
    json_dict = {}
    try:
        json_dict = json.loads(json_str)
    except Exception as e:
        log.error("Failed to unserialize")
        raise e

    for field_name in ["type", "fields"]:
        assert field_name in json_dict, "Missing field: %s" % field_name

    _type = str(json_dict["type"])
    _fields = json_dict["fields"]

    if _type != tuple_class.__name__:
        raise Exception("JSON encodes '%s'; expected '%s'" % (_type, tuple_class.__name__))

    assert isinstance(_fields, dict), "Expected dictionary for 'fields'"

    # check fields
    for field_name in _fields.keys():
        assert field_name in tuple_class._fields, "Unexpected field '%s'" % field_name

    return tuple_class(**_fields)


# -------------------------------------
def volume_makedirs(volume, dir_abspath, mode=0o700):
    """
    mkdir -p on the Volume: create each path component in turn, tolerating
    components that already exist.

    Raises:
        Exception: if the Volume reports an error other than -EEXIST.
    """
    pieces = dir_abspath.split("/")
    prefix = "/"
    for piece in pieces:
        prefix = os.path.join(prefix, piece)
        if not path_exists(prefix, volume=volume):
            rc = volume.mkdir(prefix, mode)
            if rc != 0 and rc != -errno.EEXIST:
                raise Exception("Volume mkdir rc = %s" % rc)

    return True


# -------------------------------------
def setup_dirs(dir_names, prefix="/", volume=GET_FROM_SESSION):
    """
    Create each directory in `dir_names` under `prefix`, either on local
    disk (volume is None) or on the given/session Volume.

    Returns:
        True on success, False on any failure (already-existing
        directories are not failures).
    """
    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    if prefix is None:
        raise Exception("Invalid argument: passed None as prefix")

    for dirname in dir_names:
        if dirname is None:
            raise Exception("Invalid argument: passed None as a directory")

        try:
            dir_path = None
            if volume is None:
                dir_path = local_path(prefix, dirname)
                os.makedirs(dir_path, mode=0o700)
            else:
                dir_path = volume_path(prefix, dirname)
                volume_makedirs(volume, dir_path, mode=0o700)

        except OSError as oe:
            if oe.errno != errno.EEXIST:
                log.error("Failed to mkdir '%s'" % dir_path)
                log.exception(oe)
                return False
            else:
                # already exists: fine
                pass

        except Exception as e:
            log.error("Failed to mkdir '%s'" % dirname)
            log.exception(e)
            return False

    return True


# -------------------------------------
def volume_listdir(volume, dir_path):
    """List the entry names of a directory on the Volume."""
    dfd = volume.opendir(dir_path)
    dents = volume.readdir(dfd)
    volume.closedir(dfd)

    return [x.name for x in dents]


# -------------------------------------
def listdir(path, volume=GET_FROM_SESSION):
    """List a directory, locally or on the (session) Volume."""
    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    if volume is None:
        return os.listdir(path)
    else:
        return volume_listdir(volume, path)


# -------------------------------------
def volume_rmtree(volume, dir_abspath):
    """
    Recursively delete a directory tree on the Volume (files unlinked,
    subdirectories recursed).  A missing root is treated as success.

    Raises:
        Exception: if the directory cannot be opened or read.
    """
    dfd = volume.opendir(dir_abspath)
    if dfd is None or dfd < 0:
        if isinstance(dfd, int) and dfd == -errno.ENOENT:
            # nothing to remove
            return True

        raise Exception("Could not open %s" % dir_abspath)

    dents = volume.readdir(dfd)
    # readdir returns a list on success, or a negative int error code.
    if dents is None or (isinstance(dents, int) and dents < 0):
        raise Exception("Could not read %s, rc = %s" % (dir_abspath, dents))

    volume.closedir(dfd)

    for dent in dents:
        if dent.name in [".", ".."]:
            continue

        if dent.type == volume.ENT_TYPE_DIR:
            child_dir_path = os.path.join(dir_abspath, dent.name)
            volume_rmtree(volume, child_dir_path)

        elif dent.type == volume.ENT_TYPE_FILE:
            child_ent_path = os.path.join(dir_abspath, dent.name)
            volume.unlink(child_ent_path)

    return True


# -------------------------------------
def delete_dirs(dirs, prefix="/", remove_contents=True, volume=GET_FROM_SESSION):
    """
    Delete each directory in `dirs` under `prefix`, recursively when
    `remove_contents` is True.  Returns True on success, False otherwise.
    """
    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    for dirname in dirs:
        if volume is None:
            dir_path = local_path(prefix, dirname)
            if remove_contents:
                try:
                    shutil.rmtree(dir_path)
                except Exception as e:
                    log.exception(e)
                    return False
            else:
                try:
                    os.rmdir(dir_path)
                except Exception as e:
                    log.exception(e)
                    return False

        else:
            dir_path = volume_path(prefix, dirname)
            if remove_contents:
                try:
                    volume_rmtree(volume, dir_path)
                except Exception as e:
                    log.exception(e)
                    return False
            else:
                try:
                    volume.rmdir(dir_path)
                except Exception as e:
                    log.exception(e)
                    return False

    return True


# -------------------------------------
def setup_volume_storage(volume_root_dir, modules, volume):
    """
    Initialize Volume-side storage: create every module's
    VOLUME_STORAGE_DIRS under `volume_root_dir`, then load (or create and
    persist) the 512-bit path salt used by salt_string().

    Returns:
        True on success, False on failure.
    """
    global VOLUME_ROOT_DIR
    global PATH_SALT
    global PATH_SALT_FILENAME

    all_volume_dirs = []
    VOLUME_ROOT_DIR = volume_root_dir

    for mod in modules:
        volume_dirs = getattr(mod, "VOLUME_STORAGE_DIRS", None)
        if volume_dirs is not None:
            all_volume_dirs += volume_dirs

    # set up volume directories
    if volume_root_dir is not None:
        rc = setup_dirs(["/"] + all_volume_dirs, volume=volume)
        if not rc:
            log.error("Volume setup_dirs failed")
            return rc

    salt_path = volume_path(PATH_SALT_FILENAME)
    if not path_exists(salt_path, volume=volume):
        salt_dirname = os.path.dirname(PATH_SALT_FILENAME)
        rc = setup_dirs([salt_dirname], volume=volume)
        if not rc:
            # BUGFIX: formerly referenced undefined 'dirname' (NameError).
            log.error("Failed to create '%s'" % salt_dirname)
            return False

        # make a 512-bit (64-byte) salt
        salt = binascii.b2a_hex(os.urandom(64))
        rc = write_file(salt_path, salt, volume=volume)
        if not rc:
            log.error("Failed to write '%s'" % salt_path)
            return False

        PATH_SALT = salt
        log.info("wrote salt to %s" % salt_path)

    else:
        salt = read_file(salt_path, volume=volume)
        if salt is None:
            log.error("Failed to read '%s'" % salt_path)
            return False

        PATH_SALT = salt

    return True


# -------------------------------------
def setup_local_storage(local_dir, modules):
    """
    Initialize local-disk storage: create every module's
    LOCAL_STORAGE_DIRS under `local_dir`, plus the local cache directory.

    Returns:
        True on success, False on failure.
    """
    global LOCAL_ROOT_DIR
    global CACHE_DIR

    if local_dir is not None:
        LOCAL_ROOT_DIR = local_dir

    all_local_dirs = []
    for mod in modules:
        local_dirs = getattr(mod, "LOCAL_STORAGE_DIRS", None)
        if local_dirs is not None:
            all_local_dirs += local_dirs

    # set up local directories
    rc = setup_dirs(["/"] + all_local_dirs, volume=None)
    if not rc:
        log.error("Local setup_dirs failed")
        return rc

    # set up cache (use the CACHE_DIR constant, not a duplicated literal)
    rc = setup_dirs(["/"] + [CACHE_DIR], volume=None)
    if not rc:
        log.error("Local cache setup failed")
        return rc

    return True


# -------------------------------------
def setup_storage_access(my_pkey_pem):
    """
    Parse the user's private key and cache both halves (PEM-encoded) in
    MY_PUBKEY_PEM / MY_PRIVKEY_PEM for the encrypted-file helpers.

    Returns:
        True on success, False if the key cannot be parsed.
    """
    global MY_PUBKEY_PEM, MY_PRIVKEY_PEM

    try:
        pkey = CryptoKey.importKey(my_pkey_pem)
        MY_PUBKEY_PEM = pkey.publickey().exportKey()
        MY_PRIVKEY_PEM = pkey.exportKey()
    except Exception as e:
        log.exception(e)
        log.error("Failed to parse private key")
        return False

    return True


# -------------------------------------
def setup_storage(my_pkey_pem, volume_root_dir, local_dir, modules, volume=GET_FROM_SESSION):
    """
    Full storage bring-up: local dirs, Volume dirs + salt, and storage
    access keys, in that order.  Returns True only if all three succeed.
    """
    global MY_PUBKEY_PEM, MY_PRIVKEY_PEM

    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    rc = setup_local_storage(local_dir, modules)
    if not rc:
        log.error("Failed to set up local storage")
        return False

    rc = setup_volume_storage(volume_root_dir, modules, volume)
    if not rc:
        log.error("Failed to set up volume storage")
        return False

    rc = setup_storage_access(my_pkey_pem)
    if not rc:
        log.error("Failed to set up storage access")
        return False

    return True


# -------------------------------------
def salt_string(name, salt=None, iterations=10000):
    """
    Derive a hex digest from `name` by repeatedly folding in a salt
    (default: the Volume-wide PATH_SALT) for `iterations` rounds.

    Raises:
        Exception: if no salt is available (setup_storage() not called
        and no explicit salt given).
    """
    global PATH_SALT
    if salt is None:
        salt = PATH_SALT

    if not salt:
        raise Exception("call setup_storage() first")

    m = hashlib.sha256()
    for i in xrange(0, iterations):
        # BUGFIX: hash the selected salt; previously this always used
        # PATH_SALT, silently ignoring a caller-supplied salt.
        m.update(salt)
        m.update(name)

    return m.hexdigest()


# -------------------------------------
def read_file(file_path, volume=GET_FROM_SESSION):
    """
    Read and return the entire contents of a file, locally or from the
    (session) Volume.  Returns None on any failure.
    """
    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    if volume is None:
        try:
            fd = open(file_path, "r")
        except:
            log.error("Failed to open '%s' for reading" % file_path)
            return None

        try:
            buf = fd.read()
        except:
            log.error("Failed to read '%s'" % file_path)
            try:
                fd.close()
            except:
                pass
            return None

        try:
            fd.close()
            return buf
        except:
            log.error("Failed to close '%s'" % file_path)
            return None

    else:
        try:
            statbuf = volume.stat(file_path)
            if isinstance(statbuf, int) and statbuf < 0:
                # BUGFIX: use %-formatting; previously the error code was
                # passed as a second Exception argument, never formatted.
                raise Exception("volume.stat rc = %d" % statbuf)

            size = statbuf.st_size

            fd = volume.open(file_path, os.O_RDONLY)
            if fd is None:
                raise Exception("Failed to open Volume file %s" % file_path)

            buf = volume.read(fd, size)
            volume.close(fd)
            return buf

        except Exception as e:
            log.exception(e)
            log.error("Failed to read Volume file %s" % file_path)
            return None


# -------------------------------------
def write_file(file_path, data, volume=GET_FROM_SESSION, create_mode=0o600):
    """
    Write `data` to a file (created/truncated), locally or on the
    (session) Volume.  On local write failure the partial file is
    unlinked.  Returns True on success, False otherwise.
    """
    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    if volume is None:
        try:
            fd = open(file_path, "w+")
        except Exception as e:
            log.exception(e)
            log.error("Failed to open '%s' for writing" % file_path)
            return False

        try:
            fd.write(data)
            fd.flush()
        except Exception as e:
            log.exception(e)
            log.error("Failed to write '%s'" % file_path)

            # best-effort cleanup of the partial file
            try:
                os.unlink(file_path)
            except:
                pass

            try:
                fd.close()
            except:
                pass

            return False

        try:
            fd.close()
        except:
            log.error("Failed to close '%s'" % file_path)
            try:
                os.unlink(file_path)
            except:
                pass
            return False

        return True

    else:
        try:
            fd = volume.create(file_path, create_mode)
            if fd < 0:
                # already exists?  try to open and truncate instead
                fd = volume.open(file_path, os.O_WRONLY | os.O_TRUNC)
                if fd < 0:
                    raise Exception("Failed to open Volume file %s for writing: rc = %d" % (file_path, fd))

            rc = volume.write(fd, data)
            if rc != len(data):
                raise Exception("Volume write rc = %s (expected %s)" % (rc, len(data)))

            rc = volume.close(fd)
            if rc != 0:
                raise Exception("Volume close rc = %s" % rc)

            return True

        except Exception as e:
            log.exception(e)
            log.error("Failed to write Volume file %s" % file_path)
            return False


# -------------------------------------
def encrypt_data(sender_privkey_pem, receiver_pubkey_pem, data):
    """
    Encrypt (and sign) `data` via libsyndicate.  Returns the ciphertext,
    or None on failure.
    """
    rc, enc_data = c_syndicate.encrypt_data(sender_privkey_pem, receiver_pubkey_pem, data)
    if rc != 0:
        log.error("encrypt_data rc = %s" % rc)
        return None

    return enc_data


# -------------------------------------
def decrypt_data(sender_pubkey_pem, receiver_privkey_pem, enc_data):
    """
    Decrypt (and verify) `enc_data` via libsyndicate.  Returns the
    plaintext, or None on failure.
    """
    rc, data = c_syndicate.decrypt_data(sender_pubkey_pem, receiver_privkey_pem, enc_data)
    if rc != 0:
        log.error("decrypt_data rc = %s" % rc)
        return None

    return data


# -------------------------------------
def read_encrypted_file(receiver_privkey_pem, file_path, volume=GET_FROM_SESSION, sender_pubkey_pem=None):
    """
    Read a file (local or Volume) and decrypt it.  The sender's public key
    defaults to MY_PUBKEY_PEM (set by setup_storage_access()).

    Returns:
        The plaintext, or None on failure.

    Raises:
        Exception: if no sender public key is available.
    """
    global MY_PUBKEY_PEM

    use_my_key = False
    if sender_pubkey_pem is None:
        use_my_key = True
        sender_pubkey_pem = MY_PUBKEY_PEM

    if sender_pubkey_pem is None:
        raise Exception("No storage access key set.")

    print("read (%s) %s, use_my_key = %s" % (str(volume), file_path, use_my_key))

    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    # get file data
    try:
        enc_data = read_file(file_path, volume=volume)
        if enc_data == None:
            log.error("No data for %s" % file_path)
            return None
    except Exception as e:
        log.error("read_file(%s) failed" % file_path)
        log.exception(e)
        return None

    return decrypt_data(sender_pubkey_pem, receiver_privkey_pem, enc_data)


# -------------------------------------
def write_encrypted_file(receiver_pubkey_pem, file_path, data, volume=GET_FROM_SESSION, sender_privkey_pem=None):
    """
    Encrypt `data` and write it to a file (local or Volume).  The sender's
    private key defaults to MY_PRIVKEY_PEM (set by setup_storage_access()).

    Returns:
        True on success, False on failure.

    Raises:
        Exception: if no sender private key is available.
    """
    global MY_PRIVKEY_PEM

    if sender_privkey_pem is None:
        sender_privkey_pem = MY_PRIVKEY_PEM

    if sender_privkey_pem is None:
        raise Exception("No storage access key set.")

    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    print("write (%s) %s" % (str(volume), file_path))

    enc_data = encrypt_data(sender_privkey_pem, receiver_pubkey_pem, data)
    if enc_data is None:
        log.error("encrypt_data returned None")
        return False

    try:
        rc = write_file(file_path, enc_data, volume=volume)
        if not rc:
            log.error("write_file failed")
            return False
        return True
    except Exception as e:
        log.error("write_file(%s) failed" % file_path)
        log.exception(e)
        return False


# -------------------------------------
def delete_file(file_path, volume=GET_FROM_SESSION):
    """
    Unlink a file, locally or on the (session) Volume.  Deleting an
    already-missing local file counts as success (idempotent delete).

    Returns:
        True on success, False on failure.
    """
    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    if volume is None:
        try:
            os.unlink(file_path)
        except OSError as oe:
            # BUGFIX: os.unlink raises ENOENT (not EEXIST) for a missing
            # file; treat "already gone" as success, anything else as
            # failure.
            if oe.errno == errno.ENOENT:
                return True
            log.exception(oe)
            return False
        except Exception as e:
            log.exception(e)
            return False

        return True

    else:
        try:
            volume.unlink(file_path)
            return True
        except Exception as e:
            log.exception(e)
            return False


# -------------------------------------
def erase_file(file_path, volume=GET_FROM_SESSION):
    """
    Best-effort secure delete: for local files, overwrite the contents
    with random bytes several times before unlinking; Volume files fall
    back to a plain delete.

    NOTE(review): open(..., "w") truncates first, so the overwrite hits a
    fresh allocation -- the original on-disk blocks may survive; this is
    best-effort only.
    """
    # unlike delete_file, overwrite the file data with random bits a few times if local
    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    if volume is None:
        try:
            size = os.stat(file_path).st_size
            fd = open(file_path, "w")
            for i in xrange(0, 10):
                fd.seek(0)

                # overwrite with junk
                buf = ''.join(chr(random.randint(0, 255)) for i in xrange(0, size))
                fd.write(buf)
                fd.flush()

            fd.close()

            # now safe to unlink
            os.unlink(file_path)
            return True
        except Exception as e:
            log.exception(e)
            return False

    else:
        return delete_file(file_path, volume=volume)


# -------------------------------------
def path_exists(file_path, volume=GET_FROM_SESSION):
    """Return True if the path exists locally or on the (session) Volume."""
    if volume == GET_FROM_SESSION:
        volume = singleton.get_volume()

    if volume is None:
        return os.path.exists(file_path)
    else:
        try:
            statbuf = volume.stat(file_path)
            if statbuf is None:
                return False
            elif isinstance(statbuf, int) and statbuf < 0:
                return False
            else:
                return True
        except Exception as e:
            return False


# -------------------------------------
def cache_path(cache_name):
    """Map a cache entry name to its local path under CACHE_DIR."""
    ret = local_path(CACHE_DIR, cache_name)
    return ret


# -------------------------------------
def purge_cache(cache_name):
    """Remove a cache entry; missing entries are not an error."""
    # purge cache
    try:
        cpath = cache_path(cache_name)
        os.unlink(cpath)
    except Exception as e:
        log.info("No cache to purge")
        pass

    return True


# -------------------------------------
def cache_data(receiver_pubkey_pem, cache_name, data, sender_privkey_pem=None):
    """
    Pickle `data` and store it encrypted in the local cache under
    `cache_name` (subdirectories are created as needed).

    Returns:
        True on success, False on failure.
    """
    global CACHE_DIR

    if "/" in cache_name:
        setup_dirs([os.path.dirname(cache_name)], prefix=CACHE_DIR, volume=None)

    try:
        data_serialized = pickle.dumps(data)
    except Exception as e:
        log.error("Failed to serialize data for caching")
        return False

    return write_encrypted_file(receiver_pubkey_pem, cache_path(cache_name), data_serialized,
                                volume=None, sender_privkey_pem=sender_privkey_pem)


# -------------------------------------
def get_cached_data(receiver_privkey_pem, cache_name, sender_pubkey_pem=None):
    """
    Load, decrypt, and unpickle a cache entry.  A corrupt entry is purged.

    Returns:
        The cached object, or None on a cache miss or failure.
    """
    cp = cache_path(cache_name)
    if path_exists(cp, volume=None):
        # BUGFIX: forward the caller's sender_pubkey_pem; previously the
        # literal None was passed, ignoring the argument.
        data_serialized = read_encrypted_file(receiver_privkey_pem, cp, volume=None,
                                              sender_pubkey_pem=sender_pubkey_pem)
        if data_serialized != None:
            # cache hit
            try:
                data = pickle.loads(data_serialized)
            except Exception as e:
                log.warning("Failed to deserialize cache")
                purge_cache(cache_name)
                return None
            else:
                return data
        else:
            return None
    else:
        return None


# -------------------------------------
if __name__ == "__main__":
    # Self-test: exercises serialization, encrypted file I/O, and the
    # local cache against a throwaway test Volume.  The key pair below is
    # a fixed test fixture, not a production secret.
    pubkey_str = """
-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxwhi2mh+f/Uxcx6RuO42
EuVpxDHuciTMguJygvAHEuGTM/0hEW04Im1LfXldfpKv772XrCq+M6oKfUiee3tl
sVhTf+8SZfbTdR7Zz132kdP1grNafGrp57mkOwxjFRE3FA23T1bHXpIaEcdhBo0R
rXyEnxpJmnLyNYHaLN8rTOig5WFbnmhIZD+xCNtG7hFy39hKt+vNTWK98kMCOMsY
QPywYw8nJaax/kY5SEiUup32BeZWV9HRljjJYlB5kMdzeAXcjQKvn5y47qmluVmx
L1LRX5T2v11KLSpArSDO4At5qPPnrXhbsH3C2Z5L4jqStdLYB5ZYZdaAsaRKcc8V
WpsmzZaFExJ9Nj05sDS1YMFMvoINqaPEftS6Be+wgF8/klZoHFkuslUNLK9k2f65
A7d9Fn/B42n+dCDYx0SR6obABd89cR8/AASkZl3QKeCzW/wl9zrt5dL1iydOq2kw
JtgiKSCt6m7Hwx2kwHBGI8zUfNMBlfIlFu5CP+4xLTOlRdnXqYPylT56JQcjA2CB
hGBRJQFWVutrVtTXlbvT2OmUkRQT9+P5wr0c7fl+iOVXh2TwfaFeug9Fm8QWoGyP
GuKX1KO5JLQjcNTnZ3h3y9LIWHsCTCf2ltycUBguq8Mwzb5df2EkOVgFeLTfWyR2
lPCia/UWfs9eeGgdGe+Wr4sCAwEAAQ==
-----END PUBLIC KEY-----
""".strip()

    privkey_str = """
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAxwhi2mh+f/Uxcx6RuO42EuVpxDHuciTMguJygvAHEuGTM/0h
EW04Im1LfXldfpKv772XrCq+M6oKfUiee3tlsVhTf+8SZfbTdR7Zz132kdP1grNa
fGrp57mkOwxjFRE3FA23T1bHXpIaEcdhBo0RrXyEnxpJmnLyNYHaLN8rTOig5WFb
nmhIZD+xCNtG7hFy39hKt+vNTWK98kMCOMsYQPywYw8nJaax/kY5SEiUup32BeZW
V9HRljjJYlB5kMdzeAXcjQKvn5y47qmluVmxL1LRX5T2v11KLSpArSDO4At5qPPn
rXhbsH3C2Z5L4jqStdLYB5ZYZdaAsaRKcc8VWpsmzZaFExJ9Nj05sDS1YMFMvoIN
qaPEftS6Be+wgF8/klZoHFkuslUNLK9k2f65A7d9Fn/B42n+dCDYx0SR6obABd89
cR8/AASkZl3QKeCzW/wl9zrt5dL1iydOq2kwJtgiKSCt6m7Hwx2kwHBGI8zUfNMB
lfIlFu5CP+4xLTOlRdnXqYPylT56JQcjA2CBhGBRJQFWVutrVtTXlbvT2OmUkRQT
9+P5wr0c7fl+iOVXh2TwfaFeug9Fm8QWoGyPGuKX1KO5JLQjcNTnZ3h3y9LIWHsC
TCf2ltycUBguq8Mwzb5df2EkOVgFeLTfWyR2lPCia/UWfs9eeGgdGe+Wr4sCAwEA
AQKCAgEAl1fvIzkWB+LAaVMzZ7XrdE7yL/fv4ufMgzIB9ULjfh39Oykd/gxZBQSq
xIyG5XpRQjGepZIS82I3e7C+ohLg7wvE4qE+Ej6v6H0/DonatmTAaVRMWBNMLaJi
GWx/40Ml6J/NZg0MqQLbw+0iAENAz/TBO+JXWZRSTRGif0Brwp2ZyxJPApM1iNVN
nvhuZRTrjv7/Qf+SK2gMG62MgPceSDxdO9YH5H9vFXT8ldRrE8SNkUrnGPw5LMud
hp6+8bJYQUnjvW3vcaVQklp55AkpzFxjTRUO09DyWImqiHtME91l820UHDpLLldS
1PujpDD54jyjfJF8QmPrlCjjWssm5ll8AYpZFn1mp3SDY6CQhKGdLXjmPlBvEaoR
7yfNa7JRuJAM8ntrfxj3fk0B8t2e5NMylZsBICtposCkVTXpBVJt50gs7hHjiR3/
Q/P7t19ywEMlHx5edy+E394q8UL94YRf7gYEF4VFCxT1k3BhYGw8m3Ov22HS7EZy
2vFqro+RMOR7VkQZXvGecsaZ/5xhL8YIOS+9S90P0tmMVYmuMgp7L+Lm6DZi0Od6
cwKxB7LYabzrpfHXSIfqE5JUgpkV5iTVo4kbmHsrBQB1ysNFR74E1PJFy5JuFfHZ
Tpw0KDBCIXVRFFanQ19pCcbP85MucKWif/DhjOr6nE/js/8O6XECggEBAN0lhYmq
cPH9TucoGnpoRv2o+GkA0aA4HMIXQq4u89LNxOH+zBiom47AAj2onWl+Zo3Dliyy
jBSzKkKSVvBwsuxgz9xq7VNBDiaK+wj1rS6MPqa/0Iyz5Fhi0STp2Fm/elDonYJ8
Jp8MRIWDk0luMgaAh7DuKpIm9dsg45wQmm/4LAGJw6WbbbZ4TUGrT684qIRXk8Q5
1Z08hgSOKUIyDwmv4LqenV6n4XemTq3zs8R0abQiJm81YqSOXwsJppXXgZoUM8sg
L/gxX5pXxCzAfC2QpLI94VJcVtRUNGBK5rMmrANd2uITg6h/wDCy9FxRKWG8f+p4
qAcxr/oXXXebI98CggEBAOZmppx+PoRWaZM547VebUrEDKuZ/lp10hXnr3gkDAKz
2av8jy3YdtCKq547LygpBbjd1i/zFNDZ/r4XT+w/PfnNRMuJR5td29T+lWMi3Hm3
ant/o8qAyVISgkRW1YQjTAhPwYbHc2Y24n/roCutrtIBG9WMLQNEbJUXjU5uNF/0
+ezKKNFIruCX/JafupBfXl1zAEVuT0IkqlHbmSL4oxYafhPorLzjIPLiJgjAB6Wb
iIOVIUJt61O6vkmeBWOP+bj5x1be6h35MlhKT+p4rMimaUALvbGlGQBX+Bm54/cN
Ih0Kqx/gsDoD5rribQhuY0RANo1wfXdkW/ajHZihCdUCggEABO01EGAPrBRskZG/
JUL1cek1v4EZKmyVl21VOvQo0mVrIW2/tjzrWj7EzgLXnuYF+tqEmfJQVJW5N0pz
TV/1XHa7qrlnGBe27Pzjost2VDcjnitfxgKr75wj9KKRA07UtsC34ZRKd/iZ/i90
NIqT6rkqTLLBmAfuKjeNWoi0KBJrSI19Ik9YHlyHvBLI76pfdrNMw25WZ+5VPfy8
xpC+7QRSCVZHQziSOUwnLJDlTFcbk7u/B3M1A114mJJad7QZWwlgLgJFj03qR1H1
ONoA6jLyuFXQkzkjZg+KKysAALW310tb+PVeVX6jFXKnJvdX6Kl+YAbYF3Dv7q5e
kq+OGQKCAQEAngEnoYqyNO9N17mLf4YSTYPFbKle1YqXWI5at3mBAxlz3Y6GYlpg
oQN4TjsoS9JWKkF38coyLEhTeulh1hJI3lb3Jt4uTU5AxAETUblGmfI/BBK0sNtB
NRecXmFubAAI1GpdvaBqc16QVkmwvkON8FbyT7Ch7euuy1Arh+3r3SKTgt/gviWq
SDvy7Rj9SKUegdesB/FuSV37r8d5bZI1xaLFc8HNNHxOzEJq8vU+SUQwioxrErNu
/yzB8pp795t1FnW1Ts3woD2VWRcdVx8K30/APjvPC1S9oI6zhnEE9Rf8nQ4D7QiZ
0i96vA8r1uxdByFCSB0s7gPVTX7vfQxzQQKCAQAnNWvIwXR1W40wS5kgKwNd9zyO
+G9mWRvQgM3PptUXM6XV1kSPd+VofGvQ3ApYJ3I7f7VPPNTPVLI57vUkhOrKbBvh
Td3OGzhV48behsSmOEsXkNcOiogtqQsACZzgzI+46akS87m+OHhP8H3KcdsvGUNM
xwHi4nnnVSMQ+SWtSuCHgA+1gX5YlNKDjq3RLCRG//9XHIApfc9c52TJKZukLpfx
chit4EZW1ws/JPkQ+Yer91mCQaSkPnIBn2crzce4yqm2dOeHlhsfo25Wr37uJtWY
X8H/SaEdrJv+LaA61Fy4rJS/56Qg+LSy05lISwIHBu9SmhTuY1lBrr9jMa3Q
-----END RSA PRIVATE KEY-----
""".strip()

    import session

    fake_module = collections.namedtuple("FakeModule", ["VOLUME_STORAGE_DIRS", "LOCAL_STORAGE_DIRS"])
    fake_vol = session.do_test_volume("/tmp/storage-test/volume")
    singleton.set_volume(fake_vol)

    print("------- setup --------")
    fake_mod = fake_module(LOCAL_STORAGE_DIRS=['/testroot-local'], VOLUME_STORAGE_DIRS=['/testroot-volume'])
    assert setup_storage(privkey_str, "/apps/syndicatemail/data", "/tmp/storage-test/local", [fake_mod]), "setup_storage failed"

    foo_class = collections.namedtuple("Foo", ["bar", "baz"])
    goo_class = collections.namedtuple("Xyzzy", ["foo", "baz"])

    foo = foo_class(bar="a", baz="b")
    goo = goo_class(foo="c", baz="d")

    print("------- serialization --------")
    print("foo == %s" % str(foo))
    print("goo == %s" % str(goo))

    foo_json = tuple_to_json(foo)
    print("foo_json == %s" % foo_json)

    goo_json = tuple_to_json(goo)
    print("goo_json == %s" % goo_json)

    foo2 = json_to_tuple(foo_class, foo_json)
    goo2 = json_to_tuple(goo_class, goo_json)

    print("foo2 == %s" % str(foo2))
    print("goo2 == %s" % str(goo2))

    print("------ file I/O -------")
    data = "abcde"
    path = volume_path("/test")
    rc = write_encrypted_file(pubkey_str, path, data)
    if not rc:
        raise Exception("write_encrypted_file failed")

    buf = read_encrypted_file(privkey_str, path)
    if not buf:
        raise Exception("read_encrypted_file failed")

    if buf != data:
        raise Exception("data not equal: got '%s', expected '%s'" % (buf, data))

    delete_file(path)

    print("------------- cache --------------")
    cache_data(pubkey_str, "/cache/test", "poop")
    dat = get_cached_data(privkey_str, "/cache/test")
    assert dat == "poop", "get_cached_data( cache_data( %s ) ) == %s" % ("poop", dat)
    purge_cache("/cache/test")
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.pipeline import ClientRawResponse

from .. import models


class IntModel(object):
    """IntModel operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer

        self.config = config

    def _get(self, url, return_type, custom_headers, raw, operation_config):
        """Shared GET pipeline for all integer endpoints.

        Issues a GET against *url*, raises
        :class:`models.ErrorException` on any non-200 status, and
        deserializes a 200 body as *return_type* ('int' or 'long').
        Returns the deserialized value, or a
        :class:`ClientRawResponse` wrapping it when *raw* is true.
        """
        # No query parameters are used by any of these test endpoints.
        query_parameters = {}

        # Construct headers; caller-supplied headers win on key collision.
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize(return_type, response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def _put(self, url, body, body_type, custom_headers, raw, operation_config):
        """Shared PUT pipeline for all integer endpoints.

        Serializes *body* as *body_type* ('int' or 'long'), issues a PUT
        against *url*, and raises :class:`models.ErrorException` on any
        non-200 status. Returns None, or a :class:`ClientRawResponse`
        (with no deserialized payload) when *raw* is true.
        """
        query_parameters = {}

        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(body, body_type)

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    # NOTE: every public method previously used the mutable default
    # `custom_headers={}`; the defaults below are None, which is
    # behavior-compatible (both are falsy) and avoids the shared-default
    # pitfall.

    def get_null(
            self, custom_headers=None, raw=False, **operation_config):
        """Get null Int value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: int
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/null', 'int',
                         custom_headers, raw, operation_config)

    def get_invalid(
            self, custom_headers=None, raw=False, **operation_config):
        """Get invalid Int value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: int
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/invalid', 'int',
                         custom_headers, raw, operation_config)

    def get_overflow_int32(
            self, custom_headers=None, raw=False, **operation_config):
        """Get overflow Int32 value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: int
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/overflowint32', 'int',
                         custom_headers, raw, operation_config)

    def get_underflow_int32(
            self, custom_headers=None, raw=False, **operation_config):
        """Get underflow Int32 value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: int
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/underflowint32', 'int',
                         custom_headers, raw, operation_config)

    def get_overflow_int64(
            self, custom_headers=None, raw=False, **operation_config):
        """Get overflow Int64 value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: long
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/overflowint64', 'long',
                         custom_headers, raw, operation_config)

    def get_underflow_int64(
            self, custom_headers=None, raw=False, **operation_config):
        """Get underflow Int64 value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: long
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/underflowint64', 'long',
                         custom_headers, raw, operation_config)

    def put_max32(
            self, int_body, custom_headers=None, raw=False, **operation_config):
        """Put max int32 value.

        :param int_body:
        :type int_body: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._put('/int/max/32', int_body, 'int',
                         custom_headers, raw, operation_config)

    def put_max64(
            self, int_body, custom_headers=None, raw=False, **operation_config):
        """Put max int64 value.

        :param int_body:
        :type int_body: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._put('/int/max/64', int_body, 'long',
                         custom_headers, raw, operation_config)

    def put_min32(
            self, int_body, custom_headers=None, raw=False, **operation_config):
        """Put min int32 value.

        :param int_body:
        :type int_body: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._put('/int/min/32', int_body, 'int',
                         custom_headers, raw, operation_config)

    def put_min64(
            self, int_body, custom_headers=None, raw=False, **operation_config):
        """Put min int64 value.

        :param int_body:
        :type int_body: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._put('/int/min/64', int_body, 'long',
                         custom_headers, raw, operation_config)

    def get_unix_time(
            self, custom_headers=None, raw=False, **operation_config):
        """Get datetime encoded as Unix time value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: long
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/unixtime', 'long',
                         custom_headers, raw, operation_config)

    def put_unix_time_date(
            self, int_body, custom_headers=None, raw=False, **operation_config):
        """Put datetime encoded as Unix time.

        :param int_body:
        :type int_body: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._put('/int/unixtime', int_body, 'long',
                         custom_headers, raw, operation_config)

    def get_invalid_unix_time(
            self, custom_headers=None, raw=False, **operation_config):
        """Get invalid Unix time value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: long
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/invalidunixtime', 'long',
                         custom_headers, raw, operation_config)

    def get_null_unix_time(
            self, custom_headers=None, raw=False, **operation_config):
        """Get null Unix time value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: long
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get('/int/nullunixtime', 'long',
                         custom_headers, raw, operation_config)
# # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import json import netrc from optparse import SUPPRESS_HELP import os import re import shutil import socket import subprocess import sys import tempfile import time from pyversion import is_python3 if is_python3(): import http.cookiejar as cookielib import urllib.error import urllib.parse import urllib.request import xmlrpc.client else: import cookielib import imp import urllib2 import urlparse import xmlrpclib urllib = imp.new_module('urllib') urllib.error = urllib2 urllib.parse = urlparse urllib.request = urllib2 xmlrpc = imp.new_module('xmlrpc') xmlrpc.client = xmlrpclib try: import threading as _threading except ImportError: import dummy_threading as _threading try: import resource def _rlimit_nofile(): return resource.getrlimit(resource.RLIMIT_NOFILE) except ImportError: def _rlimit_nofile(): return (256, 256) try: import multiprocessing except ImportError: multiprocessing = None from git_command import GIT, git_require from git_config import GetUrlCookieFile from git_refs import R_HEADS, HEAD import gitc_utils from project import Project from project import RemoteSpec from command import Command, MirrorSafeCommand from error import RepoChangedException, GitError, ManifestParseError from project import SyncBuffer from progress import Progress from wrapper import Wrapper from manifest_xml import GitcManifest _ONE_DAY_S = 24 * 60 * 60 
class _FetchError(Exception): """Internal error thrown in _FetchHelper() when we don't want stack trace.""" pass class Sync(Command, MirrorSafeCommand): jobs = 1 common = True helpSummary = "Update working tree to the latest revision" helpUsage = """ %prog [<project>...] """ helpDescription = """ The '%prog' command synchronizes local project directories with the remote repositories specified in the manifest. If a local project does not yet exist, it will clone a new local directory from the remote repository and set up tracking branches as specified in the manifest. If the local project already exists, '%prog' will update the remote branches and rebase any new local changes on top of the new remote changes. '%prog' will synchronize all projects listed at the command line. Projects can be specified either by name, or by a relative or absolute path to the project's local directory. If no projects are specified, '%prog' will synchronize all projects listed in the manifest. The -d/--detach option can be used to switch specified projects back to the manifest revision. This option is especially helpful if the project is currently on a topic branch, but the manifest revision is temporarily needed. The -s/--smart-sync option can be used to sync to a known good build as specified by the manifest-server element in the current manifest. The -t/--smart-tag option is similar and allows you to specify a custom tag/label. The -u/--manifest-server-username and -p/--manifest-server-password options can be used to specify a username and password to authenticate with the manifest server when using the -s or -t option. If -u and -p are not specified when using the -s or -t option, '%prog' will attempt to read authentication credentials for the manifest server from the user's .netrc file. '%prog' will not use authentication credentials from -u/-p or .netrc if the manifest server specified in the manifest file already includes credentials. 
The -f/--force-broken option can be used to proceed with syncing other projects if a project sync fails. The --force-sync option can be used to overwrite existing git directories if they have previously been linked to a different object direcotry. WARNING: This may cause data to be lost since refs may be removed when overwriting. The --no-clone-bundle option disables any attempt to use $URL/clone.bundle to bootstrap a new Git repository from a resumeable bundle file on a content delivery network. This may be necessary if there are problems with the local Python HTTP client or proxy configuration, but the Git binary works. The --fetch-submodules option enables fetching Git submodules of a project from server. The -c/--current-branch option can be used to only fetch objects that are on the branch specified by a project's revision. The --optimized-fetch option can be used to only fetch projects that are fixed to a sha1 revision if the sha1 revision does not already exist locally. The --prune option can be used to remove any refs that no longer exist on the remote. SSH Connections --------------- If at least one project remote URL uses an SSH connection (ssh://, git+ssh://, or user@host:path syntax) repo will automatically enable the SSH ControlMaster option when connecting to that host. This feature permits other projects in the same '%prog' session to reuse the same SSH tunnel, saving connection setup overheads. To disable this behavior on UNIX platforms, set the GIT_SSH environment variable to 'ssh'. For example: export GIT_SSH=ssh %prog Compatibility ~~~~~~~~~~~~~ This feature is automatically disabled on Windows, due to the lack of UNIX domain socket support. This feature is not compatible with url.insteadof rewrites in the user's ~/.gitconfig. '%prog' is currently not able to perform the rewrite early enough to establish the ControlMaster tunnel. If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or later is required to fix a server side protocol bug. 
""" def _Options(self, p, show_smart=True): try: self.jobs = self.manifest.default.sync_j except ManifestParseError: self.jobs = 1 p.add_option('-f', '--force-broken', dest='force_broken', action='store_true', help="continue sync even if a project fails to sync") p.add_option('--force-sync', dest='force_sync', action='store_true', help="overwrite an existing git directory if it needs to " "point to a different object directory. WARNING: this " "may cause loss of data") p.add_option('-l', '--local-only', dest='local_only', action='store_true', help="only update working tree, don't fetch") p.add_option('-n', '--network-only', dest='network_only', action='store_true', help="fetch only, don't update working tree") p.add_option('-d', '--detach', dest='detach_head', action='store_true', help='detach projects back to manifest revision') p.add_option('-c', '--current-branch', dest='current_branch_only', action='store_true', help='fetch only current branch from server') p.add_option('-q', '--quiet', dest='quiet', action='store_true', help='be more quiet') p.add_option('-j', '--jobs', dest='jobs', action='store', type='int', help="projects to fetch simultaneously (default %d)" % self.jobs) p.add_option('-m', '--manifest-name', dest='manifest_name', help='temporary manifest to use for this sync', metavar='NAME.xml') p.add_option('--no-clone-bundle', dest='no_clone_bundle', action='store_true', help='disable use of /clone.bundle on HTTP/HTTPS') p.add_option('-u', '--manifest-server-username', action='store', dest='manifest_server_username', help='username to authenticate with the manifest server') p.add_option('-p', '--manifest-server-password', action='store', dest='manifest_server_password', help='password to authenticate with the manifest server') p.add_option('--fetch-submodules', dest='fetch_submodules', action='store_true', help='fetch submodules from server') p.add_option('--no-tags', dest='no_tags', action='store_true', help="don't fetch tags") 
p.add_option('--optimized-fetch', dest='optimized_fetch', action='store_true', help='only fetch projects fixed to sha1 if revision does not exist locally') p.add_option('--prune', dest='prune', action='store_true', help='delete refs that no longer exist on the remote') if show_smart: p.add_option('-s', '--smart-sync', dest='smart_sync', action='store_true', help='smart sync using manifest from the latest known good build') p.add_option('-t', '--smart-tag', dest='smart_tag', action='store', help='smart sync using manifest from a known tag') g = p.add_option_group('repo Version options') g.add_option('--no-repo-verify', dest='no_repo_verify', action='store_true', help='do not verify repo source code') g.add_option('--repo-upgraded', dest='repo_upgraded', action='store_true', help=SUPPRESS_HELP) def _FetchProjectList(self, opt, projects, sem, *args, **kwargs): """Main function of the fetch threads when jobs are > 1. Delegates most of the work to _FetchHelper. Args: opt: Program options returned from optparse. See _Options(). projects: Projects to fetch. sem: We'll release() this semaphore when we exit so that another thread can be started up. *args, **kwargs: Remaining arguments to pass to _FetchHelper. See the _FetchHelper docstring for details. """ try: for project in projects: success = self._FetchHelper(opt, project, *args, **kwargs) if not success and not opt.force_broken: break finally: sem.release() def _FetchHelper(self, opt, project, lock, fetched, pm, err_event): """Fetch git objects for a single project. Args: opt: Program options returned from optparse. See _Options(). project: Project object for the project to fetch. lock: Lock for accessing objects that are shared amongst multiple _FetchHelper() threads. fetched: set object that we will add project.gitdir to when we're done (with our lock held). pm: Instance of a Project object. We will call pm.update() (with our lock held). 
    err_event: We'll set this event in the case of an error (after printing
        out info about the error).

    Returns:
      Whether the fetch was successful.
    """
    # We'll set to true once we've locked the lock.
    did_lock = False

    if not opt.quiet:
      print('Fetching project %s' % project.name)

    # Encapsulate everything in a try/except/finally so that:
    # - We always set err_event in the case of an exception.
    # - We always make sure we call sem.release().
    # - We always make sure we unlock the lock if we locked it.
    try:
      try:
        start = time.time()
        success = project.Sync_NetworkHalf(
          quiet=opt.quiet,
          current_branch_only=opt.current_branch_only,
          force_sync=opt.force_sync,
          clone_bundle=not opt.no_clone_bundle,
          no_tags=opt.no_tags, archive=self.manifest.IsArchive,
          optimized_fetch=opt.optimized_fetch,
          prune=opt.prune)
        self._fetch_times.Set(project, time.time() - start)

        # Lock around all the rest of the code, since printing, updating a set
        # and Progress.update() are not thread safe.
        lock.acquire()
        did_lock = True

        if not success:
          err_event.set()
          print('error: Cannot fetch %s' % project.name, file=sys.stderr)
          if opt.force_broken:
            print('warn: --force-broken, continuing to sync', file=sys.stderr)
          else:
            # _FetchError unwinds to the outer handler without extra noise.
            raise _FetchError()

        fetched.add(project.gitdir)
        pm.update()
      except _FetchError:
        pass
      except Exception as e:
        print('error: Cannot fetch %s (%s: %s)' \
            % (project.name, type(e).__name__, str(e)), file=sys.stderr)
        err_event.set()
        raise
    finally:
      if did_lock:
        lock.release()

    return success

  def _Fetch(self, projects, opt):
    """Fetch the network half of every project, in up to self.jobs threads.

    Projects sharing an objdir are grouped so they are fetched serially by
    the same worker (concurrent fetches into one objdir would conflict).
    Returns the set of gitdirs successfully fetched.
    """
    fetched = set()
    lock = _threading.Lock()
    pm = Progress('Fetching projects', len(projects))

    objdir_project_map = dict()
    for project in projects:
      objdir_project_map.setdefault(project.objdir, []).append(project)

    threads = set()
    sem = _threading.Semaphore(self.jobs)
    err_event = _threading.Event()
    for project_list in objdir_project_map.values():
      # Check for any errors before running any more tasks.
      # ...we'll let existing threads finish, though.
      if err_event.isSet() and not opt.force_broken:
        break

      sem.acquire()
      kwargs = dict(opt=opt,
                    projects=project_list,
                    sem=sem,
                    lock=lock,
                    fetched=fetched,
                    pm=pm,
                    err_event=err_event)
      if self.jobs > 1:
        t = _threading.Thread(target = self._FetchProjectList,
                              kwargs = kwargs)
        # Ensure that Ctrl-C will not freeze the repo process.
        t.daemon = True
        threads.add(t)
        t.start()
      else:
        # Single-job mode: run inline, no thread overhead.
        self._FetchProjectList(**kwargs)

    for t in threads:
      t.join()

    # If we saw an error, exit with code 1 so that other scripts can check.
    if err_event.isSet():
      print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
      sys.exit(1)

    pm.end()
    self._fetch_times.Save()

    if not self.manifest.IsArchive:
      self._GCProjects(projects)

    return fetched

  def _GCProjects(self, projects):
    """Run 'git gc --auto' over every fetched gitdir, possibly in parallel.

    Projects whose name appears more than once in the manifest share objects,
    so pruning is disabled for them before gc runs.
    """
    gc_gitdirs = {}
    for project in projects:
      if len(project.manifest.GetProjectsWithName(project.name)) > 1:
        print('Shared project %s found, disabling pruning.' % project.name)
        project.bare_git.config('--replace-all', 'gc.pruneExpire', 'never')
      gc_gitdirs[project.gitdir] = project.bare_git

    # 'git -c key=val' (needed to pass pack.threads) requires git >= 1.7.2.
    has_dash_c = git_require((1, 7, 2))
    if multiprocessing and has_dash_c:
      cpu_count = multiprocessing.cpu_count()
    else:
      cpu_count = 1
    jobs = min(self.jobs, cpu_count)

    if jobs < 2:
      for bare_git in gc_gitdirs.values():
        bare_git.gc('--auto')
      return

    # Split the available cores between concurrent gc invocations.
    # NOTE(review): under Python 3 this is true division and yields a float
    # (e.g. pack.threads=1.5) — presumably '//' was intended; confirm against
    # the project's supported interpreter.
    config = {'pack.threads': cpu_count / jobs if cpu_count > jobs else 1}

    threads = set()
    sem = _threading.Semaphore(jobs)
    err_event = _threading.Event()

    def GC(bare_git):
      try:
        try:
          bare_git.gc('--auto', config=config)
        except GitError:
          err_event.set()
      except:
        # Unexpected failure: record it and re-raise so the thread dies loudly.
        err_event.set()
        raise
      finally:
        sem.release()

    for bare_git in gc_gitdirs.values():
      if err_event.isSet():
        break
      sem.acquire()
      t = _threading.Thread(target=GC, args=(bare_git,))
      t.daemon = True
      threads.add(t)
      t.start()

    for t in threads:
      t.join()

    if err_event.isSet():
      print('\nerror: Exited sync due to gc errors', file=sys.stderr)
      sys.exit(1)

  def _ReloadManifest(self, manifest_name=None):
    """Reload the manifest, optionally overriding it with manifest_name."""
    if manifest_name:
      # Override calls _Unload already
      self.manifest.Override(manifest_name)
    else:
      self.manifest._Unload()

  def _DeleteProject(self, path):
    """Remove an obsolete project worktree, sparing nested git projects.

    Returns 0 on success, -1 if anything could not be removed (the user is
    told to clean up manually and re-run sync).
    """
    print('Deleting obsolete path %s' % path, file=sys.stderr)

    # Delete the .git directory first, so we're less likely to have a partially
    # working git repository around. There shouldn't be any git projects here,
    # so rmtree works.
    try:
      shutil.rmtree(os.path.join(path, '.git'))
    except OSError:
      print('Failed to remove %s' % os.path.join(path, '.git'), file=sys.stderr)
      print('error: Failed to delete obsolete path %s' % path, file=sys.stderr)
      print(' remove manually, then run sync again', file=sys.stderr)
      return -1

    # Delete everything under the worktree, except for directories that contain
    # another git project
    dirs_to_remove = []
    failed = False
    for root, dirs, files in os.walk(path):
      for f in files:
        try:
          os.remove(os.path.join(root, f))
        except OSError:
          print('Failed to remove %s' % os.path.join(root, f), file=sys.stderr)
          failed = True
      # Prune traversal below any directory holding another git project.
      dirs[:] = [d for d in dirs
                 if not os.path.lexists(os.path.join(root, d, '.git'))]
      dirs_to_remove += [os.path.join(root, d) for d in dirs
                         if os.path.join(root, d) not in dirs_to_remove]
    # Remove deepest directories first (reversed insertion order).
    for d in reversed(dirs_to_remove):
      if os.path.islink(d):
        try:
          os.remove(d)
        except OSError:
          print('Failed to remove %s' % os.path.join(root, d), file=sys.stderr)
          failed = True
      elif len(os.listdir(d)) == 0:
        try:
          os.rmdir(d)
        except OSError:
          print('Failed to remove %s' % os.path.join(root, d), file=sys.stderr)
          failed = True
        continue
    if failed:
      print('error: Failed to delete obsolete path %s' % path, file=sys.stderr)
      print(' remove manually, then run sync again', file=sys.stderr)
      return -1

    # Try deleting parent dirs if they are empty
    project_dir = path
    while project_dir != self.manifest.topdir:
      if len(os.listdir(project_dir)) == 0:
        os.rmdir(project_dir)
      else:
        break
      project_dir = os.path.dirname(project_dir)

    return 0

  def UpdateProjectList(self):
    """Rewrite .repo/project.list and delete worktrees no longer listed.

    Returns 0 on success, -1 when a removal is blocked (e.g. by uncommitted
    changes in an obsolete project).
    """
    new_project_paths = []
    for project in self.GetProjects(None, missing_ok=True):
      if project.relpath:
        new_project_paths.append(project.relpath)
    file_name = 'project.list'
    file_path = os.path.join(self.manifest.repodir, file_name)
    old_project_paths = []

    if os.path.exists(file_path):
      fd = open(file_path, 'r')
      try:
        old_project_paths = fd.read().split('\n')
      finally:
        fd.close()
      for path in old_project_paths:
        if not path:
          continue
        if path not in new_project_paths:
          # If the path has already been deleted, we don't need to do it
          gitdir = os.path.join(self.manifest.topdir, path, '.git')
          if os.path.exists(gitdir):
            # Synthesize a minimal Project so we can dirty-check and delete it.
            project = Project(
                           manifest = self.manifest,
                           name = path,
                           remote = RemoteSpec('origin'),
                           gitdir = gitdir,
                           objdir = gitdir,
                           worktree = os.path.join(self.manifest.topdir, path),
                           relpath = path,
                           revisionExpr = 'HEAD',
                           revisionId = None,
                           groups = None)

            if project.IsDirty():
              print('error: Cannot remove project "%s": uncommitted changes '
                    'are present' % project.relpath, file=sys.stderr)
              print(' commit changes, then run sync again', file=sys.stderr)
              return -1
            elif self._DeleteProject(project.worktree):
              return -1

    new_project_paths.sort()
    fd = open(file_path, 'w')
    try:
      fd.write('\n'.join(new_project_paths))
      fd.write('\n')
    finally:
      fd.close()
    return 0

  def Execute(self, opt, args):
    """Entry point for 'repo sync': validate options, optionally smart-sync,
    fetch the network half of every project, then sync working trees."""
    if opt.jobs:
      self.jobs = opt.jobs
    if self.jobs > 1:
      # Each fetch can hold several fds open; keep jobs under the rlimit.
      # NOTE(review): true division under Python 3 makes self.jobs a float —
      # presumably '//' was intended; confirm downstream consumers tolerate it.
      soft_limit, _ = _rlimit_nofile()
      self.jobs = min(self.jobs, (soft_limit - 5) / 3)

    # Reject mutually exclusive option combinations up front.
    if opt.network_only and opt.detach_head:
      print('error: cannot combine -n and -d', file=sys.stderr)
      sys.exit(1)
    if opt.network_only and opt.local_only:
      print('error: cannot combine -n and -l', file=sys.stderr)
      sys.exit(1)
    if opt.manifest_name and opt.smart_sync:
      print('error: cannot combine -m and -s', file=sys.stderr)
      sys.exit(1)
    if opt.manifest_name and opt.smart_tag:
      print('error: cannot combine -m and -t', file=sys.stderr)
      sys.exit(1)
    if opt.manifest_server_username or opt.manifest_server_password:
      if not (opt.smart_sync or opt.smart_tag):
        print('error: -u and -p may only be combined with -s or -t',
              file=sys.stderr)
        sys.exit(1)
      if None in [opt.manifest_server_username, opt.manifest_server_password]:
        print('error: both -u and -p must be given', file=sys.stderr)
        sys.exit(1)

    if opt.manifest_name:
      self.manifest.Override(opt.manifest_name)

    manifest_name = opt.manifest_name
    smart_sync_manifest_name = "smart_sync_override.xml"
    smart_sync_manifest_path = os.path.join(
      self.manifest.manifestProject.worktree, smart_sync_manifest_name)

    if opt.smart_sync or opt.smart_tag:
      # Ask the manifest server (XML-RPC) for an approved/tagged manifest
      # and write it as a local override before syncing.
      if not self.manifest.manifest_server:
        print('error: cannot smart sync: no manifest server defined in '
              'manifest', file=sys.stderr)
        sys.exit(1)

      manifest_server = self.manifest.manifest_server
      if not opt.quiet:
        print('Using manifest server %s' % manifest_server)

      if not '@' in manifest_server:
        # No credentials embedded in the URL; try -u/-p, then ~/.netrc.
        username = None
        password = None
        if opt.manifest_server_username and opt.manifest_server_password:
          username = opt.manifest_server_username
          password = opt.manifest_server_password
        else:
          try:
            info = netrc.netrc()
          except IOError:
            # .netrc file does not exist or could not be opened
            pass
          else:
            try:
              parse_result = urllib.parse.urlparse(manifest_server)
              if parse_result.hostname:
                auth = info.authenticators(parse_result.hostname)
                if auth:
                  username, _account, password = auth
                else:
                  print('No credentials found for %s in .netrc'
                        % parse_result.hostname, file=sys.stderr)
            except netrc.NetrcParseError as e:
              print('Error parsing .netrc file: %s' % e, file=sys.stderr)

        if (username and password):
          manifest_server = manifest_server.replace('://', '://%s:%s@' %
                                                    (username, password),
                                                    1)

      transport = PersistentTransport(manifest_server)
      if manifest_server.startswith('persistent-'):
        # The transport keeps the real (persistent-*) URL; the Server proxy
        # only needs a scheme xmlrpc understands.
        manifest_server = manifest_server[len('persistent-'):]

      try:
        server = xmlrpc.client.Server(manifest_server, transport=transport)
        if opt.smart_sync:
          p = self.manifest.manifestProject
          b = p.GetBranch(p.CurrentBranch)
          branch = b.merge
          if branch.startswith(R_HEADS):
            branch = branch[len(R_HEADS):]

          env = os.environ.copy()
          if 'SYNC_TARGET' in env:
            target = env['SYNC_TARGET']
            [success, manifest_str] = server.GetApprovedManifest(branch, target)
          elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
            target = '%s-%s' % (env['TARGET_PRODUCT'],
                                env['TARGET_BUILD_VARIANT'])
            [success, manifest_str] = server.GetApprovedManifest(branch, target)
          else:
            [success, manifest_str] = server.GetApprovedManifest(branch)
        else:
          assert(opt.smart_tag)
          [success, manifest_str] = server.GetManifest(opt.smart_tag)

        if success:
          manifest_name = smart_sync_manifest_name
          try:
            f = open(smart_sync_manifest_path, 'w')
            try:
              f.write(manifest_str)
            finally:
              f.close()
          except IOError as e:
            print('error: cannot write manifest to %s:\n%s'
                  % (smart_sync_manifest_path, e),
                  file=sys.stderr)
            sys.exit(1)
          self._ReloadManifest(manifest_name)
        else:
          print('error: manifest server RPC call failed: %s' %
                manifest_str, file=sys.stderr)
          sys.exit(1)
      except (socket.error, IOError, xmlrpc.client.Fault) as e:
        print('error: cannot connect to manifest server %s:\n%s'
              % (self.manifest.manifest_server, e), file=sys.stderr)
        sys.exit(1)
      except xmlrpc.client.ProtocolError as e:
        print('error: cannot connect to manifest server %s:\n%d %s'
              % (self.manifest.manifest_server, e.errcode, e.errmsg),
              file=sys.stderr)
        sys.exit(1)
    else:  # Not smart sync or smart tag mode
      # Drop any stale override left behind by a previous smart sync.
      if os.path.isfile(smart_sync_manifest_path):
        try:
          os.remove(smart_sync_manifest_path)
        except OSError as e:
          print('error: failed to remove existing smart sync override manifest: %s' %
                e, file=sys.stderr)

    rp = self.manifest.repoProject
    rp.PreSync()

    mp = self.manifest.manifestProject
    mp.PreSync()

    if opt.repo_upgraded:
      _PostRepoUpgrade(self.manifest, quiet=opt.quiet)

    if not opt.local_only:
      mp.Sync_NetworkHalf(quiet=opt.quiet,
                          current_branch_only=opt.current_branch_only,
                          no_tags=opt.no_tags,
                          optimized_fetch=opt.optimized_fetch)

    if mp.HasChanges:
      # The manifest itself changed; apply it and reload before continuing.
      syncbuf = SyncBuffer(mp.config)
      mp.Sync_LocalHalf(syncbuf)
      if not syncbuf.Finish():
        sys.exit(1)
      self._ReloadManifest(manifest_name)
      if opt.jobs is None:
        self.jobs = self.manifest.default.sync_j

    if self.gitc_manifest:
      # GITC mode: split projects into those opened locally (synced as normal
      # below) and those handled by the GITC client.
      gitc_manifest_projects = self.GetProjects(args,
                                                missing_ok=True)
      gitc_projects = []
      opened_projects = []
      for project in gitc_manifest_projects:
        if project.relpath in self.gitc_manifest.paths and \
           self.gitc_manifest.paths[project.relpath].old_revision:
          opened_projects.append(project.relpath)
        else:
          gitc_projects.append(project.relpath)

      if not args:
        gitc_projects = None

      if gitc_projects != [] and not opt.local_only:
        print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
        manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
        if manifest_name:
          manifest.Override(manifest_name)
        else:
          manifest.Override(self.manifest.manifestFile)
        gitc_utils.generate_gitc_manifest(self.gitc_manifest,
                                          manifest,
                                          gitc_projects)
        print('GITC client successfully synced.')

      # The opened projects need to be synced as normal, therefore we
      # generate a new args list to represent the opened projects.
      # TODO: make this more reliable -- if there's a project name/path overlap,
      # this may choose the wrong project.
      args = [os.path.relpath(self.manifest.paths[p].worktree, os.getcwd())
              for p in opened_projects]
      if not args:
        return
    all_projects = self.GetProjects(args,
                                    missing_ok=True,
                                    submodules_ok=opt.fetch_submodules)

    self._fetch_times = _FetchTimes(self.manifest)
    if not opt.local_only:
      to_fetch = []
      now = time.time()
      # Refresh the repo tool itself at most once a day.
      if _ONE_DAY_S <= (now - rp.LastFetch):
        to_fetch.append(rp)
      to_fetch.extend(all_projects)
      # Fetch historically-slow projects first so they overlap the others.
      to_fetch.sort(key=self._fetch_times.Get, reverse=True)

      fetched = self._Fetch(to_fetch, opt)
      _PostRepoFetch(rp, opt.no_repo_verify)
      if opt.network_only:
        # bail out now; the rest touches the working tree
        return

      # Iteratively fetch missing and/or nested unregistered submodules
      previously_missing_set = set()
      while True:
        self._ReloadManifest(manifest_name)
        all_projects = self.GetProjects(args,
                                        missing_ok=True,
                                        submodules_ok=opt.fetch_submodules)
        missing = []
        for project in all_projects:
          if project.gitdir not in fetched:
            missing.append(project)
        if not missing:
          break
        # Stop us from non-stopped fetching actually-missing repos: If set of
        # missing repos has not been changed from last fetch, we break.
        missing_set = set(p.name for p in missing)
        if previously_missing_set == missing_set:
          break
        previously_missing_set = missing_set
        fetched.update(self._Fetch(missing, opt))

    if self.manifest.IsMirror or self.manifest.IsArchive:
      # bail out now, we have no working tree
      return

    if self.UpdateProjectList():
      sys.exit(1)

    syncbuf = SyncBuffer(mp.config,
                         detach_head = opt.detach_head)
    pm = Progress('Syncing work tree', len(all_projects))
    for project in all_projects:
      pm.update()
      if project.worktree:
        project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
    pm.end()
    print(file=sys.stderr)
    if not syncbuf.Finish():
      sys.exit(1)

    # If there's a notice that's supposed to print at the end of the sync, print
    # it now...
    if self.manifest.notice:
      print(self.manifest.notice)

def _PostRepoUpgrade(manifest, quiet=False):
  """After repo itself was upgraded, re-run per-project upgrade hooks."""
  wrapper = Wrapper()
  if wrapper.NeedSetupGnuPG():
    wrapper.SetupGnuPG(quiet)
  for project in manifest.projects:
    if project.Exists:
      project.PostRepoUpgrade()

def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
  """If a new repo version was fetched, verify it and restart repo."""
  if rp.HasChanges:
    print('info: A new version of repo is available', file=sys.stderr)
    print(file=sys.stderr)
    if no_repo_verify or _VerifyTag(rp):
      syncbuf = SyncBuffer(rp.config)
      rp.Sync_LocalHalf(syncbuf)
      if not syncbuf.Finish():
        sys.exit(1)
      print('info: Restarting repo with latest version', file=sys.stderr)
      raise RepoChangedException(['--repo-upgraded'])
    else:
      print('warning: Skipped upgrade to unverified version', file=sys.stderr)
  else:
    if verbose:
      print('repo version %s is current' % rp.work_git.describe(HEAD),
            file=sys.stderr)

def _VerifyTag(project):
  """GPG-verify the signed tag the project is synced to.

  Returns True when verification passes (or GnuPG is unavailable, in which
  case verification is skipped), False when the revision is not a signed tag
  or 'git tag -v' fails.
  """
  gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
  if not os.path.exists(gpg_dir):
    print('warning: GnuPG was not available during last "repo init"\n'
          'warning: Cannot automatically authenticate repo."""', file=sys.stderr)
    return True

  try:
    cur = project.bare_git.describe(project.GetRevisionId())
  except GitError:
    cur = None

  # A describe output like 'v1.2-3-gabcdef' means we are past the tag, i.e.
  # not exactly on a signed release.
  if not cur \
     or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
    rev = project.revisionExpr
    if rev.startswith(R_HEADS):
      rev = rev[len(R_HEADS):]

    print(file=sys.stderr)
    print("warning: project '%s' branch '%s' is not signed"
          % (project.name, rev), file=sys.stderr)
    return False

  env = os.environ.copy()
  # NOTE(review): .encode() here puts bytes into the environment — valid on
  # Python 2, but os.environ/subprocess expect str on Python 3; confirm which
  # interpreter this file targets.
  env['GIT_DIR'] = project.gitdir.encode()
  env['GNUPGHOME'] = gpg_dir.encode()

  cmd = [GIT, 'tag', '-v', cur]
  proc = subprocess.Popen(cmd,
                          stdout = subprocess.PIPE,
                          stderr = subprocess.PIPE,
                          env = env)
  out = proc.stdout.read()
  proc.stdout.close()

  err = proc.stderr.read()
  proc.stderr.close()

  if proc.wait() != 0:
    print(file=sys.stderr)
    print(out, file=sys.stderr)
    print(err, file=sys.stderr)
    print(file=sys.stderr)
    return False
  return True

class _FetchTimes(object):
  """Persistent per-project fetch durations (.repo_fetchtimes.json).

  Durations are smoothed with an exponential moving average (weight _ALPHA
  on the newest sample) and used to schedule slow fetches first.
  """
  _ALPHA = 0.5

  def __init__(self, manifest):
    self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
    self._times = None   # lazily-loaded {project name: seconds}
    self._seen = set()   # names touched this run; others are pruned on Save

  def Get(self, project):
    self._Load()
    # Unknown projects sort as slowest (one day) so they get fetched early.
    return self._times.get(project.name, _ONE_DAY_S)

  def Set(self, project, t):
    self._Load()
    name = project.name
    old = self._times.get(name, t)
    self._seen.add(name)
    a = self._ALPHA
    self._times[name] = (a*t) + ((1-a) * old)

  def _Load(self):
    if self._times is None:
      try:
        f = open(self._path)
        try:
          self._times = json.load(f)
        finally:
          f.close()
      except (IOError, ValueError):
        # Missing or corrupt cache: discard it and start fresh.
        try:
          os.remove(self._path)
        except OSError:
          pass
        self._times = {}

  def Save(self):
    if self._times is None:
      return

    # Drop entries for projects not seen this run.
    to_delete = []
    for name in self._times:
      if name not in self._seen:
        to_delete.append(name)
    for name in to_delete:
      del self._times[name]

    try:
      f = open(self._path, 'w')
      try:
        json.dump(self._times, f, indent=2)
      finally:
        f.close()
    except (IOError, TypeError):
      # Best effort: a stale cache is worse than no cache.
      try:
        os.remove(self._path)
      except OSError:
        pass

# This is a replacement for xmlrpc.client.Transport using urllib2
# and supporting persistent-http[s]. It cannot change hosts from
# request to request like the normal transport, the real url
# is passed during initialization.
class PersistentTransport(xmlrpc.client.Transport):
  def __init__(self, orig_host):
    # The real (possibly persistent-http[s]) URL; the host/handler passed to
    # request() by xmlrpclib is ignored in favor of this.
    self.orig_host = orig_host

  def request(self, host, handler, request_body, verbose=False):
    """POST the XML-RPC request, honoring repo's cookie file and proxy."""
    with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
      # Python doesn't understand cookies with the #HttpOnly_ prefix
      # Since we're only using them for HTTP, copy the file temporarily,
      # stripping those prefixes away.
      if cookiefile:
        tmpcookiefile = tempfile.NamedTemporaryFile()
        tmpcookiefile.write("# HTTP Cookie File")
        try:
          with open(cookiefile) as f:
            for line in f:
              if line.startswith("#HttpOnly_"):
                line = line[len("#HttpOnly_"):]
              tmpcookiefile.write(line)
          tmpcookiefile.flush()

          cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
          try:
            cookiejar.load()
          except cookielib.LoadError:
            cookiejar = cookielib.CookieJar()
        finally:
          tmpcookiefile.close()
      else:
        cookiejar = cookielib.CookieJar()

      proxyhandler = urllib.request.ProxyHandler
      if proxy:
        proxyhandler = urllib.request.ProxyHandler({
            "http": proxy,
            "https": proxy })

      opener = urllib.request.build_opener(
          urllib.request.HTTPCookieProcessor(cookiejar),
          proxyhandler)

      url = urllib.parse.urljoin(self.orig_host, handler)
      parse_results = urllib.parse.urlparse(url)

      scheme = parse_results.scheme
      if scheme == 'persistent-http':
        scheme = 'http'
      if scheme == 'persistent-https':
        # If we're proxying through persistent-https, use http. The
        # proxy itself will do the https.
        if proxy:
          scheme = 'http'
        else:
          scheme = 'https'

      # Parse out any authentication information using the base class
      host, extra_headers, _ = self.get_host_info(parse_results.netloc)

      url = urllib.parse.urlunparse((
          scheme,
          host,
          parse_results.path,
          parse_results.params,
          parse_results.query,
          parse_results.fragment))

      request = urllib.request.Request(url, request_body)
      if extra_headers is not None:
        for (name, header) in extra_headers:
          request.add_header(name, header)
      request.add_header('Content-Type', 'text/xml')
      try:
        response = opener.open(request)
      except urllib.error.HTTPError as e:
        if e.code == 501:
          # We may have been redirected through a login process
          # but our POST turned into a GET. Retry.
          response = opener.open(request)
        else:
          raise

      # Stream the response into xmlrpc's parser in 1 KiB chunks.
      p, u = xmlrpc.client.getparser()
      while 1:
        data = response.read(1024)
        if not data:
          break
        p.feed(data)
      p.close()
      return u.close()

  def close(self):
    pass
import argparse
import copy
import sys

from PyFBA import fba
from PyFBA import gapfill
from PyFBA import parse
from PyFBA.metabolism import biomass
from PyFBA.parse import model_seed

__author__ = 'Rob Edwards'

"""
Gap-fill starting with an assigned functions file, and print a list of reactions in the model AND whether
the reactions were added from the assigned_functions file or from gap filling.
"""


def resolve_additional_reactions(ori_reactions, adnl_reactions, cpds, rcts, mediaset, biomass_eqn):
    """
    Iteratively resolve additional reactions that are required.

    :param cpds: Our compounds dictionary object
    :type cpds: dict
    :param ori_reactions: the set of original reactions that form the base of the model
    :type ori_reactions: set
    :param adnl_reactions: a list of tuples of how the reactions were suggested, and the set of additional reactions
    :type adnl_reactions: list of tuple
    :param rcts: our reactions object
    :type rcts: dict
    :param mediaset: our media object
    :type mediaset: set
    :param biomass_eqn: our biomass object
    :type biomass_eqn: metabolism.Reaction
    :return: set of additional reactions from all of the added_reactions
    :rtype: set
    """
    reqd_additional = set()

    while adnl_reactions:
        ori = copy.copy(ori_reactions)
        ori.update(reqd_additional)
        (how, new) = adnl_reactions.pop()
        sys.stderr.write("Testing suggestions from " + how + "\n")
        # still include the reactions from every other (not yet minimized) suggestion
        for tple in adnl_reactions:
            ori.update(tple[1])
        new_essential = gapfill.minimize_additional_reactions(ori, new, cpds, rcts, mediaset, biomass_eqn,
                                                              verbose=True)
        for new_r in new_essential:
            # Bug fix: annotate the reactions via the rcts parameter. The original
            # indexed the module-global `reactions` (only bound when run as a
            # script), silently ignoring the rcts argument and raising NameError
            # if this function were imported and called from elsewhere.
            rcts[new_r].is_gapfilled = True
            rcts[new_r].gapfill_method = how
        reqd_additional.update(new_essential)

    return reqd_additional


def report_gapfilled_model_and_exit(ori_reactions, adnl_reactions, cpds, rcts, mediaset, biomass_eqn):
    """
    Minimize the suggested reactions, print the final reaction set, and exit 0.

    Extracted helper: this identical resolve/print/exit sequence was repeated
    after every gap-filling step (and with two different, inconsistent output
    formats — now unified to the "'reactions': {...}" form).
    """
    additions = resolve_additional_reactions(ori_reactions, adnl_reactions, cpds, rcts, mediaset, biomass_eqn)
    print("'reactions': {}".format(ori_reactions.union(additions)))
    sys.exit(0)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Import a list of reactions and then iterate through our gapfilling'
                                                 ' steps to see when we get growth')
    parser.add_argument('-a', help='assigned functions file', required=True)
    parser.add_argument('-m', help='media file', required=True)
    parser.add_argument('-c', help='close genomes reactions file')
    parser.add_argument('-g', help='other genera reactions file')
    parser.add_argument('-v', help='verbose output', action='store_true')
    args = parser.parse_args()

    # read the enzyme data
    compounds, reactions, enzymes = model_seed.compounds_reactions_enzymes('gramnegative')

    reactions2run = set()
    # Bug fix: the original opened args.r, but no -r option is defined above,
    # so the script always died with AttributeError. The input file is the one
    # supplied with -a.
    with open(args.a, 'r') as f:
        for l in f:
            if l.startswith('#'):
                continue
            if "biomass" in l:
                if args.v:
                    sys.stderr.write("Biomass reaction was skipped from the list as it is auto-imported\n")
                continue
            r = l.strip()
            if r in reactions:
                reactions2run.add(r)

    media = parse.read_media_file(args.m)
    biomass_eqtn = biomass.biomass_equation('gramnegative')

    status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn, verbose=args.v)
    sys.stderr.write("For the initial run we get growth of {} which is {}\n".format(value, growth))
    if growth:
        # Bug fix: sys.exit("No need to gapfill!") exits with status 1, which
        # reads as failure to callers — growing without gap filling is success.
        sys.stderr.write("No need to gapfill!\n")
        sys.exit(0)

    added_reactions = []
    original_reactions = copy.copy(reactions2run)

    # gapfill the model

    #############################################################################################
    #                                       ESSENTIAL PROTEINS                                  #
    #############################################################################################
    essential_reactions = gapfill.suggest_essential_reactions()
    # find only the new reactions
    essential_reactions.difference_update(reactions2run)
    added_reactions.append(("essential", essential_reactions))
    reactions2run.update(essential_reactions)
    status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn)
    sys.stderr.write("After adding {} ESSENTIAL reactions we get {} (growth is {})\n\n".format(
        len(essential_reactions), value, growth))

    # if this grows then we want to find the minimal set of reactions
    # that we need to add for growth and call it good.
    if growth:
        report_gapfilled_model_and_exit(original_reactions, added_reactions,
                                        compounds, reactions, media, biomass_eqtn)

    #############################################################################################
    #                                       Media import reactions                              #
    #############################################################################################
    media_reactions = gapfill.suggest_from_media(compounds, reactions, reactions2run, media, verbose=args.v)
    added_reactions.append(("media", media_reactions))
    reactions2run.update(media_reactions)
    status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn)
    sys.stderr.write("After adding {} MEDIA reactions we get {} (growth is {})\n\n".format(
        len(media_reactions), value, growth))
    if growth:
        report_gapfilled_model_and_exit(original_reactions, added_reactions,
                                        compounds, reactions, media, biomass_eqtn)

    #############################################################################################
    #                                        Subsystems                                         #
    #############################################################################################
    subsystem_reactions = gapfill.suggest_reactions_from_subsystems(reactions, reactions2run, threshold=0.5)
    added_reactions.append(("subsystems", subsystem_reactions))
    reactions2run.update(subsystem_reactions)
    status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn)
    sys.stderr.write("After adding {} SUBSYSTEM reactions we get {} (growth is {})\n\n".format(
        len(subsystem_reactions), value, growth))
    if growth:
        report_gapfilled_model_and_exit(original_reactions, added_reactions,
                                        compounds, reactions, media, biomass_eqtn)

    #############################################################################################
    #                                        Orphan compounds                                   #
    #############################################################################################
    orphan_reactions = gapfill.suggest_by_compound(compounds, reactions, reactions2run, 1)
    added_reactions.append(("orphans", orphan_reactions))
    reactions2run.update(orphan_reactions)
    status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn)
    sys.stderr.write("After adding {} ORPHAN reactions we get {} (growth is {})\n\n".format(
        len(orphan_reactions), value, growth))
    if growth:
        report_gapfilled_model_and_exit(original_reactions, added_reactions,
                                        compounds, reactions, media, biomass_eqtn)

    #############################################################################################
    #                       Other genomes and organisms                                         #
    #############################################################################################
    close_reactions = set()
    if args.c:
        # add reactions from roles in close genomes
        close_reactions = gapfill.suggest_from_roles(args.c, reactions, True)
        # find the new reactions
        close_reactions.difference_update(reactions2run)
        added_reactions.append(("close genomes ", close_reactions))
        reactions2run.update(close_reactions)
        status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn)
        sys.stderr.write("After adding {} reactions in {} we get {} (growth is {})\n\n".format(
            len(close_reactions), args.c, value, growth))
        if growth:
            report_gapfilled_model_and_exit(original_reactions, added_reactions,
                                            compounds, reactions, media, biomass_eqtn)

    genus_reactions = set()
    if args.g:
        # add reactions from roles in similar genera
        genus_reactions = gapfill.suggest_from_roles(args.g, reactions, True)
        # find the new reactions
        genus_reactions.difference_update(reactions2run)
        added_reactions.append(("other genera", genus_reactions))
        reactions2run.update(genus_reactions)
        status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn)
        sys.stderr.write("After adding {} reactions in {} we get {} (growth is {})\n\n".format(
            len(genus_reactions), args.g, value, growth))
        if growth:
            report_gapfilled_model_and_exit(original_reactions, added_reactions,
                                            compounds, reactions, media, biomass_eqtn)

    #############################################################################################
    #                                        Probability of inclusion                           #
    #############################################################################################
    # use reactions with pLR or pRL > cutoff
    prob_reactions = gapfill.compound_probability(reactions, reactions2run, 0, True, True)
    prob_reactions.difference_update(reactions2run)
    added_reactions.append(("probability", prob_reactions))
    reactions2run.update(prob_reactions)
    status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn)
    sys.stderr.write("After adding {} PROBABILITY reactions we get {} (growth is {})\n\n".format(
        len(prob_reactions), value, growth))
    if growth:
        report_gapfilled_model_and_exit(original_reactions, added_reactions,
                                        compounds, reactions, media, biomass_eqtn)

    #############################################################################################
    #                        Reactions that [do or do not] map to proteins                      #
    #############################################################################################
    # propose other reactions that we have proteins for
    with_p_reactions = gapfill.suggest_reactions_with_proteins(reactions, True)
    # find the new reactions
    with_p_reactions.difference_update(reactions2run)
    added_reactions.append(("With proteins", with_p_reactions))
    reactions2run.update(with_p_reactions)
    status, value, growth = fba.run_fba(compounds, reactions, reactions2run, media, biomass_eqtn)
    sys.stderr.write("After adding {} ALL WITH PROTEINS reactions ".format(len(with_p_reactions)) +
                     " we get {} (growth is {})\n\n".format(value, growth))
    if growth:
        report_gapfilled_model_and_exit(original_reactions, added_reactions,
                                        compounds, reactions, media, biomass_eqtn)
# Copyright 2013 Mirantis, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

# NOTE: This module is Python 2 only (urllib2, sys.exc_traceback and the
# three-expression `raise` form below do not exist on Python 3).

import functools
import inspect
import json
import os
import sys
import time
import traceback
import urllib2

from os.path import expanduser

from devops.helpers import helpers
from fuelweb_test.helpers.checkers import check_action_logs
from fuelweb_test.helpers.checkers import check_stats_on_collector
from fuelweb_test.helpers.checkers import check_stats_private_info
from fuelweb_test.helpers.checkers import count_stats_on_collector
from proboscis import SkipTest
from proboscis.asserts import assert_equal

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.regenerate_repo import CustomRepo
from fuelweb_test.helpers.utils import pull_out_logs_via_ssh
from fuelweb_test.helpers.utils import store_astute_yaml


def save_logs(url, filename):
    """Download url and write the body to filename; log (not raise) HTTP/URL errors."""
    logger.info('Saving logs to "{}" file'.format(filename))
    try:
        with open(filename, 'w') as f:
            f.write(
                urllib2.urlopen(url).read()
            )
    except (urllib2.HTTPError, urllib2.URLError) as e:
        logger.error(e)


def log_snapshot_on_error(func):
    """Snapshot environment in case of error.

    Decorator to snapshot environment when error occurred in test.
    And always fetch diagnostic snapshot from master node
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Banner with the test name and its docstring, for log readability.
        logger.info("\n" + "<" * 5 + "#" * 30 + "[ {} ]"
                    .format(func.__name__) + "#" * 30 + ">" * 5 + "\n{}"
                    .format(func.__doc__))
        try:
            return func(*args, **kwargs)
        except SkipTest:
            raise SkipTest()
        except Exception as test_exception:
            # Capture the traceback now, before the cleanup below replaces it.
            # NOTE(review): sys.exc_traceback is the deprecated py2 spelling of
            # sys.exc_info()[2].
            exc_trace = sys.exc_traceback
            name = 'error_%s' % func.__name__
            description = "Failed in method '%s'." % func.__name__
            if args[0].env is not None:
                try:
                    create_diagnostic_snapshot(args[0].env,
                                               "fail", name)
                except:
                    # Best-effort cleanup: snapshot/log fetching must never
                    # mask the original test failure, hence the bare excepts.
                    logger.error("Fetching of diagnostic snapshot failed: {0}".
                                 format(traceback.format_exc()))
                    # Fall back to pulling raw logs over ssh.
                    try:
                        admin_remote = args[0].env.get_admin_remote()
                        pull_out_logs_via_ssh(admin_remote, name)
                    except:
                        logger.error("Fetching of raw logs failed: {0}".
                                     format(traceback.format_exc()))
                finally:
                    logger.debug(args)
                    try:
                        # Snapshot names are length-limited; keep the tail.
                        args[0].env.make_snapshot(snapshot_name=name[-50:],
                                                  description=description,
                                                  is_make=True)
                    except:
                        logger.error("Error making the environment snapshot:"
                                     " {0}".format(traceback.format_exc()))
            # Re-raise the original exception with its original traceback
            # (Python 2 three-expression raise form).
            raise test_exception, None, exc_trace
    return wrapper


def json_parse(func):
    """Decorator: parse the wrapped call's file-like response body as JSON."""
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        response = func(*args, **kwargs)
        return json.loads(response.read())
    return wrapped


def upload_manifests(func):
    """Decorator: after the wrapped call, optionally push local puppet
    manifests to the master node (controlled by settings.UPLOAD_MANIFESTS)."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        try:
            if settings.UPLOAD_MANIFESTS:
                logger.info("Uploading new manifests from %s" %
                            settings.UPLOAD_MANIFESTS_PATH)
                # Works both on EnvironmentModel methods and FuelWebClient
                # methods; anything else is skipped with a warning.
                if args[0].__class__.__name__ == "EnvironmentModel":
                    environment = args[0]
                elif args[0].__class__.__name__ == "FuelWebClient":
                    environment = args[0].environment
                else:
                    logger.warning("Can't upload manifests: method of "
                                   "unexpected class is decorated.")
                    return result
                remote = environment.get_admin_remote()
                remote.execute('rm -rf /etc/puppet/modules/*')
                remote.upload(settings.UPLOAD_MANIFESTS_PATH,
                              '/etc/puppet/modules/')
                logger.info("Copying new site.pp from %s" %
                            settings.SITEPP_FOR_UPLOAD)
                remote.execute("cp %s /etc/puppet/manifests" %
                               settings.SITEPP_FOR_UPLOAD)
                if settings.SYNC_DEPL_TASKS:
                    remote.execute("fuel release --sync-deployment-tasks"
                                   " --dir /etc/puppet/")
        except Exception:
            logger.error("Could not upload manifests")
            raise
        return result
    return wrapper


def revert_info(snapshot_name, description=""):
    """Log a banner telling the user how to revert to snapshot_name with dos.py."""
    logger.info("<" * 5 + "*" * 100 + ">" * 5)
    logger.info("{} Make snapshot: {}".format(description, snapshot_name))
    logger.info("You could revert this snapshot using [{command}]".format(
        command="dos.py revert {env} --snapshot-name {name} && "
                "dos.py resume {env} && virsh net-dumpxml {env}_admin | "
                "grep -P {pattern} -o "
                "| awk {awk_command}".format(
                    env=settings.ENV_NAME,
                    name=snapshot_name,
                    pattern="\"(\d+\.){3}\"",
                    awk_command="'{print \"Admin node IP: \"$0\"2\"}'"
                )
    )
    )
    logger.info("<" * 5 + "*" * 100 + ">" * 5)


def update_ostf(func):
    """Decorator: after the wrapped call, optionally install an OSTF patchset
    from Gerrit into the ostf container and wait for it to come back up."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        try:
            if settings.UPLOAD_PATCHSET:
                if not settings.GERRIT_REFSPEC:
                    raise ValueError('REFSPEC should be set for CI tests.')
                logger.info("Uploading new patchset from {0}"
                            .format(settings.GERRIT_REFSPEC))
                remote = args[0].environment.get_admin_remote()
                remote.upload(settings.PATCH_PATH.rstrip('/'),
                              '/var/www/nailgun/fuel-ostf')
                remote.execute('dockerctl shell ostf '
                               'bash -c "cd /var/www/nailgun/fuel-ostf; '
                               'python setup.py develop"')
                remote.execute('dockerctl shell ostf '
                               'bash -c "supervisorctl restart ostf"')
                # Wait until pgrep reports an ostf process inside the container.
                helpers.wait(
                    lambda: "0" in
                    remote.execute('dockerctl shell ostf '
                                   'bash -c "pgrep [o]stf; echo $?"')
                    ['stdout'][1], timeout=60)
                logger.info("OSTF status: RUNNING")
        except Exception as e:
            logger.error("Could not upload patch set {e}".format(e=e))
            raise
        return result
    return wrapper


def create_diagnostic_snapshot(env, status, name=""):
    """Ask nailgun to generate a diagnostic log bundle and save it to LOGS_DIR."""
    task = env.fuel_web.task_wait(env.fuel_web.client.generate_logs(), 60 * 5)
    url = "http://{}:8000{}".format(
        env.get_admin_node_ip(), task['message']
    )
    log_file_name = '{status}_{name}-{time}.tar.gz'.format(
        status=status,
        name=name,
        time=time.strftime("%Y_%m_%d__%H_%M_%S", time.gmtime())
    )
    save_logs(url, os.path.join(settings.LOGS_DIR, log_file_name))


def retry(count=3, delay=30):
    """Decorator factory: retry the wrapped call up to `count` times, sleeping
    `delay` seconds between attempts; re-raise the last failure."""
    def wrapped(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            i = 0
            while True:
                try:
                    return func(*args, **kwargs)
                except:
                    i += 1
                    if i >= count:
                        raise
                    time.sleep(delay)
        return wrapper
    return wrapped


def custom_repo(func):
    """Decorator: prepare a custom package repository before the wrapped call
    and inspect puppet logs if the call fails."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        custom_pkgs = CustomRepo(args[0].environment)
        try:
            if settings.CUSTOM_PKGS_MIRROR:
                custom_pkgs.prepare_repository()
        except Exception:
            logger.error("Unable to get custom packages from {0}\n{1}"
                         .format(settings.CUSTOM_PKGS_MIRROR,
                                 traceback.format_exc()))
            raise
        try:
            return func(*args, **kwargs)
        except Exception:
            custom_pkgs.check_puppet_logs()
            raise
    return wrapper


def check_fuel_statistics(func):
    """Decorator: after a passing test, verify Fuel usage statistics were
    (or, when disabled, were NOT) sent to the stats collector."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if not settings.FUEL_STATS_CHECK:
            return result
        logger.info('Test "{0}" passed. Checking stats.'.format(func.__name__))
        fuel_settings = args[0].env.get_fuel_settings()
        nailgun_actions = args[0].env.nailgun_actions
        postgres_actions = args[0].env.postgres_actions
        remote_collector = args[0].env.get_ssh_to_remote_by_key(
            settings.FUEL_STATS_HOST,
            '{0}/.ssh/id_rsa'.format(expanduser("~")))
        master_uuid = args[0].env.get_masternode_uuid()
        logger.info("Master Node UUID: '{0}'".format(master_uuid))
        nailgun_actions.force_fuel_stats_sending()

        if not settings.FUEL_STATS_ENABLED:
            # Stats are disabled: assert nothing reached the collector.
            assert_equal(0, int(count_stats_on_collector(remote_collector,
                                                         master_uuid)),
                         "Sending of Fuel stats is disabled in test, but "
                         "usage info was sent to collector!")
            assert_equal(args[0].env.postgres_actions.count_sent_action_logs(),
                         0, ("Sending of Fuel stats is disabled in test, but "
                             "usage info was sent to collector!"))
            return result

        test_scenario = inspect.getdoc(func)
        if 'Scenario' not in test_scenario:
            logger.warning(("Can't check that fuel statistics was gathered "
                            "and sent to collector properly because '{0}' "
                            "test doesn't contain correct testing scenario. "
                            "Skipping...").format(func.__name__))
            # NOTE(review): this runs the already-passed test a second time;
            # presumably `return result` was intended — confirm before relying
            # on this branch.
            return func(*args, **kwargs)
        try:
            check_action_logs(test_scenario, postgres_actions)
            check_stats_private_info(remote_collector,
                                     postgres_actions,
                                     master_uuid,
                                     fuel_settings)
            check_stats_on_collector(remote_collector,
                                     postgres_actions,
                                     master_uuid)
            return result
        except Exception:
            logger.error(traceback.format_exc())
            raise
    return wrapper


def download_astute_yaml(func):
    """Decorator: after the wrapped call, optionally archive the environment's
    astute.yaml (controlled by settings.STORE_ASTUTE_YAML)."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if settings.STORE_ASTUTE_YAML:
            store_astute_yaml(args[0].env)
        return result
    return wrapper
""" Test parsing single Fortran lines. ----- Permission to use, modify, and distribute this software is given under the terms of the NumPy License. See http://scipy.org. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. Author: Pearu Peterson <pearu@cens.ioc.ee> Created: May 2006 ----- """ # from numpy.testing import * from fparser.block_statements import * from fparser.readfortran import Line, FortranStringReader from nose.tools import assert_equal def parse(cls, line, label='', isfree=True, isstrict=False): if label: line = label + ' : ' + line reader = FortranStringReader(line) reader.set_mode(isfree, isstrict) item = reader.next() if not cls.match(item.get_line()): raise ValueError, '%r does not match %s pattern' % (line, cls.__name__) stmt = cls(item, item) if stmt.isvalid: r = str(stmt) if not isstrict: r1 = parse(cls, r, isstrict=True) if r != r1: raise ValueError, 'Failed to parse %r with %s pattern in pyf mode, got %r' % (r, cls.__name__, r1) return r raise ValueError, 'parsing %r with %s pattern failed' % (line, cls.__name__) def test_assignment():#self): assert_equal(parse(Assignment,'a=b'), 'a = b') assert_equal(parse(PointerAssignment,'a=>b'), 'a => b') assert_equal(parse(Assignment,'a (2)=b(n,m)'), 'a(2) = b(n,m)') assert_equal(parse(Assignment,'a % 2(2,4)=b(a(i))'), 'a%2(2,4) = b(a(i))') def test_assign(): assert_equal(parse(Assign,'assign 10 to a'),'ASSIGN 10 TO a') def test_call(): assert_equal(parse(Call,'call a'),'CALL a') assert_equal(parse(Call,'call a()'),'CALL a') assert_equal(parse(Call,'call a(1)'),'CALL a(1)') assert_equal(parse(Call,'call a(1,2)'),'CALL a(1, 2)') assert_equal(parse(Call,'call a % 2 ( n , a+1 )'),'CALL a % 2(n, a+1)') def test_goto(): assert_equal(parse(Goto,'go to 19'),'GO TO 19') assert_equal(parse(Goto,'goto 19'),'GO TO 19') assert_equal(parse(ComputedGoto,'goto (1, 2 ,3) a+b(2)'), 'GO TO (1, 2, 3) a+b(2)') assert_equal(parse(ComputedGoto,'goto (1, 2 ,3) , a+b(2)'), 'GO TO (1, 2, 3) a+b(2)') 
assert_equal(parse(AssignedGoto,'goto a'),'GO TO a') assert_equal(parse(AssignedGoto,'goto a ( 1 )'),'GO TO a (1)') assert_equal(parse(AssignedGoto,'goto a ( 1 ,2)'),'GO TO a (1, 2)') def test_continue(): assert_equal(parse(Continue,'continue'),'CONTINUE') def test_return(): assert_equal(parse(Return,'return'),'RETURN') assert_equal(parse(Return,'return a'),'RETURN a') assert_equal(parse(Return,'return a+1'),'RETURN a+1') assert_equal(parse(Return,'return a(c, a)'),'RETURN a(c, a)') def test_stop(): assert_equal(parse(Stop,'stop'),'STOP') assert_equal(parse(Stop,'stop 1'),'STOP 1') assert_equal(parse(Stop,'stop "a"'),'STOP "a"') assert_equal(parse(Stop,'stop "a b"'),'STOP "a b"') def test_print(): assert_equal(parse(Print, 'print*'),'PRINT *') assert_equal(parse(Print, 'print "a b( c )"'),'PRINT "a b( c )"') assert_equal(parse(Print, 'print 12, a'),'PRINT 12, a') assert_equal(parse(Print, 'print 12, a , b'),'PRINT 12, a, b') assert_equal(parse(Print, 'print 12, a(c,1) , b'),'PRINT 12, a(c,1), b') def test_read(): assert_equal(parse(Read, 'read ( 10 )'),'READ (10)') assert_equal(parse(Read, 'read ( 10 ) a '),'READ (10) a') assert_equal(parse(Read, 'read ( 10 ) a , b'),'READ (10) a, b') assert_equal(parse(Read, 'read *'),'READ *') assert_equal(parse(Read, 'read 12'),'READ 12') assert_equal(parse(Read, 'read "a b"'),'READ "a b"') assert_equal(parse(Read, 'read "a b",a'),'READ "a b", a') assert_equal(parse(Read, 'read * , a'),'READ *, a') assert_equal(parse(Read, 'read "hey a" , a'),'READ "hey a", a') assert_equal(parse(Read, 'read * , a , b'),'READ *, a, b') assert_equal(parse(Read, 'read ( unit =10 )'),'READ (UNIT = 10)') def test_write(): assert_equal(parse(Write, 'write ( 10 )'),'WRITE (10)') assert_equal(parse(Write, 'write ( 10 , a )'),'WRITE (10, a)') assert_equal(parse(Write, 'write ( 10 ) b'),'WRITE (10) b') assert_equal(parse(Write, 'write ( 10 ) a(1) , b+2'),'WRITE (10) a(1), b+2') assert_equal(parse(Write, 'write ( unit=10 )'),'WRITE (UNIT = 10)') def 
test_flush(): assert_equal(parse(Flush, 'flush 10'),'FLUSH (10)') assert_equal(parse(Flush, 'flush (10)'),'FLUSH (10)') assert_equal(parse(Flush, 'flush (UNIT = 10)'),'FLUSH (UNIT = 10)') assert_equal(parse(Flush, 'flush (10, err= 23)'),'FLUSH (10, ERR = 23)') def test_wait(): assert_equal(parse(Wait, 'wait(10)'),'WAIT (10)') assert_equal(parse(Wait, 'wait(10,err=129)'),'WAIT (10, ERR = 129)') def test_contains(): assert_equal(parse(Contains, 'contains'),'CONTAINS') def test_allocate(): assert_equal(parse(Allocate, 'allocate (a)'), 'ALLOCATE (a)') assert_equal(parse(Allocate, \ 'allocate (a, stat=b)'), 'ALLOCATE (a, STAT = b)') assert_equal(parse(Allocate, 'allocate (a,b(:1))'), 'ALLOCATE (a, b(:1))') assert_equal(parse(Allocate, \ 'allocate (real(8)::a)'), 'ALLOCATE (REAL(KIND=8) :: a)') def test_deallocate(): assert_equal(parse(Deallocate, 'deallocate (a)'), 'DEALLOCATE (a)') assert_equal(parse(Deallocate, 'deallocate (a, stat=b)'), 'DEALLOCATE (a, STAT = b)') def test_moduleprocedure(): assert_equal(parse(ModuleProcedure,\ 'ModuleProcedure a'), 'MODULE PROCEDURE a') assert_equal(parse(ModuleProcedure,\ 'module procedure a , b'), 'MODULE PROCEDURE a, b') def test_access(): assert_equal(parse(Public,'Public'),'PUBLIC') assert_equal(parse(Public,'public a'),'PUBLIC a') assert_equal(parse(Public,'public :: a'),'PUBLIC a') assert_equal(parse(Public,'public a,b,c'),'PUBLIC a, b, c') assert_equal(parse(Public,'public :: a(:,:)'),'PUBLIC a(:,:)') assert_equal(parse(Private,'private'),'PRIVATE') assert_equal(parse(Private,'private :: a'),'PRIVATE a') def test_close(): assert_equal(parse(Close,'close (12)'),'CLOSE (12)') assert_equal(parse(Close,'close (12, err=99)'),'CLOSE (12, ERR = 99)') assert_equal(parse(Close,'close (12, status = a(1,2))'),'CLOSE (12, STATUS = a(1,2))') def test_cycle(): assert_equal(parse(Cycle,'cycle'),'CYCLE') assert_equal(parse(Cycle,'cycle ab'),'CYCLE ab') def test_rewind(): assert_equal(parse(Rewind,'rewind 1'),'REWIND (1)') 
assert_equal(parse(Rewind,'rewind (1)'),'REWIND (1)') assert_equal(parse(Rewind,'rewind (1, err = 123)'),'REWIND (1, ERR = 123)') def test_backspace(): assert_equal(parse(Backspace,'backspace 1'),'BACKSPACE (1)') assert_equal(parse(Backspace,'backspace (1)'),'BACKSPACE (1)') assert_equal(parse(Backspace,'backspace (1, err = 123)'),'BACKSPACE (1, ERR = 123)') def test_endfile(): assert_equal(parse(Endfile,'endfile 1'),'ENDFILE (1)') assert_equal(parse(Endfile,'endfile (1)'),'ENDFILE (1)') assert_equal(parse(Endfile,'endfile (1, err = 123)'),'ENDFILE (1, ERR = 123)') def test_open(): assert_equal(parse(Open,'open (1)'),'OPEN (1)') assert_equal(parse(Open,'open (1, err = 123)'),'OPEN (1, ERR = 123)') def test_format(): assert_equal(parse(Format,'1 format ()'),'1 FORMAT ()') assert_equal(parse(Format,'199 format (1)'),'199 FORMAT (1)') assert_equal(parse(Format,'2 format (1 , SS)'),'2 FORMAT (1, ss)') def test_save(): assert_equal(parse(Save,'save'), 'SAVE') assert_equal(parse(Save,'save :: a'), 'SAVE a') assert_equal(parse(Save,'save a,b'), 'SAVE a, b') def test_data(): assert_equal(parse(Data,'data a /b/'), 'DATA a / b /') assert_equal(parse(Data,'data a , c /b/'), 'DATA a, c / b /') assert_equal(parse(Data,'data a /b ,c/'), 'DATA a / b, c /') assert_equal(parse(Data,'data a /b/ c,e /d/'), 'DATA a / b / c, e / d /') assert_equal(parse(Data,'data a(1,2) /b/'), 'DATA a(1,2) / b /') assert_equal(parse(Data,'data a /b, c(1)/'), 'DATA a / b, c(1) /') def test_nullify(): assert_equal(parse(Nullify,'nullify(a)'),'NULLIFY (a)') assert_equal(parse(Nullify,'nullify(a ,b)'),'NULLIFY (a, b)') def test_use(): assert_equal(parse(Use, 'use a'), 'USE a') assert_equal(parse(Use, 'use :: a'), 'USE a') assert_equal(parse(Use, 'use, intrinsic:: a'), 'USE INTRINSIC :: a') assert_equal(parse(Use, 'use :: a ,only: b'), 'USE a, ONLY: b') assert_equal(parse(Use, 'use :: a , only: b=>c'), 'USE a, ONLY: b=>c') assert_equal(parse(Use, 'use :: a , b=>c'), 'USE a, b=>c') assert_equal(parse(Use,\ 
'use :: a , only: operator(+) , b'),\ 'USE a, ONLY: operator(+), b') def test_exit(): assert_equal(parse(Exit,'exit'),'EXIT') assert_equal(parse(Exit,'exit ab'),'EXIT ab') def test_parameter(): assert_equal(parse(Parameter,'parameter (a = b(1,2))'), 'PARAMETER (a = b(1,2))') assert_equal(parse(Parameter,'parameter (a = b(1,2) , b=1)'), 'PARAMETER (a = b(1,2), b=1)') def test_equivalence(): assert_equal(parse(Equivalence,'equivalence (a , b)'),'EQUIVALENCE (a, b)') assert_equal(parse(Equivalence,'equivalence (a , b) , ( c, d(1) , g )'), 'EQUIVALENCE (a, b), (c, d(1), g)') def test_dimension(): assert_equal(parse(Dimension,'dimension a(b)'),'DIMENSION a(b)') assert_equal(parse(Dimension,'dimension::a(b)'),'DIMENSION a(b)') assert_equal(parse(Dimension,'dimension a(b) , c(d)'),'DIMENSION a(b), c(d)') assert_equal(parse(Dimension,'dimension a(b,c)'),'DIMENSION a(b,c)') def test_target(): assert_equal(parse(Target,'target a(b)'),'TARGET a(b)') assert_equal(parse(Target,'target::a(b)'),'TARGET a(b)') assert_equal(parse(Target,'target a(b) , c(d)'),'TARGET a(b), c(d)') assert_equal(parse(Target,'target a(b,c)'),'TARGET a(b,c)') def test_pointer(): assert_equal(parse(Pointer,'pointer a=b'),'POINTER a=b') assert_equal(parse(Pointer,'pointer :: a=b'),'POINTER a=b') assert_equal(parse(Pointer,'pointer a=b, c=d(1,2)'),'POINTER a=b, c=d(1,2)') def test_protected(): assert_equal(parse(Protected,'protected a'),'PROTECTED a') assert_equal(parse(Protected,'protected::a'),'PROTECTED a') assert_equal(parse(Protected,'protected a , b'),'PROTECTED a, b') def test_volatile(): assert_equal(parse(Volatile,'volatile a'),'VOLATILE a') assert_equal(parse(Volatile,'volatile::a'),'VOLATILE a') assert_equal(parse(Volatile,'volatile a , b'),'VOLATILE a, b') def test_value(): assert_equal(parse(Value,'value a'),'VALUE a') assert_equal(parse(Value,'value::a'),'VALUE a') assert_equal(parse(Value,'value a , b'),'VALUE a, b') def test_arithmeticif(): assert_equal(parse(ArithmeticIf,'if (a) 
1,2,3'),'IF (a) 1, 2, 3') assert_equal(parse(ArithmeticIf,'if (a(1)) 1,2,3'),'IF (a(1)) 1, 2, 3') assert_equal(parse(ArithmeticIf,'if (a(1,2)) 1,2,3'),'IF (a(1,2)) 1, 2, 3') def test_intrinsic(): assert_equal(parse(Intrinsic,'intrinsic a'),'INTRINSIC a') assert_equal(parse(Intrinsic,'intrinsic::a'),'INTRINSIC a') assert_equal(parse(Intrinsic,'intrinsic a , b'),'INTRINSIC a, b') def test_inquire(): assert_equal(parse(Inquire, 'inquire (1)'),'INQUIRE (1)') assert_equal(parse(Inquire, 'inquire (1, err=123)'),'INQUIRE (1, ERR = 123)') assert_equal(parse(Inquire, 'inquire (iolength=a) b'),'INQUIRE (IOLENGTH = a) b') assert_equal(parse(Inquire, 'inquire (iolength=a) b ,c(1,2)'), 'INQUIRE (IOLENGTH = a) b, c(1,2)') def test_sequence(): assert_equal(parse(Sequence, 'sequence'),'SEQUENCE') def test_external(): assert_equal(parse(External,'external a'),'EXTERNAL a') assert_equal(parse(External,'external::a'),'EXTERNAL a') assert_equal(parse(External,'external a , b'),'EXTERNAL a, b') def test_common(): assert_equal(parse(Common, 'common a'),'COMMON a') assert_equal(parse(Common, 'common a , b'),'COMMON a, b') assert_equal(parse(Common, 'common a , b(1,2)'),'COMMON a, b(1,2)') assert_equal(parse(Common, 'common // a'),'COMMON a') assert_equal(parse(Common, 'common / name/ a'),'COMMON / name / a') assert_equal(parse(Common, 'common / name/ a , c'),'COMMON / name / a, c') assert_equal(parse(Common, 'common / name/ a /foo/ c(1) ,d'), 'COMMON / name / a / foo / c(1), d') assert_equal(parse(Common, 'common / name/ a, /foo/ c(1) ,d'), 'COMMON / name / a / foo / c(1), d') def test_optional(): assert_equal(parse(Optional,'optional a'),'OPTIONAL a') assert_equal(parse(Optional,'optional::a'),'OPTIONAL a') assert_equal(parse(Optional,'optional a , b'),'OPTIONAL a, b') def test_intent(): assert_equal(parse(Intent,'intent (in) a'),'INTENT (IN) a') assert_equal(parse(Intent,'intent(in)::a'),'INTENT (IN) a') assert_equal(parse(Intent,'intent(in) a , b'),'INTENT (IN) a, b') 
assert_equal(parse(Intent,'intent (in, out) a'),'INTENT (IN, OUT) a') def test_entry(): assert_equal(parse(Entry,'entry a'), 'ENTRY a') assert_equal(parse(Entry,'entry a()'), 'ENTRY a') assert_equal(parse(Entry,'entry a(b)'), 'ENTRY a (b)') assert_equal(parse(Entry,'entry a(b,*)'), 'ENTRY a (b, *)') assert_equal(parse(Entry,'entry a bind(c , name="a b")'), 'ENTRY a BIND (C, NAME = "a b")') assert_equal(parse(Entry,'entry a result (b)'), 'ENTRY a RESULT (b)') assert_equal(parse(Entry,'entry a bind(d) result (b)'), 'ENTRY a RESULT (b) BIND (D)') assert_equal(parse(Entry,'entry a result (b) bind( c )'), 'ENTRY a RESULT (b) BIND (C)') assert_equal(parse(Entry,'entry a(b,*) result (g)'), 'ENTRY a (b, *) RESULT (g)') def test_import(): assert_equal(parse(Import,'import'),'IMPORT') assert_equal(parse(Import,'import a'),'IMPORT a') assert_equal(parse(Import,'import::a'),'IMPORT a') assert_equal(parse(Import,'import a , b'),'IMPORT a, b') def test_forall(): assert_equal(parse(ForallStmt,'forall (i = 1:n(k,:) : 2) a(i) = i*i*b(i)'), 'FORALL (i = 1 : n(k,:) : 2) a(i) = i*i*b(i)') assert_equal(parse(ForallStmt,'forall (i=1:n,j=2:3) a(i) = b(i,i)'), 'FORALL (i = 1 : n, j = 2 : 3) a(i) = b(i,i)') assert_equal(parse(ForallStmt,'forall (i=1:n,j=2:3, 1+a(1,2)) a(i) = b(i,i)'), 'FORALL (i = 1 : n, j = 2 : 3, 1+a(1,2)) a(i) = b(i,i)') def test_specificbinding(): assert_equal(parse(SpecificBinding,'procedure a'),'PROCEDURE a') assert_equal(parse(SpecificBinding,'procedure :: a'),'PROCEDURE a') assert_equal(parse(SpecificBinding,'procedure , NOPASS :: a'),'PROCEDURE , NOPASS :: a') assert_equal(parse(SpecificBinding,'procedure , public, pass(x ) :: a'),'PROCEDURE , PUBLIC, PASS (x) :: a') assert_equal(parse(SpecificBinding,'procedure(n) a'),'PROCEDURE (n) a') assert_equal(parse(SpecificBinding,'procedure(n),pass :: a'), 'PROCEDURE (n) , PASS :: a') assert_equal(parse(SpecificBinding,'procedure(n) :: a'), 'PROCEDURE (n) a') assert_equal(parse(SpecificBinding,'procedure a= 
>b'),'PROCEDURE a => b') assert_equal(parse(SpecificBinding,'procedure(n),pass :: a =>c'), 'PROCEDURE (n) , PASS :: a => c') def test_genericbinding(): assert_equal(parse(GenericBinding,'generic :: a=>b'),'GENERIC :: a => b') assert_equal(parse(GenericBinding,'generic, public :: a=>b'),'GENERIC, PUBLIC :: a => b') assert_equal(parse(GenericBinding,'generic, public :: a(1,2)=>b ,c'), 'GENERIC, PUBLIC :: a(1,2) => b, c') def test_finalbinding(): assert_equal(parse(FinalBinding,'final a'),'FINAL a') assert_equal(parse(FinalBinding,'final::a'),'FINAL a') assert_equal(parse(FinalBinding,'final a , b'),'FINAL a, b') def test_allocatable(): assert_equal(parse(Allocatable,'allocatable a'),'ALLOCATABLE a') assert_equal(parse(Allocatable,'allocatable :: a'),'ALLOCATABLE a') assert_equal(parse(Allocatable,'allocatable a (1,2)'),'ALLOCATABLE a (1,2)') assert_equal(parse(Allocatable,'allocatable a (1,2) ,b'),'ALLOCATABLE a (1,2), b') def test_asynchronous(): assert_equal(parse(Asynchronous,'asynchronous a'),'ASYNCHRONOUS a') assert_equal(parse(Asynchronous,'asynchronous::a'),'ASYNCHRONOUS a') assert_equal(parse(Asynchronous,'asynchronous a , b'),'ASYNCHRONOUS a, b') def test_bind(): assert_equal(parse(Bind,'bind(c) a'),'BIND (C) a') assert_equal(parse(Bind,'bind(c) :: a'),'BIND (C) a') assert_equal(parse(Bind,'bind(c) a ,b'),'BIND (C) a, b') assert_equal(parse(Bind,'bind(c) /a/'),'BIND (C) / a /') assert_equal(parse(Bind,'bind(c) /a/ ,b'),'BIND (C) / a /, b') assert_equal(parse(Bind,'bind(c,name="hey") a'),'BIND (C, NAME = "hey") a') def test_else(): assert_equal(parse(Else,'else'),'ELSE') assert_equal(parse(ElseIf,'else if (a) then'),'ELSE IF (a) THEN') assert_equal(parse(ElseIf,'else if (a.eq.b(1,2)) then'), 'ELSE IF (a.eq.b(1,2)) THEN') def test_case(): assert_equal(parse(Case,'case (1)'),'CASE ( 1 )') assert_equal(parse(Case,'case (1:)'),'CASE ( 1 : )') assert_equal(parse(Case,'case (:1)'),'CASE ( : 1 )') assert_equal(parse(Case,'case (1:2)'),'CASE ( 1 : 2 )') 
assert_equal(parse(Case,'case (a(1,2))'),'CASE ( a(1,2) )') assert_equal(parse(Case,'case ("ab")'),'CASE ( "ab" )') assert_equal(parse(Case,'case default'),'CASE DEFAULT') assert_equal(parse(Case,'case (1:2 ,3:4)'),'CASE ( 1 : 2, 3 : 4 )') assert_equal(parse(Case,'case (a(1,:):)'),'CASE ( a(1,:) : )') assert_equal(parse(Case,'case default'),'CASE DEFAULT') def test_where(): assert_equal(parse(WhereStmt,'where (1) a=1'),'WHERE ( 1 ) a = 1') assert_equal(parse(WhereStmt,'where (a(1,2)) a=1'),'WHERE ( a(1,2) ) a = 1') def test_elsewhere(): assert_equal(parse(ElseWhere,'else where'),'ELSE WHERE') assert_equal(parse(ElseWhere,'elsewhere (1)'),'ELSE WHERE ( 1 )') assert_equal(parse(ElseWhere,'elsewhere(a(1,2))'),'ELSE WHERE ( a(1,2) )') def test_enumerator(): assert_equal(parse(Enumerator,'enumerator a'), 'ENUMERATOR a') assert_equal(parse(Enumerator,'enumerator:: a'), 'ENUMERATOR a') assert_equal(parse(Enumerator,'enumerator a,b'), 'ENUMERATOR a, b') assert_equal(parse(Enumerator,'enumerator a=1'), 'ENUMERATOR a=1') assert_equal(parse(Enumerator,'enumerator a=1 , b=c(1,2)'), 'ENUMERATOR a=1, b=c(1,2)') def test_fortranname(): assert_equal(parse(FortranName,'fortranname a'),'FORTRANNAME a') def test_threadsafe(): assert_equal(parse(Threadsafe,'threadsafe'),'THREADSAFE') def test_depend(): assert_equal(parse(Depend,'depend( a) b'), 'DEPEND ( a ) b') assert_equal(parse(Depend,'depend( a) ::b'), 'DEPEND ( a ) b') assert_equal(parse(Depend,'depend( a,c) b,e'), 'DEPEND ( a, c ) b, e') def test_check(): assert_equal(parse(Check,'check(1) a'), 'CHECK ( 1 ) a') assert_equal(parse(Check,'check(1) :: a'), 'CHECK ( 1 ) a') assert_equal(parse(Check,'check(b(1,2)) a'), 'CHECK ( b(1,2) ) a') assert_equal(parse(Check,'check(a>1) :: a'), 'CHECK ( a>1 ) a') def test_callstatement(): assert_equal(parse(CallStatement,'callstatement (*func)()',isstrict=1), 'CALLSTATEMENT (*func)()') assert_equal(parse(CallStatement,'callstatement i=1;(*func)()',isstrict=1), 'CALLSTATEMENT i=1;(*func)()') 
def test_callprotoargument():
    assert_equal(parse(CallProtoArgument,'callprotoargument int(*), double'),
                 'CALLPROTOARGUMENT int(*), double')

def test_pause():
    assert_equal(parse(Pause,'pause'),'PAUSE')
    assert_equal(parse(Pause,'pause 1'),'PAUSE 1')
    assert_equal(parse(Pause,'pause "hey"'),'PAUSE "hey"')
    assert_equal(parse(Pause,'pause "hey pa"'),'PAUSE "hey pa"')

# Type-declaration statements: attribute lists, KIND/LEN selectors and
# the star-size shorthand are normalized by the parser.

def test_integer():
    assert_equal(parse(Integer,'integer'),'INTEGER')
    assert_equal(parse(Integer,'integer*4'),'INTEGER*4')
    assert_equal(parse(Integer,'integer*4 a'),'INTEGER*4 a')
    assert_equal(parse(Integer,'integer*4, a'),'INTEGER*4 a')
    assert_equal(parse(Integer,'integer*4 a ,b'),'INTEGER*4 a, b')
    assert_equal(parse(Integer,'integer*4 :: a ,b'),'INTEGER*4 a, b')
    assert_equal(parse(Integer,'integer*4 a(1,2)'),'INTEGER*4 a(1,2)')
    assert_equal(parse(Integer,'integer*4 :: a(1,2),b'),'INTEGER*4 a(1,2), b')
    assert_equal(parse(Integer,'integer*4 external :: a'),
                 'INTEGER*4, external :: a')
    assert_equal(parse(Integer,'integer*4, external :: a'),
                 'INTEGER*4, external :: a')
    assert_equal(parse(Integer,'integer*4 external , intent(in) :: a'),
                 'INTEGER*4, external, intent(in) :: a')
    assert_equal(parse(Integer,'integer(kind=4)'),'INTEGER(KIND=4)')
    assert_equal(parse(Integer,'integer ( kind = 4)'),'INTEGER(KIND=4)')
    assert_equal(parse(Integer,'integer(kind=2+2)'),'INTEGER(KIND=2+2)')
    assert_equal(parse(Integer,'integer(kind=f(4,5))'),'INTEGER(KIND=f(4,5))')

def test_character():
    assert_equal(parse(Character,'character'),'CHARACTER')
    assert_equal(parse(Character,'character*2'),'CHARACTER(LEN=2)')
    assert_equal(parse(Character,'character**'),'CHARACTER(LEN=*)')
    assert_equal(parse(Character,'character*(2)'),'CHARACTER(LEN=2)')
    assert_equal(parse(Character,'character*(len =2)'),'CHARACTER(LEN=2)')
    assert_equal(parse(Character,'character*(len =2),'),'CHARACTER(LEN=2)')
    assert_equal(parse(Character,'character*(len =:)'),'CHARACTER(LEN=:)')
    assert_equal(parse(Character,'character(len =2)'),'CHARACTER(LEN=2)')
    assert_equal(parse(Character,'character(2)'),'CHARACTER(LEN=2)')
    assert_equal(parse(Character,'character(kind=2)'),'CHARACTER(KIND=2)')
    assert_equal(parse(Character,'character(kind=2,len=3)'),
                 'CHARACTER(LEN=3, KIND=2)')
    assert_equal(parse(Character,'character(lEN=3,kind=2)'),
                 'CHARACTER(LEN=3, KIND=2)')
    assert_equal(parse(Character,'character(len=3,kind=2)', isstrict=True),
                 'CHARACTER(LEN=3, KIND=2)')
    assert_equal(parse(Character,'chaRACTER(len=3,kind=fA(1,2))', isstrict=True),
                 'CHARACTER(LEN=3, KIND=fA(1,2))')
    assert_equal(parse(Character,'character(len=3,kind=fA(1,2))'),
                 'CHARACTER(LEN=3, KIND=fa(1,2))')

def test_implicit():
    assert_equal(parse(Implicit,'implicit none'),'IMPLICIT NONE')
    assert_equal(parse(Implicit,'implicit'),'IMPLICIT NONE')
    assert_equal(parse(Implicit,'implicit integer (i-m)'),
                 'IMPLICIT INTEGER ( i-m )')
    assert_equal(parse(Implicit,'implicit integer (i-m,p,q-r)'),
                 'IMPLICIT INTEGER ( i-m, p, q-r )')
    assert_equal(parse(Implicit,'implicit integer (i-m), real (z)'),
                 'IMPLICIT INTEGER ( i-m ), REAL ( z )')
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""
Example Airflow DAG that creates, patches and deletes a Cloud SQL instance, and also
creates, patches and deletes a database inside the instance, in Google Cloud Platform.

This DAG relies on the following OS environment variables
https://airflow.apache.org/concepts.html#variables
* GCP_PROJECT_ID - Google Cloud Platform project for the Cloud SQL instance.
* INSTANCE_NAME - Name of the Cloud SQL instance.
* DB_NAME - Name of the database inside a Cloud SQL instance.
"""

import os
from urllib.parse import urlsplit

from airflow import models
from airflow.providers.google.cloud.operators.cloud_sql import (
    CloudSQLCreateInstanceDatabaseOperator, CloudSQLCreateInstanceOperator,
    CloudSQLDeleteInstanceDatabaseOperator, CloudSQLDeleteInstanceOperator,
    CloudSQLExportInstanceOperator, CloudSQLImportInstanceOperator, CloudSQLInstancePatchOperator,
    CloudSQLPatchInstanceDatabaseOperator,
)
from airflow.providers.google.cloud.operators.gcs import (
    GCSBucketCreateAclEntryOperator, GCSObjectCreateAclEntryOperator,
)
from airflow.utils.dates import days_ago

# NOTE: the [START]/[END] markers below are anchors consumed by the Airflow
# documentation build -- keep them exactly as they are.

# [START howto_operator_cloudsql_arguments]
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
INSTANCE_NAME = os.environ.get('GCSQL_MYSQL_INSTANCE_NAME', 'test-mysql')
INSTANCE_NAME2 = os.environ.get('GCSQL_MYSQL_INSTANCE_NAME2', 'test-mysql2')
DB_NAME = os.environ.get('GCSQL_MYSQL_DATABASE_NAME', 'testdb')
# [END howto_operator_cloudsql_arguments]

# [START howto_operator_cloudsql_export_import_arguments]
EXPORT_URI = os.environ.get('GCSQL_MYSQL_EXPORT_URI', 'gs://bucketName/fileName')
IMPORT_URI = os.environ.get('GCSQL_MYSQL_IMPORT_URI', 'gs://bucketName/fileName')
# [END howto_operator_cloudsql_export_import_arguments]

# Bodies below represent Cloud SQL instance resources:
# https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances

# [START howto_operator_cloudsql_create_arguments]
FAILOVER_REPLICA_NAME = INSTANCE_NAME + "-failover-replica"
READ_REPLICA_NAME = INSTANCE_NAME + "-read-replica"
# [END howto_operator_cloudsql_create_arguments]

# [START howto_operator_cloudsql_create_body]
body = {
    "name": INSTANCE_NAME,
    "settings": {
        "tier": "db-n1-standard-1",
        "backupConfiguration": {
            "binaryLogEnabled": True,
            "enabled": True,
            "startTime": "05:00"
        },
        "activationPolicy": "ALWAYS",
        "dataDiskSizeGb": 30,
        "dataDiskType": "PD_SSD",
        "databaseFlags": [],
        "ipConfiguration": {
            "ipv4Enabled": True,
            "requireSsl": True,
        },
        "locationPreference": {
            "zone": "europe-west4-a"
        },
        "maintenanceWindow": {
            "hour": 5,
            "day": 7,
            "updateTrack": "canary"
        },
        "pricingPlan": "PER_USE",
        "replicationType": "ASYNCHRONOUS",
        "storageAutoResize": True,
        "storageAutoResizeLimit": 0,
        "userLabels": {
            "my-key": "my-value"
        }
    },
    "failoverReplica": {
        "name": FAILOVER_REPLICA_NAME
    },
    "databaseVersion": "MYSQL_5_7",
    "region": "europe-west4",
}
# [END howto_operator_cloudsql_create_body]

# Minimal body for the second instance (import target).
body2 = {
    "name": INSTANCE_NAME2,
    "settings": {
        "tier": "db-n1-standard-1",
    },
    "databaseVersion": "MYSQL_5_7",
    "region": "europe-west4",
}

# [START howto_operator_cloudsql_create_replica]
read_replica_body = {
    "name": READ_REPLICA_NAME,
    "settings": {
        "tier": "db-n1-standard-1",
    },
    "databaseVersion": "MYSQL_5_7",
    "region": "europe-west4",
    "masterInstanceName": INSTANCE_NAME,
}
# [END howto_operator_cloudsql_create_replica]

# [START howto_operator_cloudsql_patch_body]
patch_body = {
    "name": INSTANCE_NAME,
    "settings": {
        "dataDiskSizeGb": 35,
        "maintenanceWindow": {
            "hour": 3,
            "day": 6,
            "updateTrack": "canary"
        },
        "userLabels": {
            "my-key-patch": "my-value-patch"
        }
    }
}
# [END howto_operator_cloudsql_patch_body]
# [START howto_operator_cloudsql_export_body]
export_body = {
    "exportContext": {
        "fileType": "sql",
        "uri": EXPORT_URI,
        "sqlExportOptions": {
            "schemaOnly": False
        }
    }
}
# [END howto_operator_cloudsql_export_body]
# [START howto_operator_cloudsql_import_body]
import_body = {
    "importContext": {
        "fileType": "sql",
        "uri": IMPORT_URI
    }
}
# [END howto_operator_cloudsql_import_body]
# [START howto_operator_cloudsql_db_create_body]
db_create_body = {
    "instance": INSTANCE_NAME,
    "name": DB_NAME,
    "project": GCP_PROJECT_ID
}
# [END howto_operator_cloudsql_db_create_body]
# [START howto_operator_cloudsql_db_patch_body]
db_patch_body = {
    "charset": "utf16",
    "collation": "utf16_general_ci"
}
# [END howto_operator_cloudsql_db_patch_body]

default_args = {
    'start_date': days_ago(1)
}

with models.DAG(
    'example_gcp_sql',
    default_args=default_args,
    schedule_interval=None,  # Override to match your needs
    tags=['example'],
) as dag:
    # ############################################## #
    # ### INSTANCES SET UP ######################### #
    # ############################################## #

    # [START howto_operator_cloudsql_create]
    sql_instance_create_task = CloudSQLCreateInstanceOperator(
        project_id=GCP_PROJECT_ID,
        body=body,
        instance=INSTANCE_NAME,
        task_id='sql_instance_create_task'
    )
    # [END howto_operator_cloudsql_create]

    sql_instance_create_2_task = CloudSQLCreateInstanceOperator(
        project_id=GCP_PROJECT_ID,
        body=body2,
        instance=INSTANCE_NAME2,
        task_id='sql_instance_create_task2'
    )
    # [END howto_operator_cloudsql_create]

    # NOTE(review): the body names the replica READ_REPLICA_NAME but
    # instance= is INSTANCE_NAME2 -- looks inconsistent; confirm whether
    # READ_REPLICA_NAME was intended here.
    sql_instance_read_replica_create = CloudSQLCreateInstanceOperator(
        project_id=GCP_PROJECT_ID,
        body=read_replica_body,
        instance=INSTANCE_NAME2,
        task_id='sql_instance_read_replica_create'
    )

    # ############################################## #
    # ### MODIFYING INSTANCE AND ITS DATABASE ###### #
    # ############################################## #

    # [START howto_operator_cloudsql_patch]
    sql_instance_patch_task = CloudSQLInstancePatchOperator(
        project_id=GCP_PROJECT_ID,
        body=patch_body,
        instance=INSTANCE_NAME,
        task_id='sql_instance_patch_task'
    )

    # The *2 variants below omit project_id -- presumably they exercise the
    # default project taken from the GCP connection; confirm.
    sql_instance_patch_task2 = CloudSQLInstancePatchOperator(
        body=patch_body,
        instance=INSTANCE_NAME,
        task_id='sql_instance_patch_task2'
    )
    # [END howto_operator_cloudsql_patch]

    # [START howto_operator_cloudsql_db_create]
    sql_db_create_task = CloudSQLCreateInstanceDatabaseOperator(
        project_id=GCP_PROJECT_ID,
        body=db_create_body,
        instance=INSTANCE_NAME,
        task_id='sql_db_create_task'
    )
    sql_db_create_task2 = CloudSQLCreateInstanceDatabaseOperator(
        body=db_create_body,
        instance=INSTANCE_NAME,
        task_id='sql_db_create_task2'
    )
    # [END howto_operator_cloudsql_db_create]

    # [START howto_operator_cloudsql_db_patch]
    sql_db_patch_task = CloudSQLPatchInstanceDatabaseOperator(
        project_id=GCP_PROJECT_ID,
        body=db_patch_body,
        instance=INSTANCE_NAME,
        database=DB_NAME,
        task_id='sql_db_patch_task'
    )
    sql_db_patch_task2 = CloudSQLPatchInstanceDatabaseOperator(
        body=db_patch_body,
        instance=INSTANCE_NAME,
        database=DB_NAME,
        task_id='sql_db_patch_task2'
    )
    # [END howto_operator_cloudsql_db_patch]

    # ############################################## #
    # ### EXPORTING SQL FROM INSTANCE 1 ############ #
    # ############################################## #
    export_url_split = urlsplit(EXPORT_URI)

    # For export to work we need to add the Cloud SQL instance's Service Account
    # write access to the destination GCS bucket.
    # [START howto_operator_cloudsql_export_gcs_permissions]
    sql_gcp_add_bucket_permission_task = GCSBucketCreateAclEntryOperator(
        entity="user-{{ task_instance.xcom_pull("
               "'sql_instance_create_task', key='service_account_email') "
               "}}",
        role="WRITER",
        bucket=export_url_split[1],  # netloc (bucket)
        task_id='sql_gcp_add_bucket_permission_task'
    )
    # [END howto_operator_cloudsql_export_gcs_permissions]

    # [START howto_operator_cloudsql_export]
    sql_export_task = CloudSQLExportInstanceOperator(
        project_id=GCP_PROJECT_ID,
        body=export_body,
        instance=INSTANCE_NAME,
        task_id='sql_export_task'
    )
    sql_export_task2 = CloudSQLExportInstanceOperator(
        body=export_body,
        instance=INSTANCE_NAME,
        task_id='sql_export_task2'
    )
    # [END howto_operator_cloudsql_export]

    # ############################################## #
    # ### IMPORTING SQL TO INSTANCE 2 ############## #
    # ############################################## #
    import_url_split = urlsplit(IMPORT_URI)

    # For import to work we need to add the Cloud SQL instance's Service Account
    # read access to the target GCS object.
    # [START howto_operator_cloudsql_import_gcs_permissions]
    sql_gcp_add_object_permission_task = GCSObjectCreateAclEntryOperator(
        entity="user-{{ task_instance.xcom_pull("
               "'sql_instance_create_task2', key='service_account_email')"
               " }}",
        role="READER",
        bucket=import_url_split[1],  # netloc (bucket)
        object_name=import_url_split[2][1:],  # path (strip first '/')
        task_id='sql_gcp_add_object_permission_task',
    )

    # For import to work we also need to add the Cloud SQL instance's Service Account
    # write access to the whole bucket!.
    sql_gcp_add_bucket_permission_2_task = GCSBucketCreateAclEntryOperator(
        entity="user-{{ task_instance.xcom_pull("
               "'sql_instance_create_task2', key='service_account_email') "
               "}}",
        role="WRITER",
        bucket=import_url_split[1],  # netloc
        task_id='sql_gcp_add_bucket_permission_2_task',
    )
    # [END howto_operator_cloudsql_import_gcs_permissions]

    # [START howto_operator_cloudsql_import]
    sql_import_task = CloudSQLImportInstanceOperator(
        project_id=GCP_PROJECT_ID,
        body=import_body,
        instance=INSTANCE_NAME2,
        task_id='sql_import_task'
    )
    sql_import_task2 = CloudSQLImportInstanceOperator(
        body=import_body,
        instance=INSTANCE_NAME2,
        task_id='sql_import_task2'
    )
    # [END howto_operator_cloudsql_import]

    # ############################################## #
    # ### DELETING A DATABASE FROM AN INSTANCE ##### #
    # ############################################## #

    # [START howto_operator_cloudsql_db_delete]
    sql_db_delete_task = CloudSQLDeleteInstanceDatabaseOperator(
        project_id=GCP_PROJECT_ID,
        instance=INSTANCE_NAME,
        database=DB_NAME,
        task_id='sql_db_delete_task'
    )
    sql_db_delete_task2 = CloudSQLDeleteInstanceDatabaseOperator(
        instance=INSTANCE_NAME,
        database=DB_NAME,
        task_id='sql_db_delete_task2'
    )
    # [END howto_operator_cloudsql_db_delete]

    # ############################################## #
    # ### INSTANCES TEAR DOWN ###################### #
    # ############################################## #

    # Replicas must be deleted before their master instance can be deleted.
    # [START howto_operator_cloudsql_replicas_delete]
    sql_instance_failover_replica_delete_task = CloudSQLDeleteInstanceOperator(
        project_id=GCP_PROJECT_ID,
        instance=FAILOVER_REPLICA_NAME,
        task_id='sql_instance_failover_replica_delete_task'
    )

    sql_instance_read_replica_delete_task = CloudSQLDeleteInstanceOperator(
        project_id=GCP_PROJECT_ID,
        instance=READ_REPLICA_NAME,
        task_id='sql_instance_read_replica_delete_task'
    )
    # [END howto_operator_cloudsql_replicas_delete]

    # [START howto_operator_cloudsql_delete]
    sql_instance_delete_task = CloudSQLDeleteInstanceOperator(
        project_id=GCP_PROJECT_ID,
        instance=INSTANCE_NAME,
        task_id='sql_instance_delete_task'
    )
    sql_instance_delete_task2 = CloudSQLDeleteInstanceOperator(
        instance=INSTANCE_NAME2,
        task_id='sql_instance_delete_task2'
    )
    # [END howto_operator_cloudsql_delete]

    sql_instance_delete_2_task = CloudSQLDeleteInstanceOperator(
        project_id=GCP_PROJECT_ID,
        instance=INSTANCE_NAME2,
        task_id='sql_instance_delete_2_task'
    )

    # Linear pipeline: set up -> modify -> export -> import -> tear down.
    # NOTE(review): sql_instance_delete_task2 is defined above but not wired
    # into this chain -- confirm whether that is intentional.
    (
        sql_instance_create_task  # noqa
        >> sql_instance_create_2_task  # noqa
        >> sql_instance_read_replica_create  # noqa
        >> sql_instance_patch_task  # noqa
        >> sql_instance_patch_task2  # noqa
        >> sql_db_create_task  # noqa
        >> sql_db_create_task2  # noqa
        >> sql_db_patch_task  # noqa
        >> sql_db_patch_task2  # noqa
        >> sql_gcp_add_bucket_permission_task  # noqa
        >> sql_export_task  # noqa
        >> sql_export_task2  # noqa
        >> sql_gcp_add_object_permission_task  # noqa
        >> sql_gcp_add_bucket_permission_2_task  # noqa
        >> sql_import_task  # noqa
        >> sql_import_task2  # noqa
        >> sql_db_delete_task  # noqa
        >> sql_db_delete_task2  # noqa
        >> sql_instance_failover_replica_delete_task  # noqa
        >> sql_instance_read_replica_delete_task  # noqa
        >> sql_instance_delete_task  # noqa
        >> sql_instance_delete_2_task  # noqa
    )
from __future__ import absolute_import
from . import backend as K
from .utils.generic_utils import get_from_module
from six.moves import zip


def clip_norm(g, c, n):
    '''Clip the gradient `g` to a maximum L2 norm of `c`.

    # Arguments
        g: gradient tensor.
        c: float >= 0. Maximum allowed L2 norm; clipping is a
            no-op when `c <= 0`.
        n: precomputed L2 norm of the full gradient.

    # Returns
        The (possibly rescaled) gradient tensor.
    '''
    if c > 0:
        g = K.switch(n >= c, g * c / n, g)
    return g


def optimizer_from_config(config, custom_objects=None):
    '''Instantiate an optimizer from a `{'class_name', 'config'}` dict.

    # Arguments
        config: dict with keys `'class_name'` and `'config'`.
        custom_objects: optional dict mapping class names to user-defined
            optimizer classes. Defaults to `None` rather than `{}` so the
            default is not a mutable object shared between calls.

    # Raises
        ValueError: if `class_name` names no known optimizer.
    '''
    if custom_objects is None:
        custom_objects = {}
    all_classes = {
        'sgd': SGD,
        'rmsprop': RMSprop,
        'adagrad': Adagrad,
        'adadelta': Adadelta,
        'adam': Adam,
        'adamax': Adamax,
        'nadam': Nadam,
    }
    class_name = config['class_name']
    if class_name in custom_objects:
        cls = custom_objects[class_name]
    else:
        if class_name.lower() not in all_classes:
            raise ValueError('Optimizer class not found:', class_name)
        cls = all_classes[class_name.lower()]
    return cls.from_config(config['config'])


class Optimizer(object):
    '''Abstract optimizer base class.

    Note: this is the parent class of all optimizers, not an actual optimizer
    that can be used for training models.

    All Keras optimizers support the following keyword arguments:

        clipnorm: float >= 0. Gradients will be clipped
            when their L2 norm exceeds this value.
        clipvalue: float >= 0. Gradients will be clipped
            when their absolute value exceeds this value.
    '''
    def __init__(self, **kwargs):
        allowed_kwargs = {'clipnorm', 'clipvalue'}
        for k in kwargs:
            if k not in allowed_kwargs:
                # TypeError is the conventional error for an unexpected
                # keyword argument (was a bare Exception).
                raise TypeError('Unexpected keyword argument '
                                'passed to optimizer: ' + str(k))
        self.__dict__.update(kwargs)
        self.updates = []
        self.weights = []

    def get_state(self):
        # Each entry of `self.updates` is an (variable, new_value) pair;
        # the state is the current backend value of each updated variable.
        return [K.get_value(u[0]) for u in self.updates]

    def set_state(self, value_list):
        assert len(self.updates) == len(value_list)
        for u, v in zip(self.updates, value_list):
            K.set_value(u[0], v)

    def get_updates(self, params, constraints, loss):
        raise NotImplementedError

    def get_gradients(self, loss, params):
        '''Return gradients of `loss` w.r.t. `params`, applying the
        optional `clipnorm` / `clipvalue` clipping if set.
        '''
        grads = K.gradients(loss, params)
        if hasattr(self, 'clipnorm') and self.clipnorm > 0:
            norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
            grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
        if hasattr(self, 'clipvalue') and self.clipvalue > 0:
            grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
        return grads

    def set_weights(self, weights):
        '''Sets the weights of the optimizer, from Numpy arrays.

        Should only be called after computing the gradients
        (otherwise the optimizer has no weights).

        # Arguments
            weights: a list of Numpy arrays. The number
                of arrays and their shape must match
                number of the dimensions of the weights
                of the optimizer (i.e. it should match the
                output of `get_weights`).

        # Raises
            ValueError: on a shape mismatch (was a bare Exception).
        '''
        params = self.weights
        weight_value_tuples = []
        param_values = K.batch_get_value(params)
        for pv, p, w in zip(param_values, params, weights):
            if pv.shape != w.shape:
                raise ValueError('Optimizer weight shape ' +
                                 str(pv.shape) +
                                 ' not compatible with '
                                 'provided weight shape ' + str(w.shape))
            weight_value_tuples.append((p, w))
        K.batch_set_value(weight_value_tuples)

    def get_weights(self):
        '''Returns the current weights of the optimizer,
        as a list of numpy arrays.
        '''
        return K.batch_get_value(self.weights)

    def get_config(self):
        config = {}
        if hasattr(self, 'clipnorm'):
            config['clipnorm'] = self.clipnorm
        if hasattr(self, 'clipvalue'):
            config['clipvalue'] = self.clipvalue
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)


class SGD(Optimizer):
    '''Stochastic gradient descent, with support for momentum,
    learning rate decay, and Nesterov momentum.

    # Arguments
        lr: float >= 0. Learning rate.
        momentum: float >= 0. Parameter updates momentum.
        decay: float >= 0. Learning rate decay over each update.
        nesterov: boolean. Whether to apply Nesterov momentum.
    '''
    def __init__(self, lr=0.01, momentum=0., decay=0.,
                 nesterov=False, **kwargs):
        super(SGD, self).__init__(**kwargs)
        # Explicit assignments instead of `self.__dict__.update(locals())`,
        # which also stored `self` (a reference cycle) and `kwargs`.
        self.nesterov = nesterov
        self.iterations = K.variable(0.)
        self.lr = K.variable(lr)
        self.momentum = K.variable(momentum)
        self.decay = K.variable(decay)
        # NOTE: historical misspelling kept for backward compatibility.
        self.inital_decay = decay

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = []

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        # momentum
        shapes = [K.get_variable_shape(p) for p in params]
        moments = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + moments
        for p, g, m in zip(params, grads, moments):
            v = self.momentum * m - lr * g  # velocity
            self.updates.append(K.update(m, v))

            if self.nesterov:
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v

            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'momentum': float(K.get_value(self.momentum)),
                  'decay': float(K.get_value(self.decay)),
                  'nesterov': self.nesterov}
        base_config = super(SGD, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class RMSprop(Optimizer):
    '''RMSProp optimizer.

    It is recommended to leave the parameters of this optimizer
    at their default values
    (except the learning rate, which can be freely tuned).

    This optimizer is usually a good choice for recurrent
    neural networks.

    # Arguments
        lr: float >= 0. Learning rate.
        rho: float >= 0.
        epsilon: float >= 0. Fuzz factor.
        decay: float >= 0. Learning rate decay over each update.
    '''
    def __init__(self, lr=0.001, rho=0.9, epsilon=1e-8, decay=0.,
                 **kwargs):
        super(RMSprop, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.lr = K.variable(lr)
        self.rho = K.variable(rho)
        self.decay = K.variable(decay)
        # NOTE: historical misspelling kept for backward compatibility.
        self.inital_decay = decay
        self.iterations = K.variable(0.)

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators
        self.updates = []

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        for p, g, a in zip(params, grads, accumulators):
            # update accumulator
            new_a = self.rho * a + (1. - self.rho) * K.square(g)
            self.updates.append(K.update(a, new_a))
            new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        # 'decay' was missing from the serialized config; include it so
        # get_config/from_config round-trips preserve the decay rate.
        config = {'lr': float(K.get_value(self.lr)),
                  'rho': float(K.get_value(self.rho)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(RMSprop, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Adagrad(Optimizer):
    '''Adagrad optimizer.

    It is recommended to leave the parameters of this optimizer
    at their default values.

    # Arguments
        lr: float >= 0. Learning rate.
        epsilon: float >= 0.

    # References
        - [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
    '''
    def __init__(self, lr=0.01, epsilon=1e-8, decay=0., **kwargs):
        super(Adagrad, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.lr = K.variable(lr)
        self.decay = K.variable(decay)
        # NOTE: historical misspelling kept for backward compatibility.
        self.inital_decay = decay
        self.iterations = K.variable(0.)

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators
        self.updates = []

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        for p, g, a in zip(params, grads, accumulators):
            new_a = a + K.square(g)  # update accumulator
            self.updates.append(K.update(a, new_a))
            new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        # 'decay' was missing from the serialized config; include it so
        # get_config/from_config round-trips preserve the decay rate.
        config = {'lr': float(K.get_value(self.lr)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adagrad, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Adadelta(Optimizer):
    '''Adadelta optimizer.

    It is recommended to leave the parameters of this optimizer
    at their default values.

    # Arguments
        lr: float >= 0. Learning rate.
            It is recommended to leave it at the default value.
        rho: float >= 0.
        epsilon: float >= 0. Fuzz factor.

    # References
        - [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
    '''
    def __init__(self, lr=1.0, rho=0.95, epsilon=1e-8, decay=0.,
                 **kwargs):
        super(Adadelta, self).__init__(**kwargs)
        # `rho` stays a plain float here (it is serialized as-is below).
        self.rho = rho
        self.epsilon = epsilon
        self.lr = K.variable(lr)
        self.decay = K.variable(decay)
        # NOTE: historical misspelling kept for backward compatibility.
        self.inital_decay = decay
        self.iterations = K.variable(0.)

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        delta_accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators + delta_accumulators
        self.updates = []

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
            # update accumulator
            new_a = self.rho * a + (1. - self.rho) * K.square(g)
            self.updates.append(K.update(a, new_a))

            # use the new accumulator and the *old* delta_accumulator
            update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)

            new_p = p - lr * update
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))

            # update delta_accumulator
            new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
            self.updates.append(K.update(d_a, new_d_a))
        return self.updates

    def get_config(self):
        # 'decay' was missing from the serialized config; include it so
        # get_config/from_config round-trips preserve the decay rate.
        config = {'lr': float(K.get_value(self.lr)),
                  'rho': self.rho,
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adadelta, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Adam(Optimizer):
    '''Adam optimizer.

    Default parameters follow those provided in the original paper.

    # Arguments
        lr: float >= 0. Learning rate.
        beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
        epsilon: float >= 0. Fuzz factor.

    # References
        - [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
    '''
    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, decay=0., **kwargs):
        super(Adam, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.iterations = K.variable(0)
        self.lr = K.variable(lr)
        self.beta_1 = K.variable(beta_1)
        self.beta_2 = K.variable(beta_2)
        self.decay = K.variable(decay)
        # NOTE: historical misspelling kept for backward compatibility.
        self.inital_decay = decay

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        # bias-corrected step size
        lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        # 'decay' was missing from the serialized config; include it so
        # get_config/from_config round-trips preserve the decay rate.
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adam, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Adamax(Optimizer):
    '''Adamax optimizer from Adam paper's Section 7. It is a variant
     of Adam based on the infinity norm.

    Default parameters follow those provided in the paper.

    # Arguments
        lr: float >= 0. Learning rate.
        beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
        epsilon: float >= 0. Fuzz factor.

    # References
        - [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
    '''
    def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, decay=0., **kwargs):
        super(Adamax, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.iterations = K.variable(0.)
        self.lr = K.variable(lr)
        self.beta_1 = K.variable(beta_1)
        self.beta_2 = K.variable(beta_2)
        self.decay = K.variable(decay)
        # NOTE: historical misspelling kept for backward compatibility.
        self.inital_decay = decay

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        # BUGFIX: use the decayed `lr`, not `self.lr` — the original
        # computed the decay above and then silently discarded it here.
        lr_t = lr / (1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        # zero init of 1st moment
        ms = [K.zeros(shape) for shape in shapes]
        # zero init of exponentially weighted infinity norm
        us = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + us

        for p, g, m, u in zip(params, grads, ms, us):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            u_t = K.maximum(self.beta_2 * u, K.abs(g))
            p_t = p - lr_t * m_t / (u_t + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(u, u_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        # 'decay' was missing from the serialized config; include it so
        # get_config/from_config round-trips preserve the decay rate.
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adamax, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Nadam(Optimizer):
    '''Nesterov Adam optimizer: Much like Adam is essentially RMSprop
    with momentum, Nadam is Adam RMSprop with Nesterov momentum.

    Default parameters follow those provided in the paper.
    It is recommended to leave the parameters of this optimizer
    at their default values.

    # Arguments
        lr: float >= 0. Learning rate.
        beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
        epsilon: float >= 0. Fuzz factor.

    # References
        - [Nadam report](http://cs229.stanford.edu/proj2015/054_report.pdf)
        - [On the importance of initialization and momentum in deep learning](http://www.cs.toronto.edu/~fritz/absps/momentum.pdf)
    '''
    def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, schedule_decay=0.004, **kwargs):
        super(Nadam, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.schedule_decay = schedule_decay
        self.iterations = K.variable(0.)
        self.m_schedule = K.variable(1.)
        self.lr = K.variable(lr)
        self.beta_1 = K.variable(beta_1)
        self.beta_2 = K.variable(beta_2)

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = self.iterations + 1

        # Due to the recommendations in [2], i.e. warming momentum schedule
        momentum_cache_t = self.beta_1 * (1. - 0.5 * (K.pow(0.96, t * self.schedule_decay)))
        momentum_cache_t_1 = self.beta_1 * (1. - 0.5 * (K.pow(0.96, (t + 1) * self.schedule_decay)))
        m_schedule_new = self.m_schedule * momentum_cache_t
        m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
        self.updates.append((self.m_schedule, m_schedule_new))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            # the following equations given in [1]
            g_prime = g / (1. - m_schedule_new)
            m_t = self.beta_1 * m + (1. - self.beta_1) * g
            m_t_prime = m_t / (1. - m_schedule_next)
            v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
            v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
            m_t_bar = (1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
            new_p = p_t

            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'epsilon': self.epsilon,
                  'schedule_decay': self.schedule_decay}
        base_config = super(Nadam, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


# aliases
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam


def get(identifier, kwargs=None):
    return get_from_module(identifier, globals(), 'optimizer',
                           instantiate=True, kwargs=kwargs)
#!/usr/bin/env python3
#
# Copyright (c) 2014-2015 the Sanzang Utils authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""
Sanzang Utils program module for CJK translation.

"""

import getopt
import io
import signal
import sys
import unicodedata


USAGE = """Usage: szu-t [options] table_file [file ...]

Translate CJK text using a translation table.

Options:
  -h, --help       print this help message and exit
  -v, --verbose    include information useful for debugging

"""


def set_stdio_utf8():
    """
    Set standard I/O streams to UTF-8.

    Attempt to reassign standard I/O streams to new streams using UTF-8.
    Standard input should discard any leading BOM. If an error is raised,
    assume the environment is inflexible but correct (IDLE).

    """
    try:
        sys.stdin = io.TextIOWrapper(
            sys.stdin.detach(), encoding='utf-8-sig', line_buffering=True)
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), encoding='utf-8', line_buffering=True)
        sys.stderr = io.TextIOWrapper(
            sys.stderr.detach(), encoding='utf-8', line_buffering=True)
    except io.UnsupportedOperation:
        pass


def read_table(table_fd):
    """
    Read a translation table from an opened file.

    Given an open file object, read a well-formatted translation table
    and return its contents to the caller as a list of rule records,
    one per nonblank line, with columns split on '|'.

    """
    # Normalize to NFC so table terms match NFC-normalized source text.
    normalized = unicodedata.normalize('NFC', table_fd.read())
    rows = (line.strip() for line in normalized.split('\n'))
    return [row.split('|') for row in rows if row]


def vocab(table, text):
    """
    Return a new table containing only the vocabulary in the source text.

    Create a new translation table containing only the rules that are
    relevant for the given text. This is created by checking all source
    terms against a copy of the text.

    """
    relevant = []
    remaining = str(text)
    for rule in table:
        if rule[0] in remaining:
            # Blank out matched terms so later (shorter) terms contained
            # in them are not matched again.
            remaining = remaining.replace(rule[0], '\x1f')
            relevant.append(rule)
    return relevant


def tr_raw(table, text):
    """
    Translate text using a table. Return raw texts in a list.

    Perform translation of a text by applying the rules in a translation
    table. The result is a list of strings with each element corresponding
    to a column in the translation table.

    """
    source = unicodedata.normalize('NFC', text).replace('\x1f', '')
    rules = vocab(table, source)
    columns = [source]
    for col_no in range(1, len(table[0])):
        rendered = source
        for rule in rules:
            # \x1f markers delimit each substituted term.
            rendered = rendered.replace(rule[0], '\x1f' + rule[col_no] + '\x1f')
        # Collapse the markers into word-separating spaces.
        rendered = rendered.replace('\x1f\n', '\n')
        rendered = rendered.replace('\x1f\x1f', ' ')
        rendered = rendered.replace('\x1f', ' ')
        columns.append(rendered)
    return columns


def tr_fmt(table, buffer, start):
    """
    Translate text using a table. Return a formatted listing string.

    Perform translation of a text by applying rules in a translation
    table, and return a formatted string. The formatted string represents
    the source text and its translations collated together and organized
    by line number and by translation table column number.

    """
    columns = [col.rstrip().split('\n') for col in tr_raw(table, buffer)]
    pieces = []
    for line_offset in range(len(columns[0])):
        for col_idx in range(len(table[0])):
            pieces.append('%d.%d|%s\n' % (
                start + line_offset,
                col_idx + 1,
                columns[col_idx][line_offset]))
        pieces.append('\n')
    return ''.join(pieces)


def tr_file(table, fd_in, fd_out, start_idx=1, buf_size=100):
    """
    Translate from one file to another (buffered).

    Given a table, an input file object, and an output file object, apply
    the translation table rules to the input text and write the translation
    as a formatted string to the output. Returns the next line index.

    """
    pending = ''
    line_no = start_idx
    for line in fd_in:
        pending += line
        if line_no % buf_size == 0:
            fd_out.write(tr_fmt(table, pending, line_no - buf_size + 1))
            pending = ''
        line_no += 1
    if pending:
        # Flush the final partial buffer, numbering from its first line.
        fd_out.write(tr_fmt(table, pending, line_no - pending.count('\n')))
    return line_no


def main(argv):
    """
    Run as a portable command-line program.

    This program reads and writes UTF-8 text, and uses standard I/O streams
    for input text and translation output. Input has any leading byte-order
    marks stripped out from the beginning of the input stream. Broken pipes
    and SIGINT are handled silently.

    """
    set_stdio_utf8()
    if 'SIGPIPE' in dir(signal):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    verbose = False
    try:
        opts, args = getopt.getopt(argv[1:], 'hv', ['help', 'verbose'])
        for option, _ in opts:
            if option in ('-h', '--help'):
                print(USAGE, end='')
                return 0
            if option in ('-v', '--verbose'):
                verbose = True
        if not args:
            sys.stderr.write(USAGE)
            return 1
        with open(args[0], 'r', encoding='utf-8-sig') as table_fd:
            table = read_table(table_fd)
        if len(args) == 1:
            if sys.stdin.isatty():
                # Interactive: translate line-by-line for prompt feedback.
                tr_file(table, sys.stdin, sys.stdout, start_idx=1, buf_size=1)
            else:
                tr_file(table, sys.stdin, sys.stdout)
        else:
            idx = 1
            for file_path in args[1:]:
                with open(file_path, 'r', encoding='utf-8-sig') as fin:
                    idx = tr_file(table, fin, sys.stdout, idx)
        return 0
    except KeyboardInterrupt:
        print()
        return 1
    except Exception as err:
        if verbose:
            raise
        sys.stderr.write('szu-t: ' + str(err) + '\n')
        return 1


if __name__ == '__main__':
    sys.exit(main(sys.argv))
#!/usr/bin/env python
# encoding: utf-8

# Copyright 2012 Aaron Morton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities for working with Cassandra and the versions."""

import copy
import datetime
import errno
import logging
import grp
import os
import os.path
import pwd
import re
import socket

import dt_util
import file_util


class Components(object):
    """Constants for Cassandra SSTable components.

    An SSTable on disk is a set of component files sharing a common
    name prefix; these are the component suffixes.
    """

    DATA = "Data.db"
    PRIMARY_INDEX = "Index.db"
    FILTER = "Filter.db"
    COMPRESSION_INFO = "CompressionInfo.db"
    STATS = "Statistics.db"
    DIGEST = "Digest.sha1"
    SUMMARY = "Summary.db"
    TOC = "TOC.txt"

    COMPACTED_MARKER = "Compacted"
    """Marker added to compacted files."""

    TEMPORARY_MARKER = "tmp"
    """Marker used to identify temp sstables that are being created."""

    FILE_VERSION_PATTERN = re.compile("[a-z]+")

log = logging.getLogger(__name__)

# ============================================================================
#

MIN_VERSION = (1, 0, 0)

TARGET_VERSION = None
"""Cassandra version we are working with.

Used for file paths and things.

Cannot use the version of the file because when 1.1 starts it moves files
to new locations but does not change their file version.
"""


def set_version(ver):
    """Set the global cassandra version.

    If ``ver`` is string we expect the form "major.minor.rev". Otherwise it
    is expected to be a tuple of ints.
    """
    global TARGET_VERSION

    if isinstance(ver, basestring):
        # Only keep the first three numeric components (major, minor, rev).
        TARGET_VERSION = tuple(int(i) for i in ver.split(".")[:3])
    else:
        TARGET_VERSION = ver
    log.info("Cassandra version changed to %s", TARGET_VERSION)
    return

# ============================================================================
# Utility.

_SAFE_DT_FMT = "%Y_%m_%dT%H_%M_%S_%f"
"""strftime() format to safely use a datetime in file name."""


def _to_safe_datetime_fmt(dt):
    """Convert the datetime ``dt`` instance to a file system safe format.
    """
    return dt.strftime(_SAFE_DT_FMT)


def _from_safe_datetime_fmt(dt_str):
    """Convert the string ``dt_str`` from a file system safe format."""
    return datetime.datetime.strptime(dt_str, _SAFE_DT_FMT)


def is_snapshot_path(file_path):
    """Returns true if this path is a snapshot path.

    It's a pretty simple test: does it have 'snapshots' in it.

    Raises :exc:`ValueError` if ``file_path`` has no directory component.
    """
    head = os.path.dirname(file_path or "")
    if not head:
        raise ValueError("file_path %s does not include directory" % (
            file_path,))
    # Walk up one path component at a time. Stop when the path is
    # exhausted as well as at "/": for a relative path os.path.split()
    # eventually yields an empty head, and the original
    # ``while head != "/"`` test looped forever on that empty string.
    while head and head != "/":
        head, tail = os.path.split(head)
        if tail == "snapshots":
            return True
    return False

# ============================================================================
# On disk file stats.
class FileStat(object):
    """Basic file stats for a path on disk.

    ``os.stat`` is only called (once, lazily) for the fields the caller
    did not supply.
    """
    log = logging.getLogger("%s.%s" % (__name__, "FileStat"))

    def __init__(self, file_path, uid=None, user=None, gid=None, group=None,
        mode=None, size=None):

        def meta():
            # Lazy one-shot stat: the result is memoised on the function
            # object so the file is stat'd at most once per __init__.
            try:
                return meta.data
            except AttributeError:
                meta.data = self._extract_meta(file_path)
                return meta.data

        self.file_path = file_path
        self.uid = meta()["uid"] if uid is None else uid
        self.gid = meta()["gid"] if gid is None else gid
        self.mode = meta()["mode"] if mode is None else mode
        self.size = meta()["size"] if size is None else size
        self.user = meta()["user"] if user is None else user
        self.group = meta()["group"] if group is None else group

    def __str__(self):
        return "FileStat for {file_path}: uid {uid}, user {user}, gid {gid},"\
            " group {group}, mode {mode}, size {size}".format(**vars(self))

    def serialise(self):
        """Serialise the state to a dict.

        Every value has to be a string or a dict.
        """
        return {
            "file_path" : self.file_path,
            "uid" : str(self.uid),
            "gid" : str(self.gid),
            "mode" : str(self.mode),
            "size" : str(self.size),
            "user" : str(self.user),
            "group" : str(self.group),
        }

    @classmethod
    def deserialise(cls, data):
        """Create an instance using the ``data`` dict produced by
        :meth:`serialise`."""
        assert data

        def get_i(field):
            return int(data[field])

        return cls(data["file_path"], uid=get_i("uid"), user=data["user"],
            gid=get_i("gid"), group=data["group"], mode=get_i("mode"),
            size=get_i("size"))

    def _extract_meta(self, file_path):
        """Get a dict of the os file meta for the ``file_path``

        Allow OS errors to bubble out as files can be removed during
        processing.
        """
        stat = os.stat(file_path)

        file_meta = {
            "uid" : stat.st_uid,
            "gid" : stat.st_gid,
            "mode" : stat.st_mode,
            "size" : stat.st_size
        }

        try:
            file_meta['user'] = pwd.getpwuid(stat.st_uid).pw_name
        except (EnvironmentError, KeyError):
            # pwd raises KeyError for an unknown uid; previously only
            # EnvironmentError was caught so the lookup failure escaped.
            log.debug("Ignoring error getting user name.", exc_info=True)
            file_meta['user'] = ""
        try:
            file_meta['group'] = grp.getgrgid(stat.st_gid).gr_name
        except (EnvironmentError, KeyError):
            # grp raises KeyError for an unknown gid.
            log.debug("Ignoring error getting group name.", exc_info=True)
            file_meta['group'] = ""

        log.debug("For {file_path} got meta {file_meta} ".format(
            file_path=file_path, file_meta=file_meta))
        return file_meta

# ============================================================================
# Mock stats for a deleted file

class DeletedFileStat(FileStat):
    """File stats for a deleted file.

    Does not extract any data from disk.
    """
    log = logging.getLogger("%s.%s" % (__name__, "DeletedFileStat"))

    def __init__(self, file_path):
        super(DeletedFileStat, self).__init__(file_path)

    def _extract_meta(self, file_path):
        # The file is gone, so return zeroed/empty meta instead of
        # stat'ing the path.
        return {
            "uid" : 0,
            "user" : None,
            "gid" : 0,
            "group" : None,
            "mode" : 0,
            "size" : 0
        }

# ============================================================================
# A SSTable Component such as a -Data.db file.

class SSTableComponent(object):
    """Meta data about a component file for an SSTable.

    e.g. the -Data.db file.
    """
    log = logging.getLogger("%s.%s" % (__name__, "SSTableComponent"))

    def __init__(self, file_path, keyspace=None, cf=None, version=None,
        generation=None, component=None, temporary=None, stat=None,
        is_deleted=False):

        def props():
            # Lazy one-shot parse of the file path, memoised on the
            # function object; only runs if a field was not supplied.
            try:
                return props.data
            except AttributeError:
                props.data = self._component_properties(file_path)
                return props.data

        self.file_path = file_path
        self.keyspace = props()["keyspace"] if keyspace is None else keyspace
        self.cf = props()["cf"] if cf is None else cf
        self.version = props()["version"] if version is None else version
        self.generation = props()["generation"] if generation is None \
            else generation
        self.component = props()["component"] if component is None \
            else component
        self.temporary = props()["temporary"] if temporary is None \
            else temporary
        self.is_deleted = is_deleted

        if stat is None:
            # A deleted file cannot be stat'd; use the zeroed mock stats.
            if self.is_deleted:
                self.stat = DeletedFileStat(file_path)
            else:
                self.stat = FileStat(file_path)
        else:
            self.stat = stat

    def __str__(self):
        return "SSTableComponent {file_path}".format(**vars(self))

    def serialise(self):
        """Serialise the state to a dict."""
        return {
            "file_path" : self.file_path,
            "keyspace" : self.keyspace,
            "cf" : self.cf,
            "version" : self.version,
            "generation" : str(self.generation),
            "component" : self.component,
            "temporary" : str(self.temporary),
            "stat" : self.stat.serialise(),
            "is_deleted" : "true" if self.is_deleted else "false"
        }

    @classmethod
    def deserialise(cls, data):
        """Create an instance using the ``data`` dict produced by
        :meth:`serialise`."""
        assert data
        return cls(data["file_path"], keyspace=data["keyspace"],
            cf=data["cf"], version=data["version"],
            generation=int(data["generation"]),
            component=data["component"],
            temporary=data["temporary"].lower() == "true",
            stat=FileStat.deserialise(data["stat"]),
            is_deleted=data["is_deleted"] == "true")

    def _component_properties(self, file_path):
        """Parses ``file_path`` to extract the component tokens.

        Raises :exc:`ValueError` if the ``file_path`` cannot be parsed.

        Returns a dict of the component properties.
        """
        self.log.debug("Parsing file path %s", file_path)
        file_dir, file_name = os.path.split(file_path)
        tokens = file_name.split("-")

        def pop():
            """Pop from the tokens. Expected a token to be there."""
            try:
                return tokens.pop(0)
            except IndexError:
                raise ValueError("Not a valid SSTable file path %s" % (
                    file_path,))

        def peek():
            """Peeks the tokens. Expected a token to be there."""
            try:
                return tokens[0]
            except IndexError:
                raise ValueError("Not a valid SSTable file path %s" % (
                    file_path,))

        # Token order matters: from 1.1 the keyspace is the leading token
        # of the file name, followed by the CF name and an optional "tmp"
        # marker for in-flight sstables.
        keyspace = pop() if TARGET_VERSION >= (1, 1, 0) else None
        cf = pop()
        temporary = peek() == TEMPORARY_MARKER
        properties = {
            "keyspace" : keyspace,
            "cf" : cf,
            "temporary" : temporary
        }
        if properties["temporary"]:
            pop()

        # If we did not get the keyspace from the file name it should
        # be in the path
        if TARGET_VERSION < (1, 1, 0):
            assert file_dir
            assert not properties["keyspace"]
            _, ks = os.path.split(file_dir)
            self.log.debug("Using Cassandra version %s, extracted KS name %s"\
                " from file dir %s", TARGET_VERSION, ks, file_dir)
            properties["keyspace"] = ks

        # Older versions did not use two character file versions.
        if FILE_VERSION_PATTERN.match(peek()):
            properties["version"] = pop()
        else:
            # If we cannot work out the version then we probably
            # decoded the file path wrong cause the cassandra version is
            # wrong.
            raise RuntimeError("Got invalid file version {version} for "\
                "file path {path} using Cassandra version {cass_ver}.".format(
                    version=pop(), path=file_path, cass_ver=TARGET_VERSION))

        properties["generation"] = int(pop())
        properties["component"] = pop()

        self.log.debug("Got file properties %s from path %s", properties,
            file_path)
        return properties

    @property
    def file_name(self):
        """Returns the file name for the component formatted to the current
        `TARGET_VERSION`.
        """
        if TARGET_VERSION < (1, 1, 0):
            # pre 1.1 the file name was CF-version-generation-component
            fmt = "{cf}-{version}-{generation}-{component}"
        else:
            # Assume 1.1 and beyond
            # file name adds the keyspace.
            fmt = "{keyspace}-{cf}-{version}-{generation}-{component}"
        return fmt.format(**vars(self))

    @property
    def backup_file_name(self):
        """Returns the file name to use when backing up this component.

        This name ignores the current :attr:`cassandra.TARGET_VERSION` and
        always uses the >= 1.1 form that includes the keyspace.
        """
        return "{keyspace}-{cf}-{version}-{generation}-{component}".format(
            **vars(self))

    @property
    def cass_version(self):
        """Returns the Cassandra version that created the current file by
        inspecting the major and minor file version.

        Cassandra version is returned as a three part integer tuple
        (major, minor, rev).

        See o.a.c.io.sstable.Descriptor in the Cassandra code for up to
        date info on the versions. At the time of writing::

            public static final String LEGACY_VERSION = "a"; // "pre-history"
            // b (0.7.0): added version to sstable filenames
            // c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings
            // d (0.7.0): row size in data component becomes a long instead of int
            // e (0.7.0): stores undecorated keys in data and index components
            // f (0.7.0): switched bloom filter implementations in data component
            // g (0.8): tracks flushed-at context in metadata component
            // h (1.0): tracks max client timestamp in metadata component
            // hb (1.0.3): records compression ration in metadata component
            // hc (1.0.4): records partitioner in metadata component
            // hd (1.0.10): includes row tombstones in maxtimestamp
            // he (1.1.3): includes ancestors generation in metadata component
            // hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782)
            // ia (1.2.0): column indexes are promoted to the index file
            //             records estimated histogram of deletion times in tombstones
            //             bloom filter (keys and columns) upgraded to Murmur3
            // ib (1.2.1): tracks min client timestamp in metadata component

        Raises :exc:`ValueError` for an unknown file version.
        """
        major_version = self.version[0]
        minor_version = self.version[1] if len(self.version) > 1 else ""

        if major_version == "a":
            assert not minor_version
            return (0, 6, 0)

        if major_version >= "b" and major_version <= "f":
            assert not minor_version
            return (0, 7, 0)

        if major_version == "g":
            assert not minor_version
            return (0, 8, 0)

        if major_version == "h":
            if not minor_version:
                return (1, 0, 0)
            elif minor_version == "b":
                return (1, 0, 3)
            elif minor_version == "c":
                return (1, 0, 4)
            elif minor_version == "d":
                return (1, 0, 10)
            elif minor_version == "e":
                return (1, 1, 3)
            elif minor_version == "f":
                return (1, 1, 6)

        if major_version == "i":
            if minor_version == "a":
                return (1, 2, 0)
            elif minor_version == "b":
                return (1, 2, 1)

        raise ValueError("Unknown file format {version}".format(
            version=self.version))

    def same_sstable(self, other):
        """Returns ``True`` if the ``other`` :cls:`SSTableComponent` is
        from the same SSTable as this.
        """
        if other is None:
            return False
        return (other.keyspace == self.keyspace) and (
            other.cf == self.cf) and (
            other.version == self.version) and (
            other.generation == self.generation)

# ============================================================================
# A file that is going to be or has been backed up.
# Includes the MD5 which is expensive to calculate.

class BackupFile(object):
    """A file that is going to be backed up
    """

    def __init__(self, file_path, host=None, md5=None, component=None):
        self.file_path = component.file_path if component is not \
            None else file_path
        self.component = SSTableComponent(file_path) if component is None \
            else component
        self.host = socket.getfqdn() if host is None else host
        # The MD5 is expensive; only calculate it when not supplied.
        self.md5 = file_util.file_md5(self.file_path) if md5 is None else md5

    def __str__(self):
        return "BackupFile {file_path}: host {host}, md5 {md5}, "\
            "{component}".format(**vars(self))

    def serialise(self):
        """Serialises the instance to a dict.

        All values must be string or dict.
        """
        return {
            "host" : self.host,
            "md5" : self.md5,
            "cassandra_version" : ".".join(str(i) for i in TARGET_VERSION),
            "component" : self.component.serialise()
        }

    @classmethod
    def deserialise(cls, data):
        """Deserialise the ``data`` dict to create a BackupFile."""
        assert data
        return cls(
            None,
            host=data["host"],
            md5=data["md5"],
            component=SSTableComponent.deserialise(data["component"])
        )

    @classmethod
    def backup_keyspace_dir(cls, host, keyspace):
        """Gets the directory to that contains backups for the specified
        ``host`` and ``keyspace``.
        """
        # (Previously the first parameter of this classmethod was
        # misleadingly named ``self``.)
        return os.path.join(*(
            "hosts",
            host,
            keyspace
        ))

    @property
    def backup_path(self):
        """Gets the path to backup this file to. """
        return os.path.join(*(
            "hosts",
            self.host,
            self.component.keyspace,
            self.component.cf,
            self.component.backup_file_name,
        ))

    @property
    def restore_path(self):
        """Gets the path to restore this file to formatted for the current
        ``TARGET_VERSION``.
        """
        if TARGET_VERSION < (1, 1, 0):
            # Pre 1.1 path was keyspace/sstable
            return os.path.join(*(
                self.component.keyspace,
                self.component.file_name,
            ))

        # after 1.1 path was keyspace/cf/sstable
        return os.path.join(*(
            self.component.keyspace,
            self.component.cf,
            self.component.file_name,
        ))

# ============================================================================
# A file that was attempted to be restored.

class RestoredFile(object):
    """A file that was processed during a restore.

    It may or may not have been restored.
    """

    def __init__(self, was_restored, restore_path, backup_file,
        reason_skipped=None):
        self.was_restored = was_restored
        self.restore_path = restore_path
        self.backup_file = backup_file
        self.reason_skipped = reason_skipped

    def serialise(self):
        """Serialises the instance to a dict.

        All values must be string or dict.
        """
        return {
            "was_restored" : "true" if self.was_restored else "false",
            "restore_path" : self.restore_path,
            "backup_file" : self.backup_file.serialise(),
            "reason_skipped" : self.reason_skipped or ""
        }

    @classmethod
    def deserialise(cls, data):
        """Deserialise the ``data`` dict to create a :cls:`RestoredFile`."""
        assert data
        return cls(
            data["was_restored"] == "true",
            data["restore_path"],
            BackupFile.deserialise(data["backup_file"]),
            reason_skipped=data["reason_skipped"]
        )

    def restore_msg(self):
        """Small message describing where the file was restored from -> to."""
        if self.was_restored:
            return "{s.backup_file.backup_path} -> {s.restore_path}".format(
                s=self)
        return "{s.backup_file.backup_path} -> "\
            "Skipped: {s.reason_skipped}".format(s=self)

# ============================================================================
# A manifest of the files in a keyspace.

class KeyspaceBackup(object):
    """A backup set for a particular keyspace.
    """
    log = logging.getLogger("%s.%s" % (__name__, "KeyspaceBackup"))

    def __init__(self, keyspace, host=None, timestamp=None, backup_name=None,
        components=None):

        self.keyspace = keyspace
        self.host = host or socket.getfqdn()
        self.timestamp = timestamp or dt_util.now()
        self.backup_name = backup_name or "{ts}-{keyspace}-{host}".format(
            ts=_to_safe_datetime_fmt(self.timestamp), keyspace=keyspace,
            host=self.host)

        # Map of {cf_name : [component]}
        self.components = components or {}

    def add_component(self, component):
        """Add the ``component`` to the list of components in this
        manifest.
        """
        assert component.keyspace == self.keyspace
        self.components.setdefault(component.cf, []).append(component)
        return component

    def remove_sstable(self, component):
        """Remove all components for the SSTable ``component`` belongs to."""
        # Fixed: previously asserted component.keyspace against itself,
        # which is always true and never guarded anything.
        assert component.keyspace == self.keyspace
        old_components = self.components.setdefault(component.cf, [])
        self.components[component.cf] = [
            comp
            for comp in old_components
            if not component.same_sstable(comp)
        ]

    def snapshot(self):
        """Return a deep copy of this manifest that has the current
        timestamp."""
        # Not passing timestamp/backup_name makes the copy pick up "now"
        # and a matching fresh backup name.
        return KeyspaceBackup(self.keyspace, host=self.host,
            components=copy.deepcopy(self.components))

    def serialise(self):
        """Return manifest that describes the backup set."""
        components = dict(
            (key, [component.serialise() for component in value])
            for key, value in self.components.iteritems()
        )
        return {
            "host" : self.host,
            "keyspace" : self.keyspace,
            "timestamp" : dt_util.to_iso(self.timestamp),
            "name" : self.backup_name,
            "components" : components
        }

    @classmethod
    def deserialise(cls, data):
        """Create an instance from the ``data`` dict.
        """
        assert data
        components = dict(
            (key, [SSTableComponent.deserialise(comp) for comp in value])
            for key, value in data["components"].iteritems()
        )
        return cls(data["keyspace"], host=data["host"],
            timestamp=dt_util.from_iso(data["timestamp"]),
            backup_name=data["name"], components=components)

    @classmethod
    def from_backup_name(cls, backup_name):
        """Create a KeyspaceBackup from a backup name.

        The object does not contain a components list."""

        # format is timestamp-keyspace-host
        # host may have "-" parts so only split the first two tokens
        # from the name.
        tokens = backup_name.split("-", 2)
        assert len(tokens) == 3, "Invalid backup_name %s" % (backup_name,)

        safe_ts = tokens.pop(0)
        keyspace = tokens.pop(0)
        host = tokens.pop(0)
        assert not tokens

        # expecting 2012_10_22T14_26_57_871835 for the safe TS.
        timestamp = _from_safe_datetime_fmt(safe_ts)
        return cls(keyspace, host=host, timestamp=timestamp)

    @classmethod
    def from_backup_path(cls, backup_path):
        """Create a KeyspaceBackup from the path of a manifest file."""
        _, local = os.path.split(backup_path)
        # Fixed: strip the ".json" extension from the local file name.
        # Previously splitext() was called on the full backup_path, so the
        # resulting backup_name kept the directory prefix.
        backup_name, _ = os.path.splitext(local)
        return cls.from_backup_name(backup_name)

    @classmethod
    def backup_keyspace_dir(cls, keyspace):
        """Returns the backup dir used for the ``keyspace``.

        Manifests are not stored in this path, they are in
        :attr:`backup_day_dir`
        """
        return os.path.join(*(
            "cluster",
            keyspace
        ))

    @classmethod
    def backup_day_dir(cls, keyspace, host, day):
        """Returns the backup dir used to store manifests for the
        ``keyspace`` and ``host`` on the datetime ``day``"""
        return os.path.join(*(
            "cluster",
            keyspace,
            str(day.year),
            str(day.month),
            str(day.day),
            host
        ))

    @property
    def backup_path(self):
        """Gets the path to backup the keyspace manifest to."""
        return os.path.join(
            self.backup_day_dir(self.keyspace, self.host, self.timestamp),
            "%s.json" % (self.backup_name,)
        )

    def iter_components(self):
        """Iterates through the SSTableComponents in this backup.

        Components ordered by column family. You will get all the
        components from "Aardvark" CF before "Beetroot"
        """
        for cf_name in sorted(self.components.keys()):
            # Within a CF, order by file name for a stable iteration.
            for component in sorted(self.components[cf_name],
                key=lambda comp: comp.file_name):
                yield component
import httplib
from urllib import quote

from testcases import TestServerTestCase

try:
    import json
except ImportError:
    # < Python 2.6
    from django.utils import simplejson as json


class HTTPTestCase(TestServerTestCase):
    """End-to-end HTTP tests for the GIS resources.

    Each test starts a live test server on localhost:8001 and exercises
    the /api/v1/ endpoints over a raw httplib connection.
    """

    def setUp(self):
        # Spin up the live test server each test talks to.
        self.start_test_server(address='localhost', port=8001)

    def tearDown(self):
        self.stop_test_server()

    def get_connection(self):
        # Fresh connection per request; callers are responsible for close().
        return httplib.HTTPConnection('localhost', 8001)

    def test_get_apis_json(self):
        # Top-level API listing rendered as JSON.
        connection = self.get_connection()
        connection.request('GET', '/api/v1/', headers={'Accept': 'application/json'})
        response = connection.getresponse()
        connection.close()
        data = json.loads(response.read())
        self.assertEqual(response.status, 200)
        self.assertEqual(data, {"geonotes": {"list_endpoint": "/api/v1/geonotes/", "schema": "/api/v1/geonotes/schema/"}, "users": {"list_endpoint": "/api/v1/users/", "schema": "/api/v1/users/schema/"}})

    def test_get_apis_xml(self):
        # Top-level API listing rendered as XML; compared byte-for-byte.
        connection = self.get_connection()
        connection.request('GET', '/api/v1/', headers={'Accept': 'application/xml'})
        response = connection.getresponse()
        connection.close()
        data = response.read()
        self.assertEqual(response.status, 200)
        self.assertEqual(data, '<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<response><users type="hash"><list_endpoint>/api/v1/users/</list_endpoint><schema>/api/v1/users/schema/</schema></users><geonotes type="hash"><list_endpoint>/api/v1/geonotes/</list_endpoint><schema>/api/v1/geonotes/schema/</schema></geonotes></response>')

    def test_get_list(self):
        # List endpoint returns the three fixture geonotes with their
        # GeoJSON geometry fields (points, polys, lines).
        connection = self.get_connection()
        connection.request('GET', '/api/v1/geonotes/', headers={'Accept': 'application/json'})
        response = connection.getresponse()
        connection.close()
        data = json.loads(response.read())
        self.assertEqual(response.status, 200)
        self.assertEqual(len(data['objects']), 3)

        # Because floating point.
        self.assertEqual(data['objects'][0]['content'], "Wooo two points inside Golden Gate park")
        self.assertEqual(data['objects'][0]['points']['type'], 'MultiPoint')
        self.assertAlmostEqual(data['objects'][0]['points']['coordinates'][0][0], -122.475233, places=5)
        self.assertAlmostEqual(data['objects'][0]['points']['coordinates'][0][1], 37.768616, places=5)
        self.assertAlmostEqual(data['objects'][0]['points']['coordinates'][1][0], -122.470416, places=5)
        self.assertAlmostEqual(data['objects'][0]['points']['coordinates'][1][1], 37.767381, places=5)
        self.assertEqual(data['objects'][1]['content'], "This is a note about Golden Gate Park. It contains Golden Gate Park\'s polygon")
        self.assertEqual(data['objects'][1]['polys']['type'], 'MultiPolygon')
        self.assertEqual(len(data['objects'][1]['polys']['coordinates']), 1)
        self.assertEqual(len(data['objects'][1]['polys']['coordinates'][0]), 1)
        self.assertEqual(len(data['objects'][1]['polys']['coordinates'][0][0]), 8)
        self.assertEqual(data['objects'][2]['content'], "A path inside Golden Gate Park! Huzzah!")
        self.assertEqual(data['objects'][2]['lines']['type'], 'MultiLineString')
        self.assertAlmostEqual(data['objects'][2]['lines']['coordinates'][0][0][0], -122.504544, places=5)
        self.assertAlmostEqual(data['objects'][2]['lines']['coordinates'][0][0][1], 37.767002, places=5)
        self.assertAlmostEqual(data['objects'][2]['lines']['coordinates'][0][1][0], -122.499995, places=5)
        self.assertAlmostEqual(data['objects'][2]['lines']['coordinates'][0][1][1], 37.768223, places=5)

    def test_post_object(self):
        # Plain JSON POST without geometry; the new object becomes pk=4.
        connection = self.get_connection()
        post_data = '{"content": "A new post.", "is_active": true, "title": "New Title", "slug": "new-title", "user": "/api/v1/users/1/"}'
        connection.request('POST', '/api/v1/geonotes/', body=post_data, headers={'Accept': 'application/json', 'Content-type': 'application/json'})
        response = connection.getresponse()
        self.assertEqual(response.status, 201)
        self.assertEqual(dict(response.getheaders())['location'], 'http://localhost:8001/api/v1/geonotes/4/')

        # make sure posted object exists
        connection.request('GET', '/api/v1/geonotes/4/', headers={'Accept': 'application/json'})
        response = connection.getresponse()
        connection.close()

        self.assertEqual(response.status, 200)

        data = response.read()
        obj = json.loads(data)

        self.assertEqual(obj['content'], 'A new post.')
        self.assertEqual(obj['is_active'], True)
        self.assertEqual(obj['user'], '/api/v1/users/1/')

    def test_post_geojson(self):
        # POST with a GeoJSON MultiPolygon body and verify it round-trips.
        connection = self.get_connection()
        post_data = """{
            "content": "A new post.",
            "is_active": true,
            "title": "New Title2",
            "slug": "new-title2",
            "user": "/api/v1/users/1/",
            "polys": { "type": "MultiPolygon", "coordinates": [ [ [ [ -122.511067, 37.771276 ], [ -122.510037, 37.766391 ], [ -122.510037, 37.763813 ], [ -122.456822, 37.765848 ], [ -122.452960, 37.766459 ], [ -122.454848, 37.773990 ], [ -122.475362, 37.773040 ], [ -122.511067, 37.771276 ] ] ] ] }
        }"""
        connection.request('POST', '/api/v1/geonotes/', body=post_data, headers={'Accept': 'application/json', 'Content-type': 'application/json'})
        response = connection.getresponse()
        self.assertEqual(response.status, 201)
        self.assertEqual(dict(response.getheaders())['location'], 'http://localhost:8001/api/v1/geonotes/4/')

        # make sure posted object exists
        connection.request('GET', '/api/v1/geonotes/4/', headers={'Accept': 'application/json'})
        response = connection.getresponse()
        connection.close()

        self.assertEqual(response.status, 200)

        data = response.read()
        obj = json.loads(data)

        self.assertEqual(obj['content'], 'A new post.')
        self.assertEqual(obj['is_active'], True)
        self.assertEqual(obj['user'], '/api/v1/users/1/')
        self.assertEqual(obj['polys'], {u'type': u'MultiPolygon', u'coordinates': [[[[-122.511067, 37.771276], [-122.510037, 37.766390999999999], [-122.510037, 37.763812999999999], [-122.456822, 37.765847999999998], [-122.45296, 37.766458999999998], [-122.454848, 37.773989999999998], [-122.475362, 37.773040000000002], [-122.511067, 37.771276]]]]})

    def test_post_xml(self):
        # POST the same kind of payload serialized as XML; geometry comes
        # back as GeoJSON when requested as JSON, or as XML hashes.
        connection = self.get_connection()
        post_data = """<object><created>2010-03-30T20:05:00</created><polys type="null"/><is_active type="boolean">True</is_active><title>Points inside Golden Gate Park note 2</title><lines type="null"/><slug>points-inside-golden-gate-park-note-2</slug><content>A new post.</content><points type="hash"><type>MultiPoint</type><coordinates type="list"><objects><value type="float">-122.475233</value><value type="float">37.768617</value></objects><objects><value type="float">-122.470416</value><value type="float">37.767382</value></objects></coordinates></points><user>/api/v1/users/1/</user></object>"""
        connection.request('POST', '/api/v1/geonotes/', body=post_data, headers={'Accept': 'application/xml', 'Content-type': 'application/xml'})
        response = connection.getresponse()
        self.assertEqual(response.status, 201)
        self.assertEqual(dict(response.getheaders())['location'], 'http://localhost:8001/api/v1/geonotes/4/')

        # make sure posted object exists
        connection.request('GET', '/api/v1/geonotes/4/', headers={'Accept': 'application/json'})
        response = connection.getresponse()
        connection.close()

        self.assertEqual(response.status, 200)

        data = response.read()
        obj = json.loads(data)

        self.assertEqual(obj['content'], 'A new post.')
        self.assertEqual(obj['is_active'], True)
        self.assertEqual(obj['user'], '/api/v1/users/1/')
        # Weeeee! GeoJSON returned!
        self.assertEqual(obj['points'], {"coordinates": [[-122.475233, 37.768616999999999], [-122.470416, 37.767381999999998]], "type": "MultiPoint"})

        # Or we can ask for XML
        connection.request('GET', '/api/v1/geonotes/4/', headers={'Accept': 'application/xml'})
        response = connection.getresponse()
        connection.close()

        self.assertEqual(response.status, 200)
        data = response.read()
        self.assertTrue('<points type="hash"><type>MultiPoint</type><coordinates type="list"><objects><value type="float">-122.475233</value><value type="float">37.768617</value></objects><objects><value type="float">-122.470416</value><value type="float">37.767382</value></objects></coordinates></points>' in data)

    def test_filter_within(self):
        # Spatial "__within" lookups: geometries fully inside the park.
        golden_gate_park_json = """{"type": "MultiPolygon", "coordinates": [[[[-122.511067, 37.771276], [-122.510037, 37.766391], [-122.510037, 37.763813], [-122.456822, 37.765848], [-122.452960, 37.766459], [-122.454848, 37.773990], [-122.475362, 37.773040], [-122.511067, 37.771276]]]]}"""

        # Get points
        connection = self.get_connection()
        connection.request('GET', '/api/v1/geonotes/?points__within=%s' % quote(golden_gate_park_json), headers={'Accept': 'application/json'})
        response = connection.getresponse()
        connection.close()
        self.assertEqual(response.status, 200)
        data = json.loads(response.read())

        # We get back the points inside Golden Gate park!
        self.assertEqual(data['objects'][0]['content'], "Wooo two points inside Golden Gate park")
        self.assertEqual(data['objects'][0]['points']['type'], 'MultiPoint')
        self.assertAlmostEqual(data['objects'][0]['points']['coordinates'][0][0], -122.475233, places=5)
        self.assertAlmostEqual(data['objects'][0]['points']['coordinates'][0][1], 37.768616, places=5)
        self.assertAlmostEqual(data['objects'][0]['points']['coordinates'][1][0], -122.470416, places=5)
        self.assertAlmostEqual(data['objects'][0]['points']['coordinates'][1][1], 37.767381, places=5)

        # Get lines
        connection = self.get_connection()
        connection.request('GET', '/api/v1/geonotes/?lines__within=%s' % quote(golden_gate_park_json), headers={'Accept': 'application/json'})
        response = connection.getresponse()
        connection.close()
        self.assertEqual(response.status, 200)
        data = json.loads(response.read())

        # We get back the line inside Golden Gate park!
        self.assertEqual(data['objects'][0]['content'], "A path inside Golden Gate Park! Huzzah!")
        self.assertEqual(data['objects'][0]['lines']['type'], 'MultiLineString')
        self.assertAlmostEqual(data['objects'][0]['lines']['coordinates'][0][0][0], -122.504544, places=5)
        self.assertAlmostEqual(data['objects'][0]['lines']['coordinates'][0][0][1], 37.767002, places=5)
        self.assertAlmostEqual(data['objects'][0]['lines']['coordinates'][0][1][0], -122.499995, places=5)
        self.assertAlmostEqual(data['objects'][0]['lines']['coordinates'][0][1][1], 37.768223, places=5)

    def test_filter_contains(self):
        # Spatial "__contains" lookup: polygons that contain the points.
        points_inside_golden_gate_park = """{"coordinates": [[-122.475233, 37.768616999999999], [-122.470416, 37.767381999999998]], "type": "MultiPoint"}"""

        # Get polys that contain the points
        connection = self.get_connection()
        connection.request('GET', '/api/v1/geonotes/?polys__contains=%s' % quote(points_inside_golden_gate_park), headers={'Accept': 'application/json'})
        response = connection.getresponse()
        connection.close()
        self.assertEqual(response.status, 200)
        data = json.loads(response.read())

        # We get back the golden gate park polygon!
        self.assertEqual(data['objects'][0]['content'], "This is a note about Golden Gate Park. It contains Golden Gate Park\'s polygon")
        self.assertEqual(data['objects'][0]['polys']['type'], 'MultiPolygon')
        self.assertEqual(len(data['objects'][0]['polys']['coordinates']), 1)
        self.assertEqual(len(data['objects'][0]['polys']['coordinates'][0]), 1)
        self.assertEqual(len(data['objects'][0]['polys']['coordinates'][0][0]), 8)
""" Support for Cover devices. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/cover/ """ import os import logging import voluptuous as vol from homeassistant.config import load_yaml_config_file from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.entity import Entity from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa import homeassistant.helpers.config_validation as cv from homeassistant.components import group from homeassistant.const import ( SERVICE_OPEN_COVER, SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION, SERVICE_STOP_COVER, SERVICE_OPEN_COVER_TILT, SERVICE_CLOSE_COVER_TILT, SERVICE_STOP_COVER_TILT, SERVICE_SET_COVER_TILT_POSITION, STATE_OPEN, STATE_CLOSED, STATE_UNKNOWN, ATTR_ENTITY_ID) DOMAIN = 'cover' SCAN_INTERVAL = 15 GROUP_NAME_ALL_COVERS = 'all covers' ENTITY_ID_ALL_COVERS = group.ENTITY_ID_FORMAT.format('all_covers') ENTITY_ID_FORMAT = DOMAIN + '.{}' _LOGGER = logging.getLogger(__name__) ATTR_CURRENT_POSITION = 'current_position' ATTR_CURRENT_TILT_POSITION = 'current_tilt_position' ATTR_POSITION = 'position' ATTR_TILT_POSITION = 'tilt_position' COVER_SERVICE_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, }) COVER_SET_COVER_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({ vol.Required(ATTR_POSITION): vol.All(vol.Coerce(int), vol.Range(min=0, max=100)), }) COVER_SET_COVER_TILT_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({ vol.Required(ATTR_TILT_POSITION): vol.All(vol.Coerce(int), vol.Range(min=0, max=100)), }) SERVICE_TO_METHOD = { SERVICE_OPEN_COVER: {'method': 'open_cover'}, SERVICE_CLOSE_COVER: {'method': 'close_cover'}, SERVICE_SET_COVER_POSITION: { 'method': 'set_cover_position', 'schema': COVER_SET_COVER_POSITION_SCHEMA}, SERVICE_STOP_COVER: {'method': 'stop_cover'}, SERVICE_OPEN_COVER_TILT: {'method': 'open_cover_tilt'}, SERVICE_CLOSE_COVER_TILT: {'method': 'close_cover_tilt'}, 
SERVICE_STOP_COVER_TILT: {'method': 'stop_cover_tilt'}, SERVICE_SET_COVER_TILT_POSITION: { 'method': 'set_cover_tilt_position', 'schema': COVER_SET_COVER_TILT_POSITION_SCHEMA}, } def is_closed(hass, entity_id=None): """Return if the cover is closed based on the statemachine.""" entity_id = entity_id or ENTITY_ID_ALL_COVERS return hass.states.is_state(entity_id, STATE_CLOSED) def open_cover(hass, entity_id=None): """Open all or specified cover.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else None hass.services.call(DOMAIN, SERVICE_OPEN_COVER, data) def close_cover(hass, entity_id=None): """Close all or specified cover.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else None hass.services.call(DOMAIN, SERVICE_CLOSE_COVER, data) def set_cover_position(hass, position, entity_id=None): """Move to specific position all or specified cover.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} data[ATTR_POSITION] = position hass.services.call(DOMAIN, SERVICE_SET_COVER_POSITION, data) def stop_cover(hass, entity_id=None): """Stop all or specified cover.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else None hass.services.call(DOMAIN, SERVICE_STOP_COVER, data) def open_cover_tilt(hass, entity_id=None): """Open all or specified cover tilt.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else None hass.services.call(DOMAIN, SERVICE_OPEN_COVER_TILT, data) def close_cover_tilt(hass, entity_id=None): """Close all or specified cover tilt.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else None hass.services.call(DOMAIN, SERVICE_CLOSE_COVER_TILT, data) def set_cover_tilt_position(hass, tilt_position, entity_id=None): """Move to specific tilt position all or specified cover.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} data[ATTR_TILT_POSITION] = tilt_position hass.services.call(DOMAIN, SERVICE_SET_COVER_TILT_POSITION, data) def stop_cover_tilt(hass, entity_id=None): """Stop all or specified cover tilt.""" data = {ATTR_ENTITY_ID: 
entity_id} if entity_id else None hass.services.call(DOMAIN, SERVICE_STOP_COVER_TILT, data) def setup(hass, config): """Track states and offer events for covers.""" component = EntityComponent( _LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_COVERS) component.setup(config) def handle_cover_service(service): """Handle calls to the cover services.""" method = SERVICE_TO_METHOD.get(service.service) params = service.data.copy() params.pop(ATTR_ENTITY_ID, None) if method: for cover in component.extract_from_service(service): getattr(cover, method['method'])(**params) if cover.should_poll: cover.update_ha_state(True) descriptions = load_yaml_config_file( os.path.join(os.path.dirname(__file__), 'services.yaml')) for service_name in SERVICE_TO_METHOD: schema = SERVICE_TO_METHOD[service_name].get( 'schema', COVER_SERVICE_SCHEMA) hass.services.register(DOMAIN, service_name, handle_cover_service, descriptions.get(service_name), schema=schema) return True class CoverDevice(Entity): """Representation a cover.""" # pylint: disable=no-self-use @property def current_cover_position(self): """Return current position of cover. None is unknown, 0 is closed, 100 is fully open. """ pass @property def current_cover_tilt_position(self): """Return current position of cover tilt. None is unknown, 0 is closed, 100 is fully open. 
""" pass @property def state(self): """Return the state of the cover.""" closed = self.is_closed if closed is None: return STATE_UNKNOWN return STATE_CLOSED if closed else STATE_OPEN @property def state_attributes(self): """Return the state attributes.""" data = {} current = self.current_cover_position if current is not None: data[ATTR_CURRENT_POSITION] = self.current_cover_position current_tilt = self.current_cover_tilt_position if current_tilt is not None: data[ATTR_CURRENT_TILT_POSITION] = self.current_cover_tilt_position return data @property def is_closed(self): """Return if the cover is closed or not.""" raise NotImplementedError() def open_cover(self, **kwargs): """Open the cover.""" raise NotImplementedError() def close_cover(self, **kwargs): """Close cover.""" raise NotImplementedError() def set_cover_position(self, **kwargs): """Move the cover to a specific position.""" pass def stop_cover(self, **kwargs): """Stop the cover.""" pass def open_cover_tilt(self, **kwargs): """Open the cover tilt.""" pass def close_cover_tilt(self, **kwargs): """Close the cover tilt.""" pass def set_cover_tilt_position(self, **kwargs): """Move the cover tilt to a specific position.""" pass def stop_cover_tilt(self, **kwargs): """Stop the cover.""" pass
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Registry's Client API
"""

import os

from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils

from glance.common import exception
from glance.i18n import _
from glance.registry.client.v1 import client

LOG = logging.getLogger(__name__)

registry_client_ctx_opts = [
    cfg.BoolOpt('send_identity_headers', default=False,
                help=_("""
Send headers received from identity when making requests to
registry.

Typically, Glance registry can be deployed in multiple flavors,
which may or may not include authentication. For example,
``trusted-auth`` is a flavor that does not require the registry
service to authenticate the requests it receives. However, the
registry service may still need a user context to be populated to
serve the requests. This can be achieved by the caller
(the Glance API usually) passing through the headers it received
from authenticating with identity for the same request. The typical
headers sent are ``X-User-Id``, ``X-Tenant-Id``, ``X-Roles``,
``X-Identity-Status`` and ``X-Service-Catalog``.

Provide a boolean value to determine whether to send the identity
headers to provide tenant and user information along with the
requests to registry service. By default, this option is set to
``False``, which means that user and tenant information is not
available readily. It must be obtained by authenticating. Hence, if
this is set to ``False``, ``flavor`` must be set to value that
either includes authentication or authenticated user context.

Possible values:
    * True
    * False

Related options:
    * flavor

""")),
]

CONF = cfg.CONF
CONF.register_opts(registry_client_ctx_opts)

# Options declared by glance.registry.client; imported here so this module
# can read them off CONF without re-declaring them.
_registry_client = 'glance.registry.client'
CONF.import_opt('registry_client_protocol', _registry_client)
CONF.import_opt('registry_client_key_file', _registry_client)
CONF.import_opt('registry_client_cert_file', _registry_client)
CONF.import_opt('registry_client_ca_file', _registry_client)
CONF.import_opt('registry_client_insecure', _registry_client)
CONF.import_opt('registry_client_timeout', _registry_client)
CONF.import_opt('use_user_token', _registry_client)
CONF.import_opt('admin_user', _registry_client)
CONF.import_opt('admin_password', _registry_client)
CONF.import_opt('admin_tenant_name', _registry_client)
CONF.import_opt('auth_url', _registry_client)
CONF.import_opt('auth_strategy', _registry_client)
CONF.import_opt('auth_region', _registry_client)
CONF.import_opt('metadata_encryption_key', 'glance.common.config')

# Module-level connection state, filled in by configure_registry_client().
_CLIENT_CREDS = None
_CLIENT_HOST = None
_CLIENT_PORT = None
_CLIENT_KWARGS = {}
# AES key used to encrypt 'location' metadata
_METADATA_ENCRYPTION_KEY = None


def configure_registry_client():
    """
    Sets up a registry client for use in registry lookups
    """
    global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY
    try:
        host, port = CONF.registry_host, CONF.registry_port
    except cfg.ConfigFileValueError:
        msg = _("Configuration option was not valid")
        LOG.error(msg)
        raise exception.BadRegistryConnectionConfiguration(reason=msg)
    except IndexError:
        msg = _("Could not find required configuration option")
        LOG.error(msg)
        raise exception.BadRegistryConnectionConfiguration(reason=msg)

    _CLIENT_HOST = host
    _CLIENT_PORT = port
    _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key
    _CLIENT_KWARGS = {
        'use_ssl': CONF.registry_client_protocol.lower() == 'https',
        'key_file': CONF.registry_client_key_file,
        'cert_file': CONF.registry_client_cert_file,
        'ca_file': CONF.registry_client_ca_file,
        'insecure': CONF.registry_client_insecure,
        'timeout': CONF.registry_client_timeout,
    }

    if not CONF.use_user_token:
        configure_registry_admin_creds()


def configure_registry_admin_creds():
    """Populate the module-level admin credentials used for registry calls.

    Presence of an auth URL (config or OS_AUTH_URL env var) selects the
    'keystone' strategy; otherwise CONF.auth_strategy is used as-is.
    """
    global _CLIENT_CREDS

    if CONF.auth_url or os.getenv('OS_AUTH_URL'):
        strategy = 'keystone'
    else:
        strategy = CONF.auth_strategy

    # NOTE(review): both 'user' and 'username' are populated with the same
    # value — presumably different auth strategies read different keys.
    _CLIENT_CREDS = {
        'user': CONF.admin_user,
        'password': CONF.admin_password,
        'username': CONF.admin_user,
        'tenant': CONF.admin_tenant_name,
        'auth_url': os.getenv('OS_AUTH_URL') or CONF.auth_url,
        'strategy': strategy,
        'region': CONF.auth_region,
    }


def get_registry_client(cxt):
    """Build a RegistryClient for the given request context.

    Uses the module-level state set up by configure_registry_client();
    optionally forwards the user's token and identity headers depending on
    the use_user_token / send_identity_headers options.
    """
    global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
    global _METADATA_ENCRYPTION_KEY
    kwargs = _CLIENT_KWARGS.copy()
    if CONF.use_user_token:
        kwargs['auth_token'] = cxt.auth_token
    if _CLIENT_CREDS:
        kwargs['creds'] = _CLIENT_CREDS

    if CONF.send_identity_headers:
        identity_headers = {
            'X-User-Id': cxt.user or '',
            'X-Tenant-Id': cxt.tenant or '',
            'X-Roles': ','.join(cxt.roles),
            'X-Identity-Status': 'Confirmed',
            'X-Service-Catalog': jsonutils.dumps(cxt.service_catalog),
        }
        kwargs['identity_headers'] = identity_headers
    kwargs['request_id'] = cxt.request_id
    return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT,
                                 _METADATA_ENCRYPTION_KEY, **kwargs)


def get_images_list(context, **kwargs):
    """Return a brief listing of images from the registry."""
    c = get_registry_client(context)
    return c.get_images(**kwargs)


def get_images_detail(context, **kwargs):
    """Return a detailed listing of images from the registry."""
    c = get_registry_client(context)
    return c.get_images_detailed(**kwargs)


def get_image_metadata(context, image_id):
    """Return the metadata for a single image."""
    c = get_registry_client(context)
    return c.get_image(image_id)


def add_image_metadata(context, image_meta):
    """Register a new image with the registry."""
    LOG.debug("Adding image metadata...")
    c = get_registry_client(context)
    return c.add_image(image_meta)


def update_image_metadata(context, image_id, image_meta,
                          purge_props=False, from_state=None):
    """Update an existing image's metadata in the registry."""
    LOG.debug("Updating image metadata for image %s...", image_id)
    c = get_registry_client(context)
    return c.update_image(image_id, image_meta, purge_props=purge_props,
                          from_state=from_state)


def delete_image_metadata(context, image_id):
    """Delete an image's metadata from the registry."""
    LOG.debug("Deleting image metadata for image %s...", image_id)
    c = get_registry_client(context)
    return c.delete_image(image_id)


def get_image_members(context, image_id):
    """Return the list of members for an image."""
    c = get_registry_client(context)
    return c.get_image_members(image_id)


def get_member_images(context, member_id):
    """Return the list of images shared with a member."""
    c = get_registry_client(context)
    return c.get_member_images(member_id)


def replace_members(context, image_id, member_data):
    """Replace an image's full membership list."""
    c = get_registry_client(context)
    return c.replace_members(image_id, member_data)


def add_member(context, image_id, member_id, can_share=None):
    """Add a single member to an image."""
    c = get_registry_client(context)
    return c.add_member(image_id, member_id, can_share=can_share)


def delete_member(context, image_id, member_id):
    """Remove a single member from an image."""
    c = get_registry_client(context)
    return c.delete_member(image_id, member_id)
# # # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Inter-node RPC library. 
""" # pylint: disable=C0103,R0201,R0904 # C0103: Invalid name, since call_ are not valid # R0201: Method could be a function, we keep all rpcs instance methods # as not to change them back and forth between static/instance methods # if they need to start using instance attributes # R0904: Too many public methods import base64 import copy import logging import os import threading import zlib import pycurl from ganeti import utils from ganeti import objects from ganeti import http from ganeti import serializer from ganeti import constants from ganeti import errors from ganeti import netutils from ganeti import ssconf from ganeti import runtime from ganeti import compat from ganeti import rpc_defs from ganeti import pathutils from ganeti import vcluster # Special module generated at build time from ganeti import _generated_rpc # pylint has a bug here, doesn't see this import import ganeti.http.client # pylint: disable=W0611 _RPC_CLIENT_HEADERS = [ "Content-type: %s" % http.HTTP_APP_JSON, "Expect:", ] #: Special value to describe an offline host _OFFLINE = object() def Init(): """Initializes the module-global HTTP client manager. Must be called before using any RPC function and while exactly one thread is running. """ # curl_global_init(3) and curl_global_cleanup(3) must be called with only # one thread running. This check is just a safety measure -- it doesn't # cover all cases. assert threading.activeCount() == 1, \ "Found more than one active thread when initializing pycURL" logging.info("Using PycURL %s", pycurl.version) pycurl.global_init(pycurl.GLOBAL_ALL) def Shutdown(): """Stops the module-global HTTP client manager. Must be called before quitting the program and while exactly one thread is running. """ pycurl.global_cleanup() def _ConfigRpcCurl(curl): noded_cert = pathutils.NODED_CERT_FILE noded_client_cert = pathutils.NODED_CLIENT_CERT_FILE # This fallback is required for backwards compatibility with 2.10. 
Ganeti # 2.11 introduced per-node client certificates, but when we restart after # an upgrade from 2.10, the client certs are not in place yet, and we need # to fall back to using the cluster-wide server cert. if not os.path.exists(noded_client_cert): logging.warn("Using server certificate as client certificate for RPC" "call.") noded_client_cert = noded_cert curl.setopt(pycurl.FOLLOWLOCATION, False) curl.setopt(pycurl.CAINFO, noded_cert) curl.setopt(pycurl.SSL_VERIFYHOST, 0) curl.setopt(pycurl.SSL_VERIFYPEER, True) curl.setopt(pycurl.SSLCERTTYPE, "PEM") curl.setopt(pycurl.SSLCERT, noded_client_cert) curl.setopt(pycurl.SSLKEYTYPE, "PEM") curl.setopt(pycurl.SSLKEY, noded_client_cert) curl.setopt(pycurl.CONNECTTIMEOUT, constants.RPC_CONNECT_TIMEOUT) def RunWithRPC(fn): """RPC-wrapper decorator. When applied to a function, it runs it with the RPC system initialized, and it shutsdown the system afterwards. This means the function must be called without RPC being initialized. """ def wrapper(*args, **kwargs): Init() try: return fn(*args, **kwargs) finally: Shutdown() return wrapper def _Compress(_, data): """Compresses a string for transport over RPC. Small amounts of data are not compressed. @type data: str @param data: Data @rtype: tuple @return: Encoded data to send """ # Small amounts of data are not compressed if len(data) < 512: return (constants.RPC_ENCODING_NONE, data) # Compress with zlib and encode in base64 return (constants.RPC_ENCODING_ZLIB_BASE64, base64.b64encode(zlib.compress(data, 3))) class RpcResult(object): """RPC Result class. This class holds an RPC result. It is needed since in multi-node calls we can't raise an exception just because one out of many failed, and therefore we use this class to encapsulate the result. 
@ivar data: the data payload, for successful results, or None @ivar call: the name of the RPC call @ivar node: the name of the node to which we made the call @ivar offline: whether the operation failed because the node was offline, as opposed to actual failure; offline=True will always imply failed=True, in order to allow simpler checking if the user doesn't care about the exact failure mode @ivar fail_msg: the error message if the call failed """ def __init__(self, data=None, failed=False, offline=False, call=None, node=None): self.offline = offline self.call = call self.node = node if offline: self.fail_msg = "Node is marked offline" self.data = self.payload = None elif failed: self.fail_msg = self._EnsureErr(data) self.data = self.payload = None else: self.data = data if not isinstance(self.data, (tuple, list)): self.fail_msg = ("RPC layer error: invalid result type (%s)" % type(self.data)) self.payload = None elif len(data) != 2: self.fail_msg = ("RPC layer error: invalid result length (%d), " "expected 2" % len(self.data)) self.payload = None elif not self.data[0]: self.fail_msg = self._EnsureErr(self.data[1]) self.payload = None else: # finally success self.fail_msg = None self.payload = data[1] for attr_name in ["call", "data", "fail_msg", "node", "offline", "payload"]: assert hasattr(self, attr_name), "Missing attribute %s" % attr_name def __repr__(self): return ("RpcResult(data=%s, call=%s, node=%s, offline=%s, fail_msg=%s)" % (self.offline, self.call, self.node, self.offline, self.fail_msg)) @staticmethod def _EnsureErr(val): """Helper to ensure we return a 'True' value for error.""" if val: return val else: return "No error information" def Raise(self, msg, prereq=False, ecode=None): """If the result has failed, raise an OpExecError. This is used so that LU code doesn't have to check for each result, but instead can call this function. 
""" if not self.fail_msg: return if not msg: # one could pass None for default message msg = ("Call '%s' to node '%s' has failed: %s" % (self.call, self.node, self.fail_msg)) else: msg = "%s: %s" % (msg, self.fail_msg) if prereq: ec = errors.OpPrereqError else: ec = errors.OpExecError if ecode is not None: args = (msg, ecode) else: args = (msg, ) raise ec(*args) def Warn(self, msg, feedback_fn): """If the result has failed, call the feedback_fn. This is used to in cases were LU wants to warn the user about a failure, but continue anyway. """ if not self.fail_msg: return msg = "%s: %s" % (msg, self.fail_msg) feedback_fn(msg) def _SsconfResolver(ssconf_ips, node_list, _, ssc=ssconf.SimpleStore, nslookup_fn=netutils.Hostname.GetIP): """Return addresses for given node names. @type ssconf_ips: bool @param ssconf_ips: Use the ssconf IPs @type node_list: list @param node_list: List of node names @type ssc: class @param ssc: SimpleStore class that is used to obtain node->ip mappings @type nslookup_fn: callable @param nslookup_fn: function use to do NS lookup @rtype: list of tuple; (string, string) @return: List of tuples containing node name and IP address """ ss = ssc() family = ss.GetPrimaryIPFamily() if ssconf_ips: iplist = ss.GetNodePrimaryIPList() ipmap = dict(entry.split() for entry in iplist) else: ipmap = {} result = [] for node in node_list: ip = ipmap.get(node) if ip is None: ip = nslookup_fn(node, family=family) result.append((node, ip, node)) return result class _StaticResolver(object): def __init__(self, addresses): """Initializes this class. """ self._addresses = addresses def __call__(self, hosts, _): """Returns static addresses for hosts. """ assert len(hosts) == len(self._addresses) return list(zip(hosts, self._addresses, hosts)) def _CheckConfigNode(node_uuid_or_name, node, accept_offline_node): """Checks if a node is online. 
  @type node_uuid_or_name: string
  @param node_uuid_or_name: Node UUID
  @type node: L{objects.Node} or None
  @param node: Node object

  """
  if node is None:
    # Assume that the passed parameter was actually a node name, so depend on
    # DNS for name resolution
    return (node_uuid_or_name, node_uuid_or_name, node_uuid_or_name)
  else:
    if node.offline and not accept_offline_node:
      ip = _OFFLINE
    else:
      ip = node.primary_ip
    return (node.name, ip, node_uuid_or_name)


def _NodeConfigResolver(single_node_fn, all_nodes_fn, node_uuids, opts):
  """Calculate node addresses using configuration.

  Note that strings in node_uuids are treated as node names if the UUID is
  not found in the configuration.

  """
  accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)

  assert accept_offline_node or opts is None, "Unknown option"

  # Special case for single-host lookups
  if len(node_uuids) == 1:
    (uuid, ) = node_uuids
    return [_CheckConfigNode(uuid, single_node_fn(uuid), accept_offline_node)]
  else:
    all_nodes = all_nodes_fn()
    return [_CheckConfigNode(uuid, all_nodes.get(uuid, None),
                             accept_offline_node)
            for uuid in node_uuids]


class _RpcProcessor(object):
  # Callable that resolves node names/UUIDs, fans an RPC out over HTTP and
  # combines the per-node results into RpcResult objects.
  def __init__(self, resolver, port, lock_monitor_cb=None):
    """Initializes this class.

    @param resolver: callable accepting a list of node UUIDs or hostnames,
      returning a list of tuples containing name, IP address and original
      name of the resolved node. IP address can be the name or the special
      value L{_OFFLINE} to mark offline machines.
    @type port: int
    @param port: TCP port
    @param lock_monitor_cb: Callable for registering with lock monitor

    """
    self._resolver = resolver
    self._port = port
    self._lock_monitor_cb = lock_monitor_cb

  @staticmethod
  def _PrepareRequests(hosts, port, procedure, body, read_timeout):
    """Prepares requests by sorting offline hosts into separate list.

    @type body: dict
    @param body: a dictionary with per-host body data

    """
    results = {}
    requests = {}

    assert isinstance(body, dict)
    assert len(body) == len(hosts)
    assert compat.all(isinstance(v, (str, bytes)) for v in body.values())
    assert frozenset(h[2] for h in hosts) == frozenset(body), \
        "%s != %s" % (hosts, list(body))

    for (name, ip, original_name) in hosts:
      if ip is _OFFLINE:
        # Node is marked as offline
        results[original_name] = RpcResult(node=name,
                                           offline=True,
                                           call=procedure)
      else:
        requests[original_name] = \
          http.client.HttpClientRequest(str(ip), port,
                                        http.HTTP_POST,
                                        str("/%s" % procedure),
                                        headers=_RPC_CLIENT_HEADERS,
                                        post_data=body[original_name],
                                        read_timeout=read_timeout,
                                        nicename="%s/%s" % (name, procedure),
                                        curl_config_fn=_ConfigRpcCurl)

    return (results, requests)

  @staticmethod
  def _CombineResults(results, requests, procedure):
    """Combines pre-computed results for offline hosts with actual call
    results.

    """
    for name, req in requests.items():
      if req.success and req.resp_status_code == http.HTTP_OK:
        host_result = RpcResult(data=serializer.LoadJson(req.resp_body),
                                node=name, call=procedure)
      else:
        # TODO: Better error reporting
        if req.error:
          msg = req.error
        else:
          msg = req.resp_body

        logging.error("RPC error in %s on node %s: %s", procedure, name, msg)
        host_result = RpcResult(data=msg, failed=True, node=name,
                                call=procedure)

      results[name] = host_result

    return results

  def __call__(self, nodes, procedure, body, read_timeout, resolver_opts,
               _req_process_fn=None):
    """Makes an RPC request to a number of nodes.

    @type nodes: sequence
    @param nodes: node UUIDs or Hostnames
    @type procedure: string
    @param procedure: Request path
    @type body: dictionary
    @param body: dictionary with request bodies per host
    @type read_timeout: int or None
    @param read_timeout: Read timeout for request
    @rtype: dictionary
    @return: a dictionary mapping host names to rpc.RpcResult objects

    """
    assert read_timeout is not None, \
      "Missing RPC read timeout for procedure '%s'" % procedure

    if _req_process_fn is None:
      _req_process_fn = http.client.ProcessRequests

    (results, requests) = \
      self._PrepareRequests(self._resolver(nodes, resolver_opts), self._port,
                            procedure, body, read_timeout)

    _req_process_fn(list(requests.values()),
                    lock_monitor_cb=self._lock_monitor_cb)

    assert not frozenset(results).intersection(requests)

    return self._CombineResults(results, requests, procedure)


class _RpcClientBase(object):
  # Shared machinery for the generated RPC client mixins: owns the
  # _RpcProcessor and the per-argument encoder.
  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None,
               _req_process_fn=None):
    """Initializes this class.

    @param resolver: node name/UUID resolver passed to L{_RpcProcessor}
    @param encoder_fn: maps an argument-kind constant to an encoder callable

    """
    proc = _RpcProcessor(resolver,
                         netutils.GetDaemonPort(constants.NODED),
                         lock_monitor_cb=lock_monitor_cb)
    self._proc = compat.partial(proc, _req_process_fn=_req_process_fn)
    self._encoder = compat.partial(self._EncodeArg, encoder_fn)

  @staticmethod
  def _EncodeArg(encoder_fn, node, arg):
    """Encode argument.

    """
    (argkind, value) = arg
    if argkind is None:
      # No encoder registered for this kind: pass the value through as-is
      return value
    else:
      return encoder_fn(argkind)(node, value)

  def _Call(self, cdef, node_list, args):
    """Entry point for automatically generated RPC wrappers.

    """
    (procedure, _, resolver_opts, timeout, argdefs,
     prep_fn, postproc_fn, _) = cdef

    if callable(timeout):
      read_timeout = timeout(args)
    else:
      read_timeout = timeout

    if callable(resolver_opts):
      req_resolver_opts = resolver_opts(args)
    else:
      req_resolver_opts = resolver_opts

    if len(args) != len(argdefs):
      raise errors.ProgrammerError("Number of passed arguments doesn't match")

    if prep_fn is None:
      prep_fn = lambda _, args: args
    assert callable(prep_fn)

    # encode the arguments for each node individually, pass them and the node
    # name to the prep_fn, and serialise its return value
    encode_args_fn = lambda node: [self._encoder(node, (argdef[1], val))
                                   for (argdef, val) in zip(argdefs, args)]
    pnbody = dict(
      (n,
       serializer.DumpJson(prep_fn(n, encode_args_fn(n)),
                           private_encoder=serializer.EncodeWithPrivateFields))
      for n in node_list
    )

    result = self._proc(node_list, procedure, pnbody, read_timeout,
                        req_resolver_opts)

    if postproc_fn:
      return dict((k, postproc_fn(v)) for (k, v) in result.items())
    else:
      return result


def _ObjectToDict(_, value):
  """Converts an object to a dictionary.

  @note: See L{objects}.

  """
  return value.ToDict()


def _ObjectListToDict(node, value):
  """Converts a list of L{objects} to dictionaries.

  """
  return [_ObjectToDict(node, v) for v in value]


def _PrepareFileUpload(getents_fn, node, filename):
  """Loads a file and prepares it for an upload to nodes.

  """
  statcb = utils.FileStatHelper()
  data = _Compress(node, utils.ReadBinaryFile(filename, preread=statcb))
  st = statcb.st

  if getents_fn is None:
    getents_fn = runtime.GetEnts

  getents = getents_fn()

  virt_filename = vcluster.MakeVirtualPath(filename)

  return [virt_filename, data, st.st_mode, getents.LookupUid(st.st_uid),
          getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]


def _PrepareFinalizeExportDisks(_, snap_disks):
  """Encodes disks for finalizing export.

  """
  flat_disks = []

  for disk in snap_disks:
    if isinstance(disk, bool):
      flat_disks.append(disk)
    else:
      flat_disks.append(disk.ToDict())

  return flat_disks


def _EncodeBlockdevRename(_, value):
  """Encodes information for renaming block devices.

  """
  return [(d.ToDict(), uid) for d, uid in value]


def _AddSpindlesToLegacyNodeInfo(result, space_info):
  """Extracts the spindle information from the space info and adds
  it to the result dictionary.

  @type result: dict of strings
  @param result: dictionary holding the result of the legacy node info
  @type space_info: list of dicts of strings
  @param space_info: list, each row holding space information of one storage
    unit
  @rtype: None
  @return: does not return anything, manipulates the C{result} variable

  """
  lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
      space_info, constants.ST_LVM_PV)
  if lvm_pv_info:
    result["spindles_free"] = lvm_pv_info["storage_free"]
    result["spindles_total"] = lvm_pv_info["storage_size"]
  else:
    result["spindles_free"] = 0
    result["spindles_total"] = 0


def _AddStorageInfoToLegacyNodeInfoByTemplate(
    result, space_info, disk_template):
  """Extracts the storage space information of the disk template from the
  space info and adds it to the result dictionary.

  @see: C{_AddSpindlesToLegacyNodeInfo} for parameter information.

  """
  if utils.storage.DiskTemplateSupportsSpaceReporting(disk_template):
    disk_info = utils.storage.LookupSpaceInfoByDiskTemplate(
        space_info, disk_template)
    result["name"] = disk_info["name"]
    result["storage_free"] = disk_info["storage_free"]
    result["storage_size"] = disk_info["storage_size"]
  else:
    # FIXME: consider displaying '-' in this case
    result["storage_free"] = 0
    result["storage_size"] = 0


def MakeLegacyNodeInfo(data, disk_template):
  """Formats the data returned by call_node_info.

  Converts the data into a single dictionary. This is fine for most use
  cases, but some require information from more than one volume group or
  hypervisor.
""" (bootid, space_info, (hv_info, )) = data ret = utils.JoinDisjointDicts(hv_info, {"bootid": bootid}) _AddSpindlesToLegacyNodeInfo(ret, space_info) _AddStorageInfoToLegacyNodeInfoByTemplate(ret, space_info, disk_template) return ret def _AnnotateDParamsDRBD(disk, params): """Annotates just DRBD disks layouts. """ (drbd_params, data_params, meta_params) = params assert disk.dev_type == constants.DT_DRBD8 disk.params = objects.FillDict(drbd_params, disk.params) (dev_data, dev_meta) = disk.children dev_data.params = objects.FillDict(data_params, dev_data.params) dev_meta.params = objects.FillDict(meta_params, dev_meta.params) return disk def _AnnotateDParamsGeneric(disk, params): """Generic disk parameter annotation routine. """ assert disk.dev_type != constants.DT_DRBD8 disk.params = objects.FillDict(params[0], disk.params) return disk def AnnotateDiskParams(disks, disk_params): """Annotates the disk objects with the disk parameters. @param disks: The list of disks objects to annotate @param disk_params: The disk parameters for annotation @returns: A list of disk objects annotated """ def AnnotateDisk(disk): if disk.dev_type == constants.DT_DISKLESS: return disk ld_params = objects.Disk.ComputeLDParams(disk.dev_type, disk_params) if disk.dev_type == constants.DT_DRBD8: return _AnnotateDParamsDRBD(disk, ld_params) else: return _AnnotateDParamsGeneric(disk, ld_params) return [AnnotateDisk(disk.Copy()) for disk in disks] def _GetExclusiveStorageFlag(cfg, node_uuid): ni = cfg.GetNodeInfo(node_uuid) if ni is None: raise errors.OpPrereqError("Invalid node name %s" % node_uuid, errors.ECODE_NOENT) return cfg.GetNdParams(ni)[constants.ND_EXCLUSIVE_STORAGE] def _AddExclusiveStorageFlagToLvmStorageUnits(storage_units, es_flag): """Adds the exclusive storage flag to lvm units. This function creates a copy of the storage_units lists, with the es_flag being added to all lvm storage units. 
@type storage_units: list of pairs (string, string) @param storage_units: list of 'raw' storage units, consisting only of (storage_type, storage_key) @type es_flag: boolean @param es_flag: exclusive storage flag @rtype: list of tuples (string, string, list) @return: list of storage units (storage_type, storage_key, params) with the params containing the es_flag for lvm-vg storage units """ result = [] for (storage_type, storage_key) in storage_units: if storage_type in [constants.ST_LVM_VG]: result.append((storage_type, storage_key, [es_flag])) if es_flag: result.append((constants.ST_LVM_PV, storage_key, [es_flag])) else: result.append((storage_type, storage_key, [])) return result def GetExclusiveStorageForNodes(cfg, node_uuids): """Return the exclusive storage flag for all the given nodes. @type cfg: L{config.ConfigWriter} @param cfg: cluster configuration @type node_uuids: list or tuple @param node_uuids: node UUIDs for which to read the flag @rtype: dict @return: mapping from node uuids to exclusive storage flags @raise errors.OpPrereqError: if any given node name has no corresponding node """ getflag = lambda n: _GetExclusiveStorageFlag(cfg, n) flags = map(getflag, node_uuids) return dict(zip(node_uuids, flags)) def PrepareStorageUnitsForNodes(cfg, storage_units, node_uuids): """Return the lvm storage unit for all the given nodes. Main purpose of this function is to map the exclusive storage flag, which can be different for each node, to the default LVM storage unit. @type cfg: L{config.ConfigWriter} @param cfg: cluster configuration @type storage_units: list of pairs (string, string) @param storage_units: list of 'raw' storage units, e.g. 
pairs of (storage_type, storage_key) @type node_uuids: list or tuple @param node_uuids: node UUIDs for which to read the flag @rtype: dict @return: mapping from node uuids to a list of storage units which include the exclusive storage flag for lvm storage @raise errors.OpPrereqError: if any given node name has no corresponding node """ getunit = lambda n: _AddExclusiveStorageFlagToLvmStorageUnits( storage_units, _GetExclusiveStorageFlag(cfg, n)) flags = map(getunit, node_uuids) return dict(zip(node_uuids, flags)) #: Generic encoders _ENCODERS = { rpc_defs.ED_OBJECT_DICT: _ObjectToDict, rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict, rpc_defs.ED_COMPRESS: _Compress, rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks, rpc_defs.ED_BLOCKDEV_RENAME: _EncodeBlockdevRename, } class RpcRunner(_RpcClientBase, _generated_rpc.RpcClientDefault, _generated_rpc.RpcClientBootstrap, _generated_rpc.RpcClientDnsOnly, _generated_rpc.RpcClientConfig): """RPC runner class. """ def __init__(self, cfg, lock_monitor_cb, _req_process_fn=None, _getents=None): """Initialized the RPC runner. 
@type cfg: L{config.ConfigWriter} @param cfg: Configuration @type lock_monitor_cb: callable @param lock_monitor_cb: Lock monitor callback """ self._cfg = cfg encoders = _ENCODERS.copy() encoders.update({ # Encoders requiring configuration object rpc_defs.ED_INST_DICT: self._InstDict, rpc_defs.ED_INST_DICT_HVP_BEP_DP: self._InstDictHvpBepDp, rpc_defs.ED_INST_DICT_OSP_DP: self._InstDictOspDp, rpc_defs.ED_NIC_DICT: self._NicDict, rpc_defs.ED_DEVICE_DICT: self._DeviceDict, # Encoders annotating disk parameters rpc_defs.ED_DISKS_DICT_DP: self._DisksDictDP, rpc_defs.ED_MULTI_DISKS_DICT_DP: self._MultiDiskDictDP, rpc_defs.ED_SINGLE_DISK_DICT_DP: self._SingleDiskDictDP, rpc_defs.ED_NODE_TO_DISK_DICT_DP: self._EncodeNodeToDiskDictDP, # Encoders with special requirements rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents), rpc_defs.ED_IMPEXP_IO: self._EncodeImportExportIO, }) # Resolver using configuration resolver = compat.partial(_NodeConfigResolver, cfg.GetNodeInfo, cfg.GetAllNodesInfo) # Pylint doesn't recognize multiple inheritance properly, see # <http://www.logilab.org/ticket/36586> and # <http://www.logilab.org/ticket/35642> # pylint: disable=W0233 _RpcClientBase.__init__(self, resolver, encoders.get, lock_monitor_cb=lock_monitor_cb, _req_process_fn=_req_process_fn) _generated_rpc.RpcClientConfig.__init__(self) _generated_rpc.RpcClientBootstrap.__init__(self) _generated_rpc.RpcClientDnsOnly.__init__(self) _generated_rpc.RpcClientDefault.__init__(self) def _NicDict(self, _, nic): """Convert the given nic to a dict and encapsulate netinfo """ n = copy.deepcopy(nic) if n.network: net_uuid = self._cfg.LookupNetwork(n.network) if net_uuid: nobj = self._cfg.GetNetwork(net_uuid) n.netinfo = objects.Network.ToDict(nobj) return n.ToDict() def _DeviceDict(self, _, devinstance): (device, instance) = devinstance if isinstance(device, objects.NIC): return self._NicDict(None, device) elif isinstance(device, objects.Disk): return self._SingleDiskDictDP(None, 
(device, instance)) def _InstDict(self, node, instance, hvp=None, bep=None, osp=None): """Convert the given instance to a dict. This is done via the instance's ToDict() method and additionally we fill the hvparams with the cluster defaults. @type instance: L{objects.Instance} @param instance: an Instance object @type hvp: dict or None @param hvp: a dictionary with overridden hypervisor parameters @type bep: dict or None @param bep: a dictionary with overridden backend parameters @type osp: dict or None @param osp: a dictionary with overridden os parameters @rtype: dict @return: the instance dict, with the hvparams filled with the cluster defaults """ idict = instance.ToDict() cluster = self._cfg.GetClusterInfo() idict["hvparams"] = cluster.FillHV(instance) idict["secondary_nodes"] = \ self._cfg.GetInstanceSecondaryNodes(instance.uuid) if hvp is not None: idict["hvparams"].update(hvp) idict["beparams"] = cluster.FillBE(instance) if bep is not None: idict["beparams"].update(bep) idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams) if osp is not None: idict["osparams"].update(osp) disks = self._cfg.GetInstanceDisks(instance.uuid) idict["disks_info"] = self._DisksDictDP(node, (disks, instance)) for nic in idict["nics"]: nic["nicparams"] = objects.FillDict( cluster.nicparams[constants.PP_DEFAULT], nic["nicparams"]) network = nic.get("network", None) if network: net_uuid = self._cfg.LookupNetwork(network) if net_uuid: nobj = self._cfg.GetNetwork(net_uuid) nic["netinfo"] = objects.Network.ToDict(nobj) return idict def _InstDictHvpBepDp(self, node, instance_params): """Wrapper for L{_InstDict}. """ (instance, hvp, bep) = instance_params return self._InstDict(node, instance, hvp=hvp, bep=bep) def _InstDictOspDp(self, node, instance_osparams): """Wrapper for L{_InstDict}. """ (instance, osparams) = instance_osparams return self._InstDict(node, instance, osp=osparams) def _DisksDictDP(self, node, instance_disks): """Wrapper for L{AnnotateDiskParams}. 
""" (disks, instance) = instance_disks diskparams = self._cfg.GetInstanceDiskParams(instance) ret = [] for disk in AnnotateDiskParams(disks, diskparams): disk_node_uuids = disk.GetNodes(instance.primary_node) node_ips = dict((uuid, node.secondary_ip) for (uuid, node) in self._cfg.GetMultiNodeInfo(disk_node_uuids)) disk.UpdateDynamicDiskParams(node, node_ips) ret.append(disk.ToDict(include_dynamic_params=True)) return ret def _MultiDiskDictDP(self, node, disks_insts): """Wrapper for L{AnnotateDiskParams}. Supports a list of (disk, instance) tuples. """ return [disk for disk_inst in disks_insts for disk in self._DisksDictDP(node, disk_inst)] def _SingleDiskDictDP(self, node, instance_disk): """Wrapper for L{AnnotateDiskParams}. """ (disk, instance) = instance_disk anno_disk = self._DisksDictDP(node, ([disk], instance))[0] return anno_disk def _EncodeNodeToDiskDictDP(self, node, value): """Encode dict of node name -> list of (disk, instance) tuples as values. """ return dict((name, [self._SingleDiskDictDP(node, disk) for disk in disks]) for name, disks in value.items()) def _EncodeImportExportIO(self, node, ieinfo): """Encodes import/export I/O information. """ (ieio, ieioargs) = ieinfo if ieio == constants.IEIO_RAW_DISK: assert len(ieioargs) == 2 return (ieio, (self._SingleDiskDictDP(node, ieioargs), )) if ieio == constants.IEIO_SCRIPT: assert len(ieioargs) == 2 return (ieio, (self._SingleDiskDictDP(node, ieioargs[0]), ieioargs[1])) return (ieio, ieioargs) class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue): """RPC wrappers for job queue. """ def __init__(self, _context, address_list): """Initializes this class. 
""" if address_list is None: resolver = compat.partial(_SsconfResolver, True) else: # Caller provided an address list resolver = _StaticResolver(address_list) _RpcClientBase.__init__(self, resolver, _ENCODERS.get, lock_monitor_cb=lambda _: None) _generated_rpc.RpcClientJobQueue.__init__(self) class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap, _generated_rpc.RpcClientDnsOnly): """RPC wrappers for bootstrapping. """ def __init__(self): """Initializes this class. """ # Pylint doesn't recognize multiple inheritance properly, see # <http://www.logilab.org/ticket/36586> and # <http://www.logilab.org/ticket/35642> # pylint: disable=W0233 _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, True), _ENCODERS.get) _generated_rpc.RpcClientBootstrap.__init__(self) _generated_rpc.RpcClientDnsOnly.__init__(self) class DnsOnlyRunner(_RpcClientBase, _generated_rpc.RpcClientDnsOnly): """RPC wrappers for calls using only DNS. """ def __init__(self): """Initialize this class. """ _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, False), _ENCODERS.get) _generated_rpc.RpcClientDnsOnly.__init__(self) class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig): """RPC wrappers for L{config}. """ def __init__(self, _context, address_list, _req_process_fn=None, _getents=None): """Initializes this class. """ lock_monitor_cb = None if address_list is None: resolver = compat.partial(_SsconfResolver, True) else: # Caller provided an address list resolver = _StaticResolver(address_list) encoders = _ENCODERS.copy() encoders.update({ rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents), }) _RpcClientBase.__init__(self, resolver, encoders.get, lock_monitor_cb=lock_monitor_cb, _req_process_fn=_req_process_fn) _generated_rpc.RpcClientConfig.__init__(self)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

# Older versions of google-api-core do not expose _MethodDefault;
# fall back to a plain ``object`` sentinel in the retry union.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.dataflow_v1beta3.types import snapshots
from google.protobuf import duration_pb2  # type: ignore
from google.protobuf import timestamp_pb2  # type: ignore
from .transports.base import SnapshotsV1Beta3Transport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import SnapshotsV1Beta3GrpcAsyncIOTransport
from .client import SnapshotsV1Beta3Client


# NOTE(review): this module appears to be GAPIC-generated client code; the
# async client delegates all transport work to the synchronous
# SnapshotsV1Beta3Client it wraps.
class SnapshotsV1Beta3AsyncClient:
    """Provides methods to manage snapshots of Google Cloud
    Dataflow jobs.
    """

    # The wrapped synchronous client that owns the transport.
    _client: SnapshotsV1Beta3Client

    DEFAULT_ENDPOINT = SnapshotsV1Beta3Client.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = SnapshotsV1Beta3Client.DEFAULT_MTLS_ENDPOINT

    # Resource-path helpers are delegated to the synchronous client so both
    # clients share a single implementation.
    common_billing_account_path = staticmethod(
        SnapshotsV1Beta3Client.common_billing_account_path
    )
    parse_common_billing_account_path = staticmethod(
        SnapshotsV1Beta3Client.parse_common_billing_account_path
    )
    common_folder_path = staticmethod(SnapshotsV1Beta3Client.common_folder_path)
    parse_common_folder_path = staticmethod(
        SnapshotsV1Beta3Client.parse_common_folder_path
    )
    common_organization_path = staticmethod(
        SnapshotsV1Beta3Client.common_organization_path
    )
    parse_common_organization_path = staticmethod(
        SnapshotsV1Beta3Client.parse_common_organization_path
    )
    common_project_path = staticmethod(SnapshotsV1Beta3Client.common_project_path)
    parse_common_project_path = staticmethod(
        SnapshotsV1Beta3Client.parse_common_project_path
    )
    common_location_path = staticmethod(SnapshotsV1Beta3Client.common_location_path)
    parse_common_location_path = staticmethod(
        SnapshotsV1Beta3Client.parse_common_location_path
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            SnapshotsV1Beta3AsyncClient: The constructed client.
        """
        return SnapshotsV1Beta3Client.from_service_account_info.__func__(SnapshotsV1Beta3AsyncClient, info, *args, **kwargs)  # type: ignore

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            SnapshotsV1Beta3AsyncClient: The constructed client.
        """
        return SnapshotsV1Beta3Client.from_service_account_file.__func__(SnapshotsV1Beta3AsyncClient, filename, *args, **kwargs)  # type: ignore

    from_service_account_json = from_service_account_file

    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        return SnapshotsV1Beta3Client.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore

    @property
    def transport(self) -> SnapshotsV1Beta3Transport:
        """Returns the transport used by the client instance.

        Returns:
            SnapshotsV1Beta3Transport: The transport used by the client instance.
        """
        return self._client.transport

    get_transport_class = functools.partial(
        type(SnapshotsV1Beta3Client).get_transport_class, type(SnapshotsV1Beta3Client)
    )

    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, SnapshotsV1Beta3Transport] = "grpc_asyncio",
        client_options: Optional[ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the snapshots v1 beta3 client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.SnapshotsV1Beta3Transport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._client = SnapshotsV1Beta3Client(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )

    async def get_snapshot(
        self,
        request: Optional[Union[snapshots.GetSnapshotRequest, dict]] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> snapshots.Snapshot:
        r"""Gets information about a snapshot.

        .. code-block:: python

            from google.cloud import dataflow_v1beta3

            def sample_get_snapshot():
                # Create a client
                client = dataflow_v1beta3.SnapshotsV1Beta3Client()

                # Initialize request argument(s)
                request = dataflow_v1beta3.GetSnapshotRequest(
                )

                # Make the request
                response = client.get_snapshot(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.dataflow_v1beta3.types.GetSnapshotRequest, dict]):
                The request object. Request to get information about a
                snapshot
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dataflow_v1beta3.types.Snapshot:
                Represents a snapshot of a job.
        """
        # Create or coerce a protobuf request object.
        request = snapshots.GetSnapshotRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_snapshot,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def delete_snapshot(
        self,
        request: Optional[Union[snapshots.DeleteSnapshotRequest, dict]] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> snapshots.DeleteSnapshotResponse:
        r"""Deletes a snapshot.

        .. code-block:: python

            from google.cloud import dataflow_v1beta3

            def sample_delete_snapshot():
                # Create a client
                client = dataflow_v1beta3.SnapshotsV1Beta3Client()

                # Initialize request argument(s)
                request = dataflow_v1beta3.DeleteSnapshotRequest(
                )

                # Make the request
                response = client.delete_snapshot(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.dataflow_v1beta3.types.DeleteSnapshotRequest, dict]):
                The request object. Request to delete a snapshot.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dataflow_v1beta3.types.DeleteSnapshotResponse:
                Response from deleting a snapshot.
        """
        # Create or coerce a protobuf request object.
        request = snapshots.DeleteSnapshotRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_snapshot,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def list_snapshots(
        self,
        request: Optional[Union[snapshots.ListSnapshotsRequest, dict]] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> snapshots.ListSnapshotsResponse:
        r"""Lists snapshots.

        .. code-block:: python

            from google.cloud import dataflow_v1beta3

            def sample_list_snapshots():
                # Create a client
                client = dataflow_v1beta3.SnapshotsV1Beta3Client()

                # Initialize request argument(s)
                request = dataflow_v1beta3.ListSnapshotsRequest(
                )

                # Make the request
                response = client.list_snapshots(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.dataflow_v1beta3.types.ListSnapshotsRequest, dict]):
                The request object. Request to list snapshots.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dataflow_v1beta3.types.ListSnapshotsResponse:
                List of snapshots.
        """
        # Create or coerce a protobuf request object.
        request = snapshots.ListSnapshotsRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_snapshots,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Close the underlying transport on context-manager exit.
        await self.transport.close()


# Attach the installed package version to outgoing requests; fall back to a
# bare ClientInfo when the distribution metadata is unavailable.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-dataflow-client",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = ("SnapshotsV1Beta3AsyncClient",)