text
stringlengths
4
1.02M
meta
dict
from testnado import AuthenticatedFetchCase
from tornado.testing import AsyncHTTPTestCase

# Python 2/3 compatibility: ``urlparse`` moved to ``urllib.parse`` in py3.
try:
    import urlparse
except ImportError:
    import urllib.parse as urlparse


class HandlerTestCase(AuthenticatedFetchCase, AsyncHTTPTestCase):
    """Tornado test case mixin adding redirect-path assertions."""

    def assert_redirected_path_equals(self, expected_path, response):
        """Fail unless ``response`` redirects to ``expected_path``.

        Only the path component of the ``Location`` header is compared,
        so scheme/host/query differences are ignored.
        """
        if "Location" not in response.headers:
            self.fail("Response does not have a 'Location' header.")
        redirect_url = response.headers["Location"]
        redirect_path = urlparse.urlparse(redirect_url).path
        self.assertEqual(expected_path, redirect_path)
{ "content_hash": "afe06960d0947d88b9aaa5981644f9d1", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 69, "avg_line_length": 33.470588235294116, "alnum_prop": 0.7381370826010545, "repo_name": "joshmarshall/testnado", "id": "84a6fe07eec280a28ff5fda8c429fe9ea74bcb5e", "size": "569", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "testnado/handler_test_case.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "38446" } ], "symlink_target": "" }
from cms.api import add_plugin, create_page
from cms.models import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.project.pluginapp.plugins.caching.cms_plugins import NoCachePlugin, SekizaiPlugin
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils import get_cms_setting
from django.core.cache import cache
from django.db import connection
from django.template import Template, RequestContext
from django.conf import settings
from cms.views import _get_cache_version


class CacheTestCase(CMSTestCase):
    """Tests for django-cms placeholder- and page-level caching.

    NOTE(review): the exact assertNumQueries counts encode current cache
    behavior; they are intentionally brittle and must not be "tidied".
    """

    def tearDown(self):
        # Each test pollutes the shared cache; wipe it between tests.
        cache.clear()

    def test_cache_placeholder(self):
        """Rendering a cached placeholder needs fewer queries the second time."""
        template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
        page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
        placeholder = page1.placeholders.filter(slot="body")[0]
        add_plugin(placeholder, "TextPlugin", 'en', body="English")
        add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
        # First render: cache is cold, expect 3 queries.
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        rctx = RequestContext(request)
        with self.assertNumQueries(3):
            template.render(rctx)
        connection.queries = []
        # Second render with edit mode off: placeholder cache hit, 1 query.
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        request.toolbar.edit_mode = False
        rctx = RequestContext(request)
        template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
        with self.assertNumQueries(1):
            template.render(rctx)
        # toolbar
        # Edit mode bypasses the placeholder cache, so we are back to 3 queries.
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        request.toolbar.edit_mode = True
        template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
        rctx = RequestContext(request)
        with self.assertNumQueries(3):
            template.render(rctx)
        page1.publish('en')
        cache.clear()
        # Strip Django's own cache middleware so only CMS caching is measured.
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware'
        ]
        middleware = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
        with SettingsOverride(CMS_PAGE_CACHE=False, MIDDLEWARE_CLASSES=middleware):
            # Page cache disabled: placeholder cache still reduces the
            # query count on the second request.
            with self.assertNumQueries(FuzzyInt(14, 18)):
                self.client.get('/en/')
            with self.assertNumQueries(FuzzyInt(7, 11)):
                self.client.get('/en/')
        with SettingsOverride(CMS_PAGE_CACHE=False, MIDDLEWARE_CLASSES=middleware, CMS_PLACEHOLDER_CACHE=False):
            # Both caches disabled: every request pays the full query cost.
            with self.assertNumQueries(FuzzyInt(9, 13)):
                self.client.get('/en/')

    def test_no_cache_plugin(self):
        """A plugin flagged as uncacheable disables caching for its placeholder."""
        page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
        placeholder1 = page1.placeholders.filter(slot="body")[0]
        placeholder2 = page1.placeholders.filter(slot="right-column")[0]
        plugin_pool.register_plugin(NoCachePlugin)
        add_plugin(placeholder1, "TextPlugin", 'en', body="English")
        add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Cold cache: 3 queries.
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
        rctx = RequestContext(request)
        with self.assertNumQueries(3):
            template.render(rctx)
        # Warm cache: 1 query.
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
        rctx = RequestContext(request)
        with self.assertNumQueries(1):
            template.render(rctx)
        # Adding a NoCachePlugin makes the placeholder uncacheable.
        add_plugin(placeholder1, "NoCachePlugin", 'en')
        page1.publish('en')
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
        rctx = RequestContext(request)
        with self.assertNumQueries(4):
            render = template.render(rctx)
        with self.assertNumQueries(FuzzyInt(14, 18)):
            response = self.client.get('/en/')
        # NoCachePlugin emits output delimited by "$$$" that varies per render.
        resp1 = response.content.decode('utf8').split("$$$")[1]
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
        rctx = RequestContext(request)
        with self.assertNumQueries(4):
            render2 = template.render(rctx)
        with self.assertNumQueries(FuzzyInt(10, 14)):
            response = self.client.get('/en/')
        resp2 = response.content.decode('utf8').split("$$$")[1]
        # Uncached output must differ between requests.
        self.assertNotEqual(render, render2)
        self.assertNotEqual(resp1, resp2)
        plugin_pool.unregister_plugin(NoCachePlugin)

    def test_cache_page(self):
        """Whole-page cache: hit, invalidation on unpublish, and CMS_PAGE_CACHE=False."""
        # Clear the entire cache for a clean slate
        cache.clear()
        # Ensure that we're testing in an environment WITHOUT the MW cache...
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware'
        ]
        mw_classes = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
        with SettingsOverride(MIDDLEWARE_CLASSES=mw_classes):
            # Silly to do these tests if this setting isn't True
            page_cache_setting = get_cms_setting('PAGE_CACHE')
            self.assertTrue(page_cache_setting)
            # Create a test page
            page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
            # Add some content
            placeholder = page1.placeholders.filter(slot="body")[0]
            add_plugin(placeholder, "TextPlugin", 'en', body="English")
            add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
            # Create a request object
            request = self.get_request(page1.get_path(), 'en')
            # Ensure that user is NOT authenticated
            self.assertFalse(request.user.is_authenticated())
            # Test that the page is initially uncached
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get('/en/')
            self.assertEqual(response.status_code, 200)
            #
            # Test that subsequent requests of the same page are cached by
            # asserting that they require fewer queries.
            #
            with self.assertNumQueries(0):
                response = self.client.get('/en/')
            self.assertEqual(response.status_code, 200)
            #
            # Test that the cache is invalidated on unpublishing the page
            #
            old_version = _get_cache_version()
            page1.unpublish('en')
            self.assertGreater(_get_cache_version(), old_version)
            #
            # Test that this means the page is actually not cached.
            #
            page1.publish('en')
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get('/en/')
            self.assertEqual(response.status_code, 200)
            #
            # Test that the above behavior is different when CMS_PAGE_CACHE is
            # set to False (disabled)
            #
            cache.clear()
            with SettingsOverride(CMS_PAGE_CACHE=False):
                # Test that the page is initially uncached
                with self.assertNumQueries(FuzzyInt(1, 20)):
                    response = self.client.get('/en/')
                self.assertEqual(response.status_code, 200)
                #
                # Test that subsequent requests of the same page are still requires DB
                # access.
                #
                with self.assertNumQueries(FuzzyInt(1, 20)):
                    response = self.client.get('/en/')
                self.assertEqual(response.status_code, 200)

    def test_invalidate_restart(self):
        """Restarting the plugin pool (simulated server restart) invalidates the page cache."""
        # Clear the entire cache for a clean slate
        cache.clear()
        # Ensure that we're testing in an environment WITHOUT the MW cache...
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware'
        ]
        mw_classes = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
        with SettingsOverride(MIDDLEWARE_CLASSES=mw_classes):
            # Silly to do these tests if this setting isn't True
            page_cache_setting = get_cms_setting('PAGE_CACHE')
            self.assertTrue(page_cache_setting)
            # Create a test page
            page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
            # Add some content
            placeholder = page1.placeholders.filter(slot="body")[0]
            add_plugin(placeholder, "TextPlugin", 'en', body="English")
            add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
            # Create a request object
            request = self.get_request(page1.get_path(), 'en')
            # Ensure that user is NOT authenticated
            self.assertFalse(request.user.is_authenticated())
            # Test that the page is initially uncached
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get('/en/')
            self.assertEqual(response.status_code, 200)
            #
            # Test that subsequent requests of the same page are cached by
            # asserting that they require fewer queries.
            #
            with self.assertNumQueries(0):
                response = self.client.get('/en/')
            self.assertEqual(response.status_code, 200)
            # Simulate a server restart: clear and re-discover the plugin
            # pool, then restore the original plugin registry.
            old_plugins = plugin_pool.plugins
            plugin_pool.clear()
            plugin_pool.discover_plugins()
            plugin_pool.plugins = old_plugins
            # After the "restart" the page must be re-rendered (queries > 0).
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get('/en/')
            self.assertEqual(response.status_code, 200)

    def test_sekizai_plugin(self):
        """Sekizai-injected assets (the JS alert) survive page-cache hits."""
        page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
        placeholder1 = page1.placeholders.filter(slot="body")[0]
        placeholder2 = page1.placeholders.filter(slot="right-column")[0]
        plugin_pool.register_plugin(SekizaiPlugin)
        add_plugin(placeholder1, "SekizaiPlugin", 'en')
        add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        page1.publish('en')
        # First request renders; second is served from cache — both must
        # contain the sekizai-injected script.
        response = self.client.get('/en/')
        self.assertContains(response, 'alert(')
        response = self.client.get('/en/')
        self.assertContains(response, 'alert(')
{ "content_hash": "3433538e50c2efd707864bf082896ab9", "timestamp": "", "source": "github", "line_count": 267, "max_line_length": 112, "avg_line_length": 43.344569288389515, "alnum_prop": 0.6083124513954895, "repo_name": "intgr/django-cms", "id": "6b9f27a04913448e0a9bcc212a8228278ce92581", "size": "11597", "binary": false, "copies": "3", "ref": "refs/heads/develop", "path": "cms/tests/cache.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "88130" }, { "name": "JavaScript", "bytes": "408675" }, { "name": "Python", "bytes": "2863951" }, { "name": "Ruby", "bytes": "990" }, { "name": "Shell", "bytes": "1383" }, { "name": "XSLT", "bytes": "5122" } ], "symlink_target": "" }
""" Test nonlocal uses and unused-variable. """
# NOTE(review): this module is a pylint test fixture (func_noerror_*); its
# code shape is the subject under test and must not be refactored.

__revision__ = 1

def test_nonlocal():
    """ Test that assigning to a nonlocal does not trigger an
    'unused-variable' warnings.
    """
    # 'attr' is only ever written, never read here — without the nonlocal
    # rebinding below, pylint would flag it as unused.
    attr = True
    def set_value(val):
        """ Set the value in a nonlocal. """
        nonlocal attr
        attr = val
    return set_value
{ "content_hash": "1be1bdb895db2c0228f74d5dcfef185e", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 58, "avg_line_length": 24.071428571428573, "alnum_prop": 0.5934718100890207, "repo_name": "Titulacion-Sistemas/PythonTitulacion-EV", "id": "ffcc9783f9a38f2a37d8f425fe19ccd30d576875", "size": "337", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "Lib/site-packages/pylint/test/input/func_noerror_unused_variable_py30.py", "mode": "33188", "license": "mit", "language": [ { "name": "ASP", "bytes": "2117" }, { "name": "C", "bytes": "469338" }, { "name": "C++", "bytes": "93276" }, { "name": "CSS", "bytes": "173812" }, { "name": "JavaScript", "bytes": "203291" }, { "name": "PowerShell", "bytes": "8104" }, { "name": "Python", "bytes": "17198855" }, { "name": "Shell", "bytes": "2237" }, { "name": "TeX", "bytes": "1527" }, { "name": "Visual Basic", "bytes": "904" }, { "name": "XSLT", "bytes": "154751" } ], "symlink_target": "" }
"""Top-level namespace for spm."""
# Re-exports the public SPM interface classes so callers can write
# ``from nipype.interfaces.spm import Realign`` etc.

# Base machinery shared by all SPM interfaces.
from nipype.interfaces.spm.base import (Info, SPMCommand, logger, no_spm,
                                        scans_for_fname, scans_for_fnames)
# Preprocessing interfaces.
from nipype.interfaces.spm.preprocess import (SliceTiming, Realign, Coregister,
                                              Normalize, Segment, Smooth,
                                              NewSegment)
# Model estimation / statistics interfaces.
from nipype.interfaces.spm.model import (Level1Design, EstimateModel,
                                         EstimateContrast, OneSampleTTest,
                                         TwoSampleTTest, MultipleRegression,
                                         Threshold, OneSampleTTestDesign,
                                         TwoSampleTTestDesign,
                                         PairedTTestDesign,
                                         MultipleRegressionDesign
                                         )
{ "content_hash": "def3a84135e5c8ec39fa07b1fee790a3", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 84, "avg_line_length": 59, "alnum_prop": 0.47231638418079097, "repo_name": "satra/NiPypeold", "id": "f948a87755308826be628a35f2246515042a6555", "size": "999", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nipype/interfaces/spm/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "931" }, { "name": "Objective-C", "bytes": "4736" }, { "name": "Python", "bytes": "1389618" }, { "name": "Tcl", "bytes": "43377" } ], "symlink_target": "" }
from peewee import *
from playhouse.sqlite_ext import SqliteExtDatabase

# Single shared SQLite database for all models; threadlocals gives each
# thread its own connection.
db = SqliteExtDatabase('store/virus_manager.db', threadlocals=True)


class BaseModel(Model):
    """Abstract base binding every model to the shared database."""
    class Meta:
        database = db


class ManagedMachine(BaseModel):
    """A VM under management, cloned from a reference image."""
    # Unique name of the cloned VM image.
    image_name = TextField(unique=True)
    # Name of the base image this machine was created from.
    reference_image = TextField()
    # Creation time (integer — presumably a Unix timestamp; confirm with callers).
    creation_time = IntegerField()


class Infection(BaseModel):
    """A virus/infection applied to a managed machine (many-to-one)."""
    name = TextField()
    machine = ForeignKeyField(ManagedMachine, related_name='infections')


# Import-time side effect: create tables if they don't already exist
# (second argument True = safe/IF NOT EXISTS).
db.create_tables([ManagedMachine, Infection], True)
{ "content_hash": "be8de72ccfdf7cac89a046c6ff3af477", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 51, "avg_line_length": 23.434782608695652, "alnum_prop": 0.7235621521335807, "repo_name": "nsgomez/vboxmanager", "id": "53c518786997eb30dcc18cd3cb48f487eff25bfb", "size": "539", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "models.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "31" }, { "name": "CSS", "bytes": "788" }, { "name": "Python", "bytes": "23660" } ], "symlink_target": "" }
"""
    :codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""

import salt.states.incron as incron
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase


class IncronTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for salt.states.incron
    """

    def setup_loader_modules(self):
        # No special loader config needed; just register the module.
        return {incron: {}}

    # 'present' function tests: 1

    def test_present(self):
        """
        Test to verifies that the specified incron job is present
        for the specified user.
        """
        name = "salt"
        path = "/home/user"
        mask = "IN_MODIFY"
        cmd = 'echo "$$ $@"'

        ret = {"name": name, "result": None, "comment": "", "changes": {}}

        comt4 = "Incron {} for user root failed to commit with error \nabsent".format(
            name
        )

        mock_dict = MagicMock(
            return_value={"crons": [{"path": path, "cmd": cmd, "mask": mask}]}
        )
        # side_effect order drives the four set_job outcomes asserted below:
        # already present, newly added, updated, then a commit failure.
        mock = MagicMock(side_effect=["present", "new", "updated", "absent"])
        with patch.dict(
            incron.__salt__, {"incron.list_tab": mock_dict, "incron.set_job": mock}
        ):
            with patch.dict(incron.__opts__, {"test": True}):
                # test=True: state only reports what it would do.
                comt = "Incron {} is set to be added".format(name)
                ret.update({"comment": comt})
                self.assertDictEqual(incron.present(name, path, mask, cmd), ret)

            with patch.dict(incron.__opts__, {"test": False}):
                comt = "Incron {} already present".format(name)
                ret.update({"comment": comt, "result": True})
                self.assertDictEqual(incron.present(name, path, mask, cmd), ret)

                comt = "Incron {} added to root's incrontab".format(name)
                ret.update({"comment": comt, "changes": {"root": "salt"}})
                self.assertDictEqual(incron.present(name, path, mask, cmd), ret)

                comt = "Incron {} updated".format(name)
                ret.update({"comment": comt})
                self.assertDictEqual(incron.present(name, path, mask, cmd), ret)

                # Unexpected set_job return value is reported as a failure.
                ret.update({"comment": comt4, "result": False, "changes": {}})
                self.assertDictEqual(incron.present(name, path, mask, cmd), ret)

    # 'absent' function tests: 1

    def test_absent(self):
        """
        Test to verifies that the specified incron job is absent
        for the specified user.
        """
        name = "salt"
        path = "/home/user"
        mask = "IN_MODIFY"
        cmd = 'echo "$$ $@"'

        ret = {"name": name, "result": True, "comment": "", "changes": {}}

        comt4 = "Incron {} for user root failed to commit with error new".format(name)

        mock_dict = MagicMock(
            return_value={"crons": [{"path": path, "cmd": cmd, "mask": mask}]}
        )
        # side_effect order: already absent, removed, then a commit failure.
        mock = MagicMock(side_effect=["absent", "removed", "new"])
        with patch.dict(
            incron.__salt__, {"incron.list_tab": mock_dict, "incron.rm_job": mock}
        ):
            with patch.dict(incron.__opts__, {"test": True}):
                comt = "Incron {} is absent".format(name)
                ret.update({"comment": comt})
                self.assertDictEqual(incron.absent(name, path, mask, cmd), ret)

            with patch.dict(incron.__opts__, {"test": False}):
                comt = "Incron {} already absent".format(name)
                ret.update({"comment": comt, "result": True})
                self.assertDictEqual(incron.absent(name, path, mask, cmd), ret)

                comt = "Incron {} removed from root's crontab".format(name)
                ret.update({"comment": comt, "changes": {"root": "salt"}})
                self.assertDictEqual(incron.absent(name, path, mask, cmd), ret)

                # Unexpected rm_job return value is reported as a failure.
                ret.update({"comment": comt4, "result": False, "changes": {}})
                self.assertDictEqual(incron.absent(name, path, mask, cmd), ret)
{ "content_hash": "b8c02d456d25dfe1ee197ef0e2e2cc17", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 86, "avg_line_length": 39.495049504950494, "alnum_prop": 0.546001504136375, "repo_name": "saltstack/salt", "id": "066676567ca74e102014eacc08b3166cc44751aa", "size": "3989", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/unit/states/test_incron.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "14911" }, { "name": "C", "bytes": "1571" }, { "name": "Cython", "bytes": "1458" }, { "name": "Dockerfile", "bytes": "184" }, { "name": "Groovy", "bytes": "12318" }, { "name": "HCL", "bytes": "257" }, { "name": "HTML", "bytes": "8031" }, { "name": "Jinja", "bytes": "45598" }, { "name": "Makefile", "bytes": "713" }, { "name": "NSIS", "bytes": "76572" }, { "name": "PowerShell", "bytes": "75891" }, { "name": "Python", "bytes": "41444811" }, { "name": "Rich Text Format", "bytes": "6242" }, { "name": "Roff", "bytes": "191" }, { "name": "Ruby", "bytes": "961" }, { "name": "SaltStack", "bytes": "35856" }, { "name": "Scheme", "bytes": "895" }, { "name": "Scilab", "bytes": "1147" }, { "name": "Shell", "bytes": "524917" } ], "symlink_target": "" }
"""
Personal deployment (same host) settings

Data is kept in personal home directory area.
Service configuration is kept under personal home directory.

This is also useful for non-system deployment on a shared host.
"""

from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function

# Pulls in shared defaults (os, ANNALIST_VERSION, *_LOG_FILE,
# TRACE_FIELD_VALUE, DJANGO_ROOT, etc.) from the common settings module.
from .common import *

ANNALIST_VERSION_MSG = "Annalist version %s (personal configuration)"%(ANNALIST_VERSION)

SETTINGS_MODULE = __name__
SITE_DIR_NAME = "annalist_site"
# All site data lives under the invoking user's home directory.
BASE_DATA_DIR = os.path.expanduser("~")
BASE_SITE_DIR = os.path.join(BASE_DATA_DIR, SITE_DIR_NAME)
CONFIG_BASE = os.path.join(os.path.expanduser("~"), ".annalist/")
STATIC_ROOT = os.path.join(BASE_SITE_DIR, 'static')

# Log files are written alongside the site data.
BASE_LOG_DIR = BASE_SITE_DIR+"/"
ANNALIST_LOG_PATH = BASE_LOG_DIR+ANNALIST_LOG_FILE
ACCESS_LOG_PATH = BASE_LOG_DIR+ACCESS_LOG_FILE
ERROR_LOG_PATH = BASE_LOG_DIR+ERROR_LOG_FILE

DATABASE_PATH = os.path.join(BASE_SITE_DIR, 'db.sqlite3')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DATABASE_PATH,
    }
}

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']     # Insecure: use e.g. ['.annalist.net']

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'timed': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # Include the default Django email handler for errors
        # This is what you'd get without configuring logging at all.
        'mail_admins': {
            'class': 'django.utils.log.AdminEmailHandler',
            'level': 'ERROR',
            # But the emails are plain text by default - HTML is nicer
            'include_html': True,
            'formatter': 'verbose'
        },
        # Log to a text file that can be rotated by logrotate
        'logfile': {
            # 'class': 'logging.handlers.WatchedFileHandler',
            # 'class': 'logging.handlers.RotatingFileHandler',
            'class': 'annalist_site.settings.common.RotatingNewFileHandler',
            'filename': ANNALIST_LOG_PATH,
            'maxBytes': 2*1024*1024,    # 2Mb
            'backupCount': 9,           # Keep 9 files
            'level': TRACE_FIELD_VALUE,
            'formatter': 'timed'
        },
    },
    'loggers': {
        # Again, default Django configuration to email unhandled exceptions
        # 'django.request': {
        #     'handlers': ['mail_admins'],
        #     'level': 'ERROR',
        #     'propagate': True,
        # },
        'django.request': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': True,
        },
        # Might as well log any errors anywhere else in Django
        'django': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': False,
        },
        'annalist_root': {
            'handlers': ['logfile'],
            'level': 'INFO',            # Or maybe INFO or DEBUG
            'propagate': False
        },
        'annalist_site': {
            'handlers': ['logfile'],
            'level': 'INFO',            # Or maybe INFO or DEBUG
            'propagate': False
        },
        'annalist': {
            'handlers': ['logfile'],
            'level': TRACE_FIELD_VALUE, # Or maybe INFO or DEBUG
            'propagate': False
        },
        'login': {
            'handlers': ['logfile'],
            'level': 'INFO',            # Or maybe INFO or DEBUG
            'propagate': False
        },
    },
}

import logging
log = logging.getLogger(__name__)

log.info("Annalist starting...")

# Force new log files for any rotating file log handlers
for h in log.handlers:
    log.info("@@ log handler %r"%(h,))
    if isinstance(h, logging.handlers.RotatingFileHandler):
        log.info("@@ log rollover")
        h.doRollover()

# log.info("Annalist version %s (personal configuration)"%(ANNALIST_VERSION))
log.info(ANNALIST_VERSION_MSG)
# For development/testing: don't log SECRET_KEY in production!
# log.info("SECRET_KEY: "+SECRET_KEY)
log.debug("SETTINGS_MODULE: "+SETTINGS_MODULE)
log.debug("BASE_DATA_DIR: "+BASE_DATA_DIR)
log.debug("BASE_SITE_DIR: "+BASE_SITE_DIR)
log.debug("CONFIG_BASE: "+CONFIG_BASE)
log.debug("DJANGO_ROOT: "+DJANGO_ROOT)
log.debug("SITE_CONFIG_DIR: "+SITE_CONFIG_DIR)
log.debug("SITE_SRC_ROOT: "+SITE_SRC_ROOT)
log.debug("STATICFILES_DIRS: "+repr(STATICFILES_DIRS))
log.debug("DB PATH: "+DATABASES['default']['NAME'])
log.debug("ALLOWED_HOSTS: "+",".join(ALLOWED_HOSTS))
log.debug("ANNALIST_LOG_PATH: "+ANNALIST_LOG_PATH)
log.debug("TRACE_FIELD_VALUE: "+str(TRACE_FIELD_VALUE))

# End.
{ "content_hash": "74f65bb6b1ddad565dfc14c53b835b33", "timestamp": "", "source": "github", "line_count": 145, "max_line_length": 95, "avg_line_length": 34.813793103448276, "alnum_prop": 0.5671553090332805, "repo_name": "gklyne/annalist", "id": "4d16df8f80376a1884c4f6f9154265db7bac983a", "size": "5048", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/annalist_root/annalist_site/settings/personal.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "295504" }, { "name": "Dockerfile", "bytes": "2276" }, { "name": "HTML", "bytes": "160550" }, { "name": "Haskell", "bytes": "8403" }, { "name": "JavaScript", "bytes": "3127" }, { "name": "Makefile", "bytes": "3312" }, { "name": "Python", "bytes": "4767305" }, { "name": "Shell", "bytes": "71836" }, { "name": "TeX", "bytes": "131682" } ], "symlink_target": "" }
import base64
import binascii
import bz2
import logging
import plistlib
import re
from typing import Any, Union, Dict
from xml.parsers.expat import ExpatError

Plist = Dict[str, Any]
Text = Union[str, bytes]


logger = logging.getLogger(__name__)


def class_to_title(text):
    """Split a CamelCase class name into space-separated words."""
    # Insert a space after each lowercase-to-uppercase or acronym boundary.
    return re.sub(r'([a-z](?=[A-Z])|[A-Z](?=[A-Z][a-z]))', r'\1 ', text)


def safe_text(text: Any) -> str:
    """Process text of any type to ensure it can be saved in the DB."""
    if isinstance(text, bytes):
        # Always decode bytes to unicode, replacing undecodable sequences.
        result = text.decode('UTF-8', errors='replace')
    elif isinstance(text, str):
        result = text
    else:
        result = str(text)
    # Strip null characters, which some databases reject.
    return result.replace('\x00', '')


def stringify(data):
    """Sanitize collection data into a string format for db storage.

    Lists become a comma separated string, or '{EMPTY}' when empty; all
    other types (dict, int, float, bool, nested collections) are str()'d.
    """
    if not isinstance(data, list):
        return str(data)
    if not data:
        return "{EMPTY}"
    return ", ".join(str(item) for item in data)


def decode_submission_data(data: Text, compression: str = '') -> bytes:
    """Return bytes from (possibly) base64-encoded, bz2-compressed text.

    Sal submissions historically arrive as plain text, base64 text, or
    base64 + bz2. ``compression`` is matched by substring: 'base64'
    triggers base64 decoding first, 'bz2' triggers decompression after.
    Returns b'' when either step fails.
    """
    if 'base64' in compression:
        try:
            data = base64.b64decode(data)
        except (TypeError, binascii.Error):
            logger.warning("Submission data failed base 64 decoding: '%s'", data)
            data = b''

    if 'bz2' in compression:
        try:
            data = bz2.decompress(data)
        except IOError:
            logger.warning("Submission data failed decompression: '%s'", data)
            data = b''

    # Callers feed this to plistlib, so always hand back bytes even when
    # no compression step ran.
    return data.encode() if isinstance(data, str) else data


def submission_plist_loads(data: Text, compression: str = '') -> Plist:
    """Deserialize submission plist data, returning {} on parse failure."""
    if compression:
        data = decode_submission_data(data, compression)
    if isinstance(data, str):
        data = data.encode()
    try:
        return plistlib.loads(data)
    except (plistlib.InvalidFileException, ExpatError):
        logger.warning("Submission data failed plist deserialization: '%s'", data)
        return {}


def is_valid_plist(data: Text) -> bool:
    """Return True when ``data`` parses as a plist (XML or binary)."""
    if isinstance(data, str):
        data = data.encode()
    try:
        plistlib.loads(data)
    except (plistlib.InvalidFileException, ExpatError):
        return False
    return True
{ "content_hash": "7db01024e7cb4102e84e1cd4270e2b9c", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 82, "avg_line_length": 30.218487394957982, "alnum_prop": 0.639599555061179, "repo_name": "sheagcraig/sal", "id": "c0766843e1a63ab189b202e66a6382107b838d54", "size": "3596", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "utils/text_utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "237900" }, { "name": "Dockerfile", "bytes": "2210" }, { "name": "HTML", "bytes": "152149" }, { "name": "JavaScript", "bytes": "275632" }, { "name": "Makefile", "bytes": "2208" }, { "name": "Python", "bytes": "601593" }, { "name": "Shell", "bytes": "4539" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Article table for the
    # blog app. Do not edit field definitions by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('category', models.CharField(blank=True, max_length=50)),
                # Set once on creation (auto_now_add), never updated.
                ('date_time', models.DateTimeField(auto_now_add=True)),
                ('content', models.TextField(blank=True, null=True)),
            ],
            options={
                # Newest articles first by default.
                'ordering': ['-date_time'],
            },
        ),
    ]
{ "content_hash": "70d234571baeaf97addf3d497632f6a9", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 114, "avg_line_length": 28.814814814814813, "alnum_prop": 0.5398457583547558, "repo_name": "tongxindao/shiyanlou", "id": "7428df6b41cced23530f56001e6293bad3b443e1", "size": "851", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "shiyanlou_cs803/my_blog/article/migrations/0001_initial.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "265212" }, { "name": "C++", "bytes": "686" }, { "name": "CSS", "bytes": "261341" }, { "name": "HTML", "bytes": "945024" }, { "name": "Java", "bytes": "115" }, { "name": "JavaScript", "bytes": "475129" }, { "name": "Makefile", "bytes": "750" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "529824" }, { "name": "Shell", "bytes": "384" } ], "symlink_target": "" }
import functools
import shlex

from namesync.packages import six


class RecordParseError(Exception):
    """Raised when a flatfile line cannot be parsed into a Record."""
    pass


DEFAULT_PRIORITY = 10


@functools.total_ordering
class Record(object):
    """A single DNS record: type, name, content plus optional priority/TTL.

    ``data`` carries provider-specific state (e.g. a remote record id) and is
    deliberately excluded from equality and ordering.
    """

    def __init__(self, type, name, content, priority=None, ttl=None, data=None):
        self.type = type
        self.name = name
        self.content = content
        # Normalize numbers to strings so parsed and constructed records
        # compare equal.
        self.priority = str(priority) if priority else None
        self.ttl = str(ttl) if ttl else None
        self.data = data

    def format(self, max_name_len=None):
        """Render the record as one flatfile line, padding the name column."""
        max_type_len = 5
        max_name_len = max_name_len or len(self.name)

        components = [
            self.type.ljust(max_type_len),
            self.name.ljust(max_name_len),
            self.quoted_content,
        ]

        if self.output_priority:
            # BUG FIX: DEFAULT_PRIORITY is an int and ' '.join() below
            # requires strings, so the fallback must be stringified.
            components.append(self.priority or str(DEFAULT_PRIORITY))

        if self.output_ttl:
            components.append(self.ttl)

        return ' '.join(components)

    def __str__(self):
        return self.format()

    def __eq__(self, other):
        # ``data`` is intentionally ignored.
        return all([
            self.type == other.type,
            self.name == other.name,
            self.content == other.content,
            self.priority == other.priority,
            self.ttl == other.ttl,
        ])

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # functools.total_ordering derives the remaining comparisons.
        return (self.type, self.name) < (other.type, other.name)

    @classmethod
    def parse(cls, string):
        """Parse one flatfile line.

        Returns None for blank or comment-only lines and raises
        RecordParseError for malformed lines.
        """
        components = shlex.split(string, comments=True)

        if len(components) == 0:
            return None

        if len(components) < 3:
            raise RecordParseError(string)

        record = cls(
            type=components[0],
            name=components[1],
            content=components[2],
        )

        if len(components) > 3:
            # For MX records the 4th column is the priority; otherwise it
            # is the TTL.
            if record.type == 'MX':
                record.priority = components[3]
            else:
                record.ttl = components[3]

        if len(components) > 4 and record.type == 'MX':
            record.ttl = components[4]
        elif len(components) > 5:
            raise RecordParseError(string)

        return record

    @property
    def uses_priority(self):
        return self.type == 'MX'

    @property
    def output_priority(self):
        # Emit a priority column whenever a TTL follows it so columns align,
        # even if the priority itself was never set.
        return self.uses_priority and (self.priority is not None or self.output_ttl)

    @property
    def output_ttl(self):
        return self.ttl is not None

    @property
    def quoted_content(self):
        # Quote content containing spaces so shlex.split() round-trips it.
        return repr(str(self.content)) if ' ' in self.content else self.content


def rreplace(s, old, new, maxreplace=None):
    """Like str.replace() but replaces occurrences from the right."""
    if maxreplace is None:
        other = s.rsplit(old)
    else:
        other = s.rsplit(old, maxreplace)
    return new.join(other)


def short_name(zone, name):
    """Strip the zone suffix from a fully-qualified name; '.' is the apex."""
    display_name = rreplace(name, zone, '', 1)
    display_name = display_name.rstrip('.')
    return '.' if display_name == '' else display_name


def full_name(zone, name):
    """Inverse of short_name(): expand a short name to fully-qualified form."""
    return zone if name == '.' else '{}.{}'.format(name, zone)


def flatfile_to_records(file):
    """Parse a flatfile (iterable of lines) into a sorted list of Records."""
    records = []
    for line in file:
        record = Record.parse(line)
        if record:
            records.append(record)
    records.sort()
    return records


def records_to_flatfile(records, file):
    """Write records to a flatfile, aligning the name column."""
    if not records:
        # Nothing to write; also avoids max() raising on an empty sequence.
        return
    max_name_len = max(len(record.name) for record in records) + 4
    for record in records:
        file.write(record.format(max_name_len))
        file.write('\n')


def make_records_map(records):
    """Index records as {(type, name): {content: record}}."""
    record_map = {}

    for record in records:
        key = (record.type, record.name)
        record_map.setdefault(key, {})[record.content] = record

    return record_map


def diff_records(old, new):
    """Diff two record lists into sorted {'add', 'update', 'remove'} lists.

    When a (type, name) pair maps to exactly one record on both sides, a
    content change is reported as an update (preserving provider-side
    ``data``) rather than a remove + add.
    """
    add = []
    update = []
    remove = []

    old_map = make_records_map(old)
    new_map = make_records_map(new)

    for key, new_content_map in six.iteritems(new_map):
        if key not in old_map:
            add.extend(new_content_map.values())
        else:
            old_content_map = old_map[key]
            if len(new_content_map) == len(old_content_map) == 1:
                # Single record on both sides: carry over remote state and
                # report a content change as an in-place update.
                new_record = six.next(six.itervalues(new_content_map))
                old_record = six.next(six.itervalues(old_content_map))
                if new_record != old_record:
                    new_record.data = old_record.data
                    update.append(new_record)
            else:
                for content, new_record in six.iteritems(new_content_map):
                    if content in old_content_map:
                        old_record = old_content_map[content]
                        if new_record != old_record:
                            new_record.data = old_record.data
                            update.append(new_record)
                    else:
                        add.append(new_record)

    for key, old_content_map in six.iteritems(old_map):
        if key not in new_map:
            remove.extend(old_content_map.values())
        else:
            new_content_map = new_map[key]
            if not (len(new_content_map) == len(old_content_map) == 1):
                for content, old_record in six.iteritems(old_content_map):
                    if content not in new_content_map:
                        remove.append(old_record)

    add.sort()
    update.sort()
    remove.sort()

    return {
        'add': add,
        'update': update,
        'remove': remove,
    }
{ "content_hash": "4927228176f7450e88496959c0f22b0a", "timestamp": "", "source": "github", "line_count": 189, "max_line_length": 84, "avg_line_length": 28.544973544973544, "alnum_prop": 0.5564411492122335, "repo_name": "dnerdy/namesync", "id": "24f34e04905e0ed1ff10dd4e107b214db3ee1668", "size": "5395", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "namesync/records.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "896733" }, { "name": "Shell", "bytes": "294" } ], "symlink_target": "" }
import threading
import struct
import sys
import re
import socket
import json
from websocket import create_connection

#THIS CLASS IS DEFINITELY NOT COMPLETE. TODO: FINISH.

class VoiceGateway(threading.Thread):
    # Python 2 module (print statements, `except Exception, e`).
    # Connects to a Discord voice gateway over a websocket, performs the
    # identify handshake and UDP IP discovery, then opens a UDP socket for
    # voice frames. The whole handshake currently runs inside __init__.

    def __init__(self, serverId, userId, sessionId, token, endpoint):
        threading.Thread.__init__(self)
        self.serverId = serverId
        self.userId = userId
        self.sessionId = sessionId
        self.token = token
        self.endpoint = endpoint
        self.opCodes = self.OPCodes(self)
        # `endpoint` arrives as "host:port"; split it into its parts.
        # NOTE(review): self.endpoint is assigned twice — the first
        # assignment above is immediately overwritten here.
        self.endpoint = endpoint.split(":")[0]
        self.websocketAddress = "wss://" + endpoint.split(":")[0]
        self.port = int(endpoint.split(":")[1])
        #self.ws = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            #self.ws.connect((self.ipAddress, self.port))
            self.ws = create_connection(self.websocketAddress)
            self.opCodes.receive()
            #message = self.ws.recv()
            self.sendIdentify()
            # Second receive is expected to deliver the READY payload, which
            # populates self.ssrc/self.port via OPCodes.ready().
            self.opCodes.receive()
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.socket.connect((self.endpoint, self.port))
            self.ip, self.port = self.ipDiscovery()
            self.sendSelectProtocol()
            self.opCodes.receive()
            self.sendFrame()
            # NOTE(review): this loop never exits, so __init__ never
            # returns and Thread.start()/run() are never reached.
            while True:
                print "Listening"
                data = self.socket.recvfrom(1024)
                print data
            #self.ws.close()
            #print "New socket"
            #self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            #print "Connecting " + self.ip + ":" + self.port
            #self.socket.connect((self.ip, self.port))
            #packet = bytearray(70)
            #self.socket.sendto(packet, (self.ip, self.port))
            #print "Receive"
            #data = self.socket.recvfrom(1024)
            #print data
            #while True:
            #    self.opCodes.receive()
            #    #message = self.ws.recv()
            #    #print message
        except Exception, e:
            print e;

    def sendFrame(self):
        #According to the discord API, we need to write a custom header
        #https://discordapp.com/developers/docs/topics/voice-connections#encrypting-and-sending-voice
        # NOTE(review): pack_into(">s", ...) writes exactly one byte taken
        # from the string, so "0x80" stores the character '0' (0x30), not
        # the byte 0x80 — likely not what the RTP header needs; confirm.
        # Also the ">I" write at offset 4 is immediately overwritten by the
        # ">i" write of self.ssrc at the same offset.
        headerSize = 24
        header = bytearray(headerSize)
        struct.pack_into(">s", header, 0, "0x80")
        struct.pack_into(">s", header, 1, "0x78")
        struct.pack_into(">H", header, 2, 1)
        struct.pack_into(">I", header, 4, 1)
        struct.pack_into(">i", header, 4, self.ssrc)
        self.socket.send(header + "hello")

    def sendIdentify(self):
        # Opcode 0: identify this client/session to the voice gateway.
        identify = {
            "op":0,
            "d": {
                "server_id": str(self.serverId),
                "user_id": str(self.userId),
                "session_id": str(self.sessionId),
                "token": str(self.token),
            }
        };
        self.ws.send(json.dumps(identify));

    def sendSelectProtocol(self):
        # Opcode 1: tell the gateway which transport/encryption mode to use,
        # echoing back the ip/port learned from ipDiscovery().
        selectProtocol = {
            "op":1,
            "d": {
                "protocol": "udp",
                "data": {
                    "address": str(self.ip),
                    "port": int(self.port),
                    "mode": "plain"#"xsalsa20_poly1305"
                }
            }
        }
        print json.dumps(selectProtocol)
        self.ws.send(json.dumps(selectProtocol));

    def sendSpeaking(self):
        # Opcode 5: mark this client as speaking.
        speaking = {
            "op":5,
            "d": {
                "speaking": True,
                "delay": 3,
            }
        }
        self.ws.send(json.dumps(speaking));

    def ipDiscovery(self):
        #According to discord specifications we must send a packet of size 70 (little endian) for ip discovery
        #https://discordapp.com/developers/docs/topics/voice-connections#ip-discovery
        # Sends a 70-byte probe containing our SSRC; the reply embeds the
        # externally-visible ip/port, which are scraped out with a regex.
        packetSize = 70
        packet = bytearray(packetSize)
        struct.pack_into('>I', packet, 0, self.ssrc)
        self.socket.send(packet)
        data = self.socket.recvfrom(packetSize)
        #TODO: Let's find a better regex scheme
        # NOTE(review): this parses the *repr* of the (data, addr) tuple,
        # which is fragile; struct.unpack_from on the payload would be
        # more robust.
        ipRegex = ".*00(\d+\.\d+\.\d+\.\d+).*, (\d+)\)\)"
        regexSearch = re.search(ipRegex, str(data).decode("utf-8"), re.M|re.I)
        ipAddress = regexSearch.group(1)
        port = regexSearch.group(2)
        return ipAddress, port

    class OPCodes:
        # Dispatches incoming voice-gateway messages by their "op" field and
        # stores interesting payload fields back onto the owning gateway.

        def __init__(self, voiceGateway):
            self.voiceGateway = voiceGateway

        def identify(self):
            print "identify"

        def selectProtocol(self):
            print "select protocol"

        def ready(self):
            # Opcode 2 (READY): capture the session parameters needed for
            # the UDP leg of the handshake.
            print "receive ready"
            print self.voiceGateway.data["d"]
            self.voiceGateway.heartbeatInterval = self.voiceGateway.data["d"]["heartbeat_interval"]
            self.voiceGateway.ssrc = self.voiceGateway.data["d"]["ssrc"]
            self.voiceGateway.port = self.voiceGateway.data["d"]["port"]
            self.voiceGateway.modes = self.voiceGateway.data["d"]["modes"]

        def heartbeat(self):
            print "received voice heartbeat"
            print self.voiceGateway.data
            self.voiceGateway.heartbeatInterval = self.voiceGateway.data["d"]["heartbeat_interval"]
            print "finished heartbeat"

        def sessionDescription(self):
            # Opcode 4: encryption parameters for the voice stream.
            self.voiceGateway.secretKey = self.voiceGateway.data["d"]["secret_key"]
            self.voiceGateway.mode = self.voiceGateway.data["d"]["mode"]
            print "session description"

        def speaking(self):
            print "speaking"

        def receive(self):
            # Blocking read of one websocket message, then dispatch on "op".
            print "Receiving"
            message = self.voiceGateway.ws.recv()
            print message
            self.voiceGateway.data = json.loads(message)
            opCode = self.voiceGateway.data["op"]
            print opCode
            #https://discordapp.com/developers/docs/topics/voice-connections
            gatewayOPCodes = {
                0: self.identify,
                1: self.selectProtocol,
                2: self.ready,
                # 3: self.heartbeat, ?????
                4: self.sessionDescription,
                5: self.speaking,
                8: self.heartbeat,
            }
            # NOTE(review): the fallback "8" is a string, so an unknown
            # opcode makes func() raise TypeError; the default should
            # probably be a callable (e.g. self.heartbeat).
            func = gatewayOPCodes.get(opCode, "8")
            func()
{ "content_hash": "c7fbbfd197a040c792d46e0188d35fee", "timestamp": "", "source": "github", "line_count": 176, "max_line_length": 104, "avg_line_length": 29.056818181818183, "alnum_prop": 0.6716855690262026, "repo_name": "alanrossx2/DiscordBot-PurpleMinion", "id": "d41b486030a4d93cfd5a56decb8bb01e27ff4360", "size": "5114", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "DiscordRequestAPI/VoiceGateway.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "27873" }, { "name": "Shell", "bytes": "1372" } ], "symlink_target": "" }
import unittest
import threading

from heronpy.api import global_metrics
import heron.instance.tests.python.utils.mock_generator as mock_generator


class GlobalMetricsTest(unittest.TestCase):
  """Unit tests for the process-wide counters in ``global_metrics``."""

  def setUp(self):
    # Route global metrics into a mock collector sampled every 10 seconds.
    self.metrics_collector = mock_generator.MockMetricsCollector()
    global_metrics.init(self.metrics_collector, 10)
    self.lock = threading.Lock()

  def test_normal(self):
    """Single-threaded incr/safe_incr land in the shared container."""
    global_metrics.incr("mycounter_a")
    global_metrics.incr("mycounter_b", 3)
    global_metrics.safe_incr("mycounter_c", 5)

    snapshot = global_metrics.metricsContainer.get_value_and_reset()
    expected = {"mycounter_a": 1, "mycounter_b": 3, "mycounter_c": 5}
    for counter_name, count in expected.items():
      self.assertTrue(counter_name in snapshot)
      self.assertEqual(snapshot[counter_name], count)

  def concurrent_incr(self):
    """Hammer one counter from ten threads and check the total."""
    def incr_worker():
      # Each worker contributes 1 + 2 + 3 = 6.
      global_metrics.safe_incr("K")
      global_metrics.safe_incr("K", 2)
      global_metrics.safe_incr("K", 3)

    workers = [threading.Thread(target=incr_worker) for _ in range(10)]
    for worker in workers:
      worker.start()
    for worker in workers:
      worker.join()

    snapshot = global_metrics.metricsContainer.get_value_and_reset()
    self.assertTrue("K" in snapshot)
    # Ten workers times six per worker.
    self.assertEqual(snapshot["K"], 60)

  def test_concurrent_incr(self):
    """Repeat the concurrent scenario to shake out rare interleavings."""
    for _ in range(100):
      global_metrics.metricsContainer.get_value_and_reset()
      self.concurrent_incr()
{ "content_hash": "b83f47cbfe550e2ee3c8bb56a2cf7ad7", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 73, "avg_line_length": 32.06521739130435, "alnum_prop": 0.6813559322033899, "repo_name": "lucperkins/heron", "id": "319f229bcc8a021b220d39cc19b1bf2e5ba8335b", "size": "2154", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "heron/instance/tests/python/utils/global_metrics_unittest.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "11709" }, { "name": "C++", "bytes": "1623239" }, { "name": "CSS", "bytes": "109554" }, { "name": "HCL", "bytes": "2115" }, { "name": "HTML", "bytes": "156820" }, { "name": "Java", "bytes": "4466689" }, { "name": "JavaScript", "bytes": "1110981" }, { "name": "M4", "bytes": "17941" }, { "name": "Makefile", "bytes": "1046" }, { "name": "Objective-C", "bytes": "1929" }, { "name": "Python", "bytes": "1537910" }, { "name": "Ruby", "bytes": "1930" }, { "name": "Scala", "bytes": "72781" }, { "name": "Shell", "bytes": "166876" }, { "name": "Smarty", "bytes": "528" }, { "name": "Thrift", "bytes": "915" } ], "symlink_target": "" }
import logging
import math
import os
import timeit
from collections import defaultdict

import pytest
import torch
from torch.autograd import grad
from torch.distributions import constraints, kl_divergence

import pyro
import pyro.distributions as dist
import pyro.optim
import pyro.poutine as poutine
from pyro import infer
from pyro.distributions.testing.rejection_gamma import ShapeAugmentedGamma
from pyro.infer import SVI, config_enumerate
from pyro.infer.enum import iter_discrete_traces
from pyro.infer.importance import vectorized_importance_weights
from pyro.infer.trace_elbo import Trace_ELBO
from pyro.infer.traceenum_elbo import TraceEnum_ELBO
from pyro.infer.util import LAST_CACHE_SIZE
from pyro.ops.indexing import Vindex
from pyro.util import torch_isnan
from tests.common import assert_equal, skipif_param

try:
    from contextlib import ExitStack  # python 3
except ImportError:
    from contextlib2 import ExitStack  # python 2

logger = logging.getLogger(__name__)


def _skip_cuda(*args):
    # Wrap a parametrize value so it is skipped on CUDA CI runs; see the
    # linked issue for why these cases are excluded there.
    return skipif_param(
        *args,
        condition="CUDA_TEST" in os.environ,
        reason="https://github.com/pyro-ppl/pyro/issues/1380"
    )


# Sequential enumeration of `depth` Bernoullis must yield 2**depth traces,
# each visiting the sample sites in declaration order.
@pytest.mark.parametrize("depth", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
def test_iter_discrete_traces_order(depth, graph_type):
    @config_enumerate(default="sequential")
    def model(depth):
        for i in range(depth):
            pyro.sample("x{}".format(i), dist.Bernoulli(0.5))

    traces = list(iter_discrete_traces(graph_type, model, depth))

    assert len(traces) == 2 ** depth
    for trace in traces:
        sites = [name for name, site in trace.nodes.items() if site["type"] == "sample"]
        assert sites == ["x{}".format(i) for i in range(depth)]


# A Bernoulli (2 values) followed by a 4-way Categorical enumerates to
# 2 * 4 traces.
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
def test_iter_discrete_traces_scalar(graph_type):
    pyro.clear_param_store()

    @config_enumerate(default="sequential")
    def model():
        p = pyro.param("p", torch.tensor(0.05))
        probs = pyro.param("probs", torch.tensor([0.1, 0.2, 0.3, 0.4]))
        x = pyro.sample("x", dist.Bernoulli(p))
        y = pyro.sample("y", dist.Categorical(probs))
        return dict(x=x, y=y)

    traces = list(iter_discrete_traces(graph_type, model))

    probs = pyro.param("probs")
    assert len(traces) == 2 * len(probs)


# Vectorized version: with a plate of size 2 the sample shapes depend on
# `expand`, but the number of sequential traces is still 2 * num_categories.
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("expand", [False, True])
def test_iter_discrete_traces_vector(expand, graph_type):
    pyro.clear_param_store()

    @config_enumerate(default="sequential", expand=expand)
    def model():
        p = pyro.param("p", torch.tensor([0.05, 0.15]))
        probs = pyro.param(
            "probs", torch.tensor([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
        )
        with pyro.plate("plate", 2):
            x = pyro.sample("x", dist.Bernoulli(p))
            y = pyro.sample("y", dist.Categorical(probs))
            if expand:
                assert x.size() == (2,)
                assert y.size() == (2,)
            else:
                assert x.shape == (1,)
                assert y.shape == (1,)
        return dict(x=x, y=y)

    traces = list(iter_discrete_traces(graph_type, model))

    probs = pyro.param("probs")
    assert len(traces) == 2 * probs.size(-1)


def test_enumerate_sequential_guide():
    # Sequential enumeration in the guide must replay the model once per
    # value of x, hence exactly two recorded values.
    values = []

    def model():
        x = pyro.sample("x", dist.Bernoulli(0.5))
        values.append(float(x))

    def guide():
        pyro.sample("x", dist.Bernoulli(0.5), infer={"enumerate": "sequential"})

    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    elbo.loss(model, guide)
    assert len(values) == 2, values


def test_enumerate_sequential_model():
    # Sequential enumeration is only supported in the guide, not the model.
    def model():
        pyro.sample("x", dist.Bernoulli(0.5), infer={"enumerate": "sequential"})

    def guide():
        pass

    with pytest.raises(NotImplementedError):
        elbo = TraceEnum_ELBO(max_plate_nesting=0)
        elbo.loss(model, guide)


# The usual dist.Bernoulli avoids NANs by clamping log prob. This unsafe version
# allows us to test additional NAN avoidance in _compute_dice_elbo().
class UnsafeBernoulli(dist.Bernoulli):
    # Bernoulli whose log_prob does NOT clamp, so probs of exactly 0 or 1
    # produce -inf/NaN — used to exercise the ELBO's NaN avoidance.
    def log_prob(self, value):
        i = value.long()
        j = torch.arange(len(self.probs), dtype=torch.long)
        return torch.stack([(-self.probs).log1p(), self.probs.log()])[i, j]


# Sanity check: away from the boundaries the unsafe version must agree with
# the standard Bernoulli log_prob.
@pytest.mark.parametrize("sample_shape", [(), (2,), (3, 4)])
def test_unsafe_bernoulli(sample_shape):
    logits = torch.randn(10)
    p = dist.Bernoulli(logits=logits)
    q = UnsafeBernoulli(logits=logits)
    x = p.sample(sample_shape)
    assert_equal(p.log_prob(x), q.log_prob(x))


# With probs containing exact 0 and 1, every ELBO entry point must still
# return a finite (non-NaN) loss.
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_avoid_nan(enumerate1):
    pyro.clear_param_store()

    def model():
        p = torch.tensor([0.0, 0.5, 1.0])
        with pyro.plate("batch", 3):
            pyro.sample("z", UnsafeBernoulli(p))

    @config_enumerate(default=enumerate1)
    def guide():
        p = pyro.param("p", torch.tensor([0.0, 0.5, 1.0], requires_grad=True))
        with pyro.plate("batch", 3):
            pyro.sample("z", UnsafeBernoulli(p))

    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    loss = elbo.loss(model, guide)
    assert not math.isnan(loss), loss
    loss = elbo.differentiable_loss(model, guide)
    assert not torch_isnan(loss), loss
    loss = elbo.loss_and_grads(model, guide)
    assert not math.isnan(loss), loss


# A simple Gaussian mixture model, with no vectorization.
def gmm_model(data, verbose=False):
    # Two-component GMM with means at -1 and +1; one Bernoulli assignment
    # per data point, sampled sequentially (no vectorization).
    p = pyro.param("p", torch.tensor(0.3, requires_grad=True))
    scale = pyro.param("scale", torch.tensor(1.0, requires_grad=True))
    mus = torch.tensor([-1.0, 1.0])
    for i in pyro.plate("data", len(data)):
        z = pyro.sample("z_{}".format(i), dist.Bernoulli(p))
        z = z.long()
        if verbose:
            logger.debug("M{} z_{} = {}".format(" " * int(i), int(i), z.cpu().numpy()))
        pyro.sample("x_{}".format(i), dist.Normal(mus[z], scale), obs=data[i])


def gmm_guide(data, verbose=False):
    # Mean-field guide: an independent Bernoulli assignment per data point.
    for i in pyro.plate("data", len(data)):
        p = pyro.param("p_{}".format(i), torch.tensor(0.6, requires_grad=True))
        z = pyro.sample("z_{}".format(i), dist.Bernoulli(p))
        z = z.long()
        if verbose:
            logger.debug("G{} z_{} = {}".format(" " * int(i), int(i), z.cpu().numpy()))


@pytest.mark.parametrize("data_size", [1, 2, 3])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("model", [gmm_model, gmm_guide])
def test_gmm_iter_discrete_traces(data_size, graph_type, model):
    pyro.clear_param_store()
    data = torch.arange(0.0, float(data_size))
    model = config_enumerate(model, "sequential")
    traces = list(iter_discrete_traces(graph_type, model, data=data, verbose=True))
    # This non-vectorized version is exponential in data_size:
    assert len(traces) == 2 ** data_size


# A Gaussian mixture model, with vectorized batching.
def gmm_batch_model(data):
    # Vectorized GMM: one OneHotCategorical assignment per point inside a
    # single plate, so enumeration stays constant in data size.
    p = pyro.param("p", torch.tensor([0.3], requires_grad=True))
    p = torch.cat([p, 1 - p])
    scale = pyro.param("scale", torch.tensor([1.0], requires_grad=True))
    mus = torch.tensor([-1.0, 1.0])
    with pyro.plate("data", len(data)) as batch:
        n = len(batch)
        z = pyro.sample("z", dist.OneHotCategorical(p).expand_by([n]))
        assert z.shape[-1] == 2
        loc = (z * mus).sum(-1)
        pyro.sample("x", dist.Normal(loc, scale.expand(n)), obs=data[batch])


def gmm_batch_guide(data):
    with pyro.plate("data", len(data)) as batch:
        n = len(batch)
        probs = pyro.param("probs", torch.ones(n, 1) * 0.6)
        probs = torch.cat([probs, 1 - probs], dim=1)
        z = pyro.sample("z", dist.OneHotCategorical(probs))
        assert z.shape[-1] == 2


@pytest.mark.parametrize("data_size", [1, 2, 3])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("model", [gmm_batch_model, gmm_batch_guide])
def test_gmm_batch_iter_discrete_traces(model, data_size, graph_type):
    pyro.clear_param_store()
    data = torch.arange(0.0, float(data_size))
    model = config_enumerate(model, "sequential")
    traces = list(iter_discrete_traces(graph_type, model, data=data))
    # This vectorized version is independent of data_size:
    assert len(traces) == 2


# Smoke test: one SVI step runs without error for both GMM variants under
# each enumeration strategy.
@pytest.mark.parametrize(
    "model,guide",
    [
        (gmm_model, gmm_guide),
        (gmm_batch_model, gmm_batch_guide),
    ],
    ids=["single", "batch"],
)
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_svi_step_smoke(model, guide, enumerate1):
    pyro.clear_param_store()
    data = torch.tensor([0.0, 1.0, 9.0])

    guide = config_enumerate(guide, default=enumerate1)
    optimizer = pyro.optim.Adam({"lr": 0.001})
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    inference = SVI(model, guide, optimizer, loss=elbo)
    inference.step(data)


# differentiable_loss must agree with loss_and_grads, both in value and in
# the gradients it produces for every parameter (same RNG seed for both).
@pytest.mark.parametrize(
    "model,guide",
    [
        (gmm_model, gmm_guide),
        (gmm_batch_model, gmm_batch_guide),
    ],
    ids=["single", "batch"],
)
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_differentiable_loss(model, guide, enumerate1):
    pyro.clear_param_store()
    data = torch.tensor([0.0, 1.0, 9.0])

    guide = config_enumerate(guide, default=enumerate1)
    elbo = TraceEnum_ELBO(
        max_plate_nesting=1, strict_enumeration_warning=any([enumerate1])
    )

    pyro.set_rng_seed(0)
    loss = elbo.differentiable_loss(model, guide, data)
    param_names = sorted(pyro.get_param_store())
    actual_loss = loss.item()
    actual_grads = grad(
        loss, [pyro.param(name).unconstrained() for name in param_names]
    )

    pyro.set_rng_seed(0)
    expected_loss = elbo.loss_and_grads(model, guide, data)
    expected_grads = [pyro.param(name).unconstrained().grad for name in param_names]

    assert_equal(actual_loss, expected_loss)
    for name, actual_grad, expected_grad in zip(
        param_names, actual_grads, expected_grads
    ):
        assert_equal(
            actual_grad,
            expected_grad,
            msg="bad {} gradient. Expected:\n{}\nActual:\n{}".format(
                name, expected_grad, actual_grad
            ),
        )


# The guide itself uses torch.autograd (a Newton step) to compute its
# parameters; an SVI step must still work in that setting.
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_svi_step_guide_uses_grad(enumerate1):
    data = torch.tensor([0.0, 1.0, 3.0])

    def model():
        scale = pyro.param("scale")
        loc = pyro.sample("loc", dist.Normal(0.0, 10.0))
        pyro.sample("b", dist.Bernoulli(0.5))
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.Normal(loc, scale), obs=data)

    @config_enumerate(default=enumerate1)
    def guide():
        p = pyro.param("p", torch.tensor(0.5), constraint=constraints.unit_interval)
        scale = pyro.param("scale", torch.tensor(1.0), constraint=constraints.positive)
        var = pyro.param("var", torch.tensor(1.0), constraint=constraints.positive)

        x = torch.tensor(0.0, requires_grad=True)
        prior = dist.Normal(0.0, 10.0).log_prob(x)
        likelihood = dist.Normal(x, scale).log_prob(data).sum()
        loss = -(prior + likelihood)
        g = grad(loss, [x], create_graph=True)[0]
        H = grad(g, [x], create_graph=True)[0]
        loc = x.detach() - g / H  # newton step
        pyro.sample("loc", dist.Normal(loc, var))
        pyro.sample("b", dist.Bernoulli(p))

    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    inference = SVI(model, guide, pyro.optim.Adam({}), elbo)
    inference.step()


# Single-Bernoulli ELBO: with enumeration the loss/grad are exact; without,
# they are Monte Carlo estimates checked to a loose tolerance. The scale
# parameter checks that poutine.scale multiplies both loss and gradient.
@pytest.mark.parametrize("scale", [1, 10])
@pytest.mark.parametrize("method", ["loss", "differentiable_loss", "loss_and_grads"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_elbo_bern(method, enumerate1, scale):
    pyro.clear_param_store()
    num_particles = 1 if enumerate1 else 10000
    prec = 0.001 if enumerate1 else 0.22
    q = pyro.param("q", torch.tensor(0.5, requires_grad=True))
    kl = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(0.25))

    @poutine.scale(scale=scale)
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(0.25).expand_by([num_particles]))

    @config_enumerate(default=enumerate1)
    @poutine.scale(scale=scale)
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(q).expand_by([num_particles]))

    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))

    if method == "loss":
        actual = elbo.loss(model, guide) / num_particles
        expected = kl.item() * scale
        assert_equal(
            actual,
            expected,
            prec=prec,
            msg="".join(
                [
                    "\nexpected = {}".format(expected),
                    "\n  actual = {}".format(actual),
                ]
            ),
        )
    else:
        if method == "differentiable_loss":
            loss = elbo.differentiable_loss(model, guide)
            actual = grad(loss, [q])[0] / num_particles
        elif method == "loss_and_grads":
            elbo.loss_and_grads(model, guide)
            actual = q.grad / num_particles
        expected = grad(kl, [q])[0] * scale
        assert_equal(
            actual,
            expected,
            prec=prec,
            msg="".join(
                [
                    "\nexpected = {}".format(expected.detach().cpu().numpy()),
                    "\n  actual = {}".format(actual.detach().cpu().numpy()),
                ]
            ),
        )


# Same structure as test_elbo_bern but with a continuous (Normal) site;
# "parallel" here exercises enumeration-by-sampling via num_samples.
@pytest.mark.parametrize("method", ["loss", "differentiable_loss", "loss_and_grads"])
@pytest.mark.parametrize("enumerate1", [None, "parallel"])
def test_elbo_normal(method, enumerate1):
    pyro.clear_param_store()
    num_particles = 1 if enumerate1 else 10000
    prec = 0.01
    q = pyro.param("q", torch.tensor(1.0, requires_grad=True))
    kl = kl_divergence(dist.Normal(q, 1.0), dist.Normal(0.0, 1.0))

    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Normal(0.0, 1.0).expand_by([num_particles]))

    @config_enumerate(default=enumerate1, num_samples=20000)
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Normal(q, 1.0).expand_by([num_particles]))

    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))

    if method == "loss":
        actual = elbo.loss(model, guide) / num_particles
        expected = kl.item()
        assert_equal(
            actual,
            expected,
            prec=prec,
            msg="".join(
                [
                    "\nexpected = {}".format(expected),
                    "\n  actual = {}".format(actual),
                ]
            ),
        )
    else:
        if method == "differentiable_loss":
            loss = elbo.differentiable_loss(model, guide)
            actual = grad(loss, [q])[0] / num_particles
        elif method == "loss_and_grads":
            elbo.loss_and_grads(model, guide)
            actual = q.grad / num_particles
        expected = grad(kl, [q])[0]
        assert_equal(
            actual,
            expected,
            prec=prec,
            msg="".join(
                [
                    "\nexpected = {}".format(expected.detach().cpu().numpy()),
                    "\n  actual = {}".format(actual.detach().cpu().numpy()),
                ]
            ),
        )


# Two independent Bernoulli sites with mixed enumeration strategies; the
# exact ELBO is the sum of two KL divergences, so loss and grad can be
# checked against closed form.
@pytest.mark.parametrize(
    "enumerate1,num_samples1",
    [
        (None, None),
        ("sequential", None),
        ("parallel", None),
        ("parallel", 300),
    ],
)
@pytest.mark.parametrize(
    "enumerate2,num_samples2",
    [
        (None, None),
        ("sequential", None),
        ("parallel", None),
        ("parallel", 300),
    ],
)
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_bern_bern(method, enumerate1, enumerate2, num_samples1, num_samples2):
    pyro.clear_param_store()
    if enumerate1 and enumerate2 and num_samples1 is None and num_samples2 is None:
        num_particles = 1
        prec = 0.001
    else:
        num_particles = 2 * 300 * 300
        for n in [num_samples1, num_samples2]:
            if n is not None:
                num_particles = num_particles // n
        prec = 0.2

    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))

    def model():
        pyro.sample("x1", dist.Bernoulli(0.2))
        pyro.sample("x2", dist.Bernoulli(0.4))

    def guide():
        q = pyro.param("q")
        pyro.sample(
            "x1",
            dist.Bernoulli(q),
            infer={"enumerate": enumerate1, "num_samples": num_samples1},
        )
        pyro.sample(
            "x2",
            dist.Bernoulli(q),
            infer={"enumerate": enumerate2, "num_samples": num_samples2},
        )

    kl = sum(kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p)) for p in [0.2, 0.4])
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]

    elbo = TraceEnum_ELBO(
        num_particles=num_particles,
        vectorize_particles=True,
        strict_enumeration_warning=any([enumerate1, enumerate2]),
    )
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad

    assert_equal(
        actual_loss,
        expected_loss,
        prec=prec,
        msg="".join(
            [
                "\nexpected loss = {}".format(expected_loss),
                "\n  actual loss = {}".format(actual_loss),
            ]
        ),
    )
    assert_equal(
        actual_grad,
        expected_grad,
        prec=prec,
        msg="".join(
            [
                "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
                "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
            ]
        ),
    )


# Three Bernoulli sites; every combination of strategies must reproduce the
# closed-form sum of KLs (fully-enumerated combos get a tight tolerance).
@pytest.mark.parametrize(
    "enumerate1,enumerate2,enumerate3,num_samples",
    [
        (e1, e2, e3, num_samples)
        for e1 in [None, "sequential", "parallel"]
        for e2 in [None, "sequential", "parallel"]
        for e3 in [None, "sequential", "parallel"]
        for num_samples in [None, 10000]
        if num_samples is None or (e1, e2, e3) == ("parallel", "parallel", "parallel")
    ],
)
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_berns(method, enumerate1, enumerate2, enumerate3, num_samples):
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 10000
    prec = (
        0.001 if all([enumerate1, enumerate2, enumerate3]) and not num_samples else 0.1
    )

    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))

    def model():
        pyro.sample("x1", dist.Bernoulli(0.1))
        pyro.sample("x2", dist.Bernoulli(0.2))
        pyro.sample("x3", dist.Bernoulli(0.3))

    def guide():
        q = pyro.param("q")
        pyro.sample(
            "x1",
            dist.Bernoulli(q),
            infer={"enumerate": enumerate1, "num_samples": num_samples},
        )
        pyro.sample(
            "x2",
            dist.Bernoulli(q),
            infer={"enumerate": enumerate2, "num_samples": num_samples},
        )
        pyro.sample(
            "x3",
            dist.Bernoulli(q),
            infer={"enumerate": enumerate3, "num_samples": num_samples},
        )

    kl = sum(
        kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p)) for p in [0.1, 0.2, 0.3]
    )
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]

    elbo = TraceEnum_ELBO(
        num_particles=num_particles,
        vectorize_particles=True,
        strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]),
    )
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad

    assert_equal(
        actual_loss,
        expected_loss,
        prec=prec,
        msg="".join(
            [
                "\nexpected loss = {}".format(expected_loss),
                "\n  actual loss = {}".format(actual_loss),
            ]
        ),
    )
    assert_equal(
        actual_grad,
        expected_grad,
        prec=prec,
        msg="".join(
            [
                "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
                "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
            ]
        ),
    )


# Same idea with three Categorical sites of different cardinalities; checks
# loss and the gradient of each guide parameter against closed-form KLs.
@pytest.mark.parametrize("num_samples", [None, 2000])
@pytest.mark.parametrize("max_plate_nesting", [0, 1])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", ["sequential", "parallel"])
@pytest.mark.parametrize("enumerate3", ["sequential", "parallel"])
def test_elbo_categoricals(
    enumerate1, enumerate2, enumerate3, max_plate_nesting, num_samples
):
    pyro.clear_param_store()
    p1 = torch.tensor([0.6, 0.4])
    p2 = torch.tensor([0.3, 0.3, 0.4])
    p3 = torch.tensor([0.1, 0.2, 0.3, 0.4])
    q1 = pyro.param("q1", torch.tensor([0.4, 0.6], requires_grad=True))
    q2 = pyro.param("q2", torch.tensor([0.4, 0.3, 0.3], requires_grad=True))
    q3 = pyro.param("q3", torch.tensor([0.4, 0.3, 0.2, 0.1], requires_grad=True))

    def model():
        pyro.sample("x1", dist.Categorical(p1))
        pyro.sample("x2", dist.Categorical(p2))
        pyro.sample("x3", dist.Categorical(p3))

    def guide():
        pyro.sample(
            "x1",
            dist.Categorical(pyro.param("q1")),
            infer={
                "enumerate": enumerate1,
                "num_samples": num_samples if enumerate1 == "parallel" else None,
            },
        )
        pyro.sample(
            "x2",
            dist.Categorical(pyro.param("q2")),
            infer={
                "enumerate": enumerate2,
                "num_samples": num_samples if enumerate2 == "parallel" else None,
            },
        )
        pyro.sample(
            "x3",
            dist.Categorical(pyro.param("q3")),
            infer={
                "enumerate": enumerate3,
                "num_samples": num_samples if enumerate3 == "parallel" else None,
            },
        )

    kl = (
        kl_divergence(dist.Categorical(q1), dist.Categorical(p1))
        + kl_divergence(dist.Categorical(q2), dist.Categorical(p2))
        + kl_divergence(dist.Categorical(q3), dist.Categorical(p3))
    )
    expected_loss = kl.item()
    expected_grads = grad(kl, [q1, q2, q3])

    elbo = TraceEnum_ELBO(
        max_plate_nesting=max_plate_nesting,
        strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]),
    )
    actual_loss = elbo.loss_and_grads(model, guide)
    actual_grads = [q1.grad, q2.grad, q3.grad]

    assert_equal(
        actual_loss,
        expected_loss,
        prec=0.001 if not num_samples else 0.1,
        msg="".join(
            [
                "\nexpected loss = {}".format(expected_loss),
                "\n  actual loss = {}".format(actual_loss),
            ]
        ),
    )
    for actual_grad, expected_grad in zip(actual_grads, expected_grads):
        assert_equal(
            actual_grad,
            expected_grad,
            prec=0.001 if not num_samples else 0.1,
            msg="".join(
                [
                    "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
                    "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
                ]
            ),
        )


# Three Normal sites with enumeration-by-sampling; particle count scales up
# for every non-enumerated site to keep the Monte Carlo error within prec.
@pytest.mark.parametrize("enumerate1", [None, "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "parallel"])
@pytest.mark.parametrize("enumerate3", [None, "parallel"])
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_normals(method, enumerate1, enumerate2, enumerate3):
    pyro.clear_param_store()
    num_particles = 100 * 10 ** sum(
        1 for e in [enumerate1, enumerate2, enumerate3] if not e
    )
    prec = 0.1

    q = pyro.param("q", torch.tensor(0.0, requires_grad=True))

    def model():
        pyro.sample("x1", dist.Normal(0.25, 1.0))
        pyro.sample("x2", dist.Normal(0.5, 1.0))
        pyro.sample("x3", dist.Normal(1.0, 1.0))

    def guide():
        q = pyro.param("q")
        pyro.sample(
            "x1",
            dist.Normal(q, 1.0),
            infer={"enumerate": enumerate1, "num_samples": 10},
        )
        pyro.sample(
            "x2",
            dist.Normal(q, 1.0),
            infer={"enumerate": enumerate2, "num_samples": 10},
        )
        pyro.sample(
            "x3",
            dist.Normal(q, 1.0),
            infer={"enumerate": enumerate3, "num_samples": 10},
        )

    kl = sum(
        kl_divergence(dist.Normal(q, 1.0), dist.Normal(p, 1.0))
        for p in [0.25, 0.5, 1.0]
    )
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]

    elbo = TraceEnum_ELBO(
        num_particles=num_particles,
        vectorize_particles=True,
        strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]),
    )
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad

    assert_equal(
        actual_loss,
        expected_loss,
        prec=prec,
        msg="".join(
            [
                "\nexpected loss = {}".format(expected_loss),
                "\n  actual loss = {}".format(actual_loss),
            ]
        ),
    )
    assert_equal(
        actual_grad,
        expected_grad,
        prec=prec,
        msg="".join(
            [
                "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
                "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
            ]
        ),
    )


# One site outside a plate and one inside; the KL contribution of the
# plated site scales with the plate dimension.
@pytest.mark.parametrize(
    "enumerate1,enumerate2,num_samples",
    [
        (e1, e2, num_samples)
        for e1 in [None, "sequential", "parallel"]
        for e2 in [None, "sequential", "parallel"]
        for num_samples in [None, 10000]
        if num_samples is None or (e1, e2) == ("parallel", "parallel")
    ],
)
@pytest.mark.parametrize("plate_dim", [1, 2])
def test_elbo_plate(plate_dim, enumerate1, enumerate2, num_samples):
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2]) else 10000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5

    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("y", dist.Bernoulli(p).expand_by([num_particles]))
            with pyro.plate("plate", plate_dim):
                pyro.sample(
                    "z", dist.Bernoulli(p).expand_by([plate_dim, num_particles])
                )

    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample(
                "y",
                dist.Bernoulli(q).expand_by([num_particles]),
                infer={"enumerate": enumerate1, "num_samples": num_samples},
            )
            with pyro.plate("plate", plate_dim):
                pyro.sample(
                    "z",
                    dist.Bernoulli(q).expand_by([plate_dim, num_particles]),
                    infer={"enumerate": enumerate2, "num_samples": num_samples},
                )

    kl = (1 + plate_dim) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]

    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param("q").grad / num_particles

    assert_equal(
        actual_loss,
        expected_loss,
        prec=0.1,
        msg="".join(
            [
                "\nexpected loss = {}".format(expected_loss),
                "\n  actual loss = {}".format(actual_loss),
            ]
        ),
    )
    assert_equal(
        actual_grad,
        expected_grad,
        prec=0.1,
        msg="".join(
            [
                "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
                "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
            ]
        ),
    )


@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("plate_dim", [1, 2])
def test_elbo_iplate(plate_dim, enumerate1, enumerate2):
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2]) else 20000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5

    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            # Sequential plate: one sample site "y_i" per element, sharing p.
            for i in pyro.plate("plate", plate_dim):
                pyro.sample(
                    "y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles])
                )

    def guide():
        # Mean-field guide mirroring the model; each site carries its own
        # enumeration strategy so every combination gets exercised.
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample(
                "x",
                dist.Bernoulli(q).expand_by([num_particles]),
                infer={"enumerate": enumerate1},
            )
            for i in pyro.plate("plate", plate_dim):
                pyro.sample(
                    "y_{}".format(i),
                    dist.Bernoulli(q).expand_by([num_particles]),
                    infer={"enumerate": enumerate2},
                )

    # Analytic reference: (1 + plate_dim) identical Bernoulli KL terms --
    # one for "x" plus one per sequential-plate site "y_i".
    kl = (1 + plate_dim) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]

    # Estimate with TraceEnum_ELBO, then normalize by the particle count so
    # the result is comparable to the single-datum analytic KL.
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param("q").grad / num_particles

    assert_equal(
        actual_loss,
        expected_loss,
        prec=0.1,
        msg="".join(
            [
                "\nexpected loss = {}".format(expected_loss),
                "\n actual loss = {}".format(actual_loss),
            ]
        ),
    )
    assert_equal(
        actual_grad,
        expected_grad,
        prec=0.1,
        msg="".join(
            [
                "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
                "\n actual grad = {}".format(actual_grad.detach().cpu().numpy()),
            ]
        ),
    )


@pytest.mark.parametrize(
    "enumerate1,enumerate2,enumerate3,enumerate4,num_samples",
    [
        (e1, e2, e3, e4, num_samples)
        for e1 in [None, "sequential", "parallel"]
        for e2 in [None, "sequential", "parallel"]
        for e3 in [None, "sequential", "parallel"]
        for e4 in [None, "sequential", "parallel"]
        for num_samples in [None, 10000]
        # Monte Carlo enumeration (num_samples set) only with all-parallel sites.
        if num_samples is None or (e1, e2, e3, e4) == ("parallel",) * 4
    ],
)
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [2])
def test_elbo_plate_plate(
    outer_dim, inner_dim, enumerate1, enumerate2, enumerate3, enumerate4, num_samples
):
    """Compare TraceEnum_ELBO to the analytic mean-field KL for sites lying in
    neither, one, the other, and both of two nested vectorized plates."""
    pyro.clear_param_store()
    # Exact when every site is enumerated; otherwise average many particles.
    num_particles = (
        1 if all([enumerate1, enumerate2, enumerate3, enumerate4]) else 100000
    )
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5

    def model():
d = dist.Bernoulli(p) context1 = pyro.plate("outer", outer_dim, dim=-1) context2 = pyro.plate("inner", inner_dim, dim=-2) pyro.sample("w", d) with context1: pyro.sample("x", d) with context2: pyro.sample("y", d) with context1, context2: pyro.sample("z", d) def guide(): d = dist.Bernoulli(pyro.param("q")) context1 = pyro.plate("outer", outer_dim, dim=-1) context2 = pyro.plate("inner", inner_dim, dim=-2) pyro.sample("w", d, infer={"enumerate": enumerate1, "num_samples": num_samples}) with context1: pyro.sample( "x", d, infer={"enumerate": enumerate2, "num_samples": num_samples} ) with context2: pyro.sample( "y", d, infer={"enumerate": enumerate3, "num_samples": num_samples} ) with context1, context2: pyro.sample( "z", d, infer={"enumerate": enumerate4, "num_samples": num_samples} ) kl_node = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p)) kl = (1 + outer_dim + inner_dim + outer_dim * inner_dim) * kl_node expected_loss = kl.item() expected_grad = grad(kl, [q])[0] elbo = TraceEnum_ELBO( num_particles=num_particles, vectorize_particles=True, strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]), ) actual_loss = elbo.loss_and_grads(model, guide) actual_grad = pyro.param("q").grad assert_equal( actual_loss, expected_loss, prec=0.1, msg="".join( [ "\nexpected loss = {}".format(expected_loss), "\n actual loss = {}".format(actual_loss), ] ), ) assert_equal( actual_grad, expected_grad, prec=0.1, msg="".join( [ "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()), "\n actual grad = {}".format(actual_grad.detach().cpu().numpy()), ] ), ) @pytest.mark.parametrize( "enumerate1,enumerate2,enumerate3,num_samples", [ (e1, e2, e3, num_samples) for e1 in [None, "sequential", "parallel"] for e2 in [None, "sequential", "parallel"] for e3 in [None, "sequential", "parallel"] for num_samples in [None, 2000] if num_samples is None or (e1, e2, e3) == ("parallel",) * 3 ], ) @pytest.mark.parametrize("inner_dim", [2]) @pytest.mark.parametrize("outer_dim", [3]) 
def test_elbo_plate_iplate(
    outer_dim, inner_dim, enumerate1, enumerate2, enumerate3, num_samples
):
    """ELBO loss/gradient test with a vectorized plate ("outer") containing a
    sequential plate ("inner"); checked against the analytic Bernoulli KL."""
    pyro.clear_param_store()
    # Exact when every site is enumerated; otherwise average many particles.
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 100000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5

    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            with pyro.plate("outer", outer_dim):
                pyro.sample(
                    "y", dist.Bernoulli(p).expand_by([outer_dim, num_particles])
                )
                # Sequential plate nested inside the vectorized "outer" plate.
                for i in pyro.plate("inner", inner_dim):
                    pyro.sample(
                        "z_{}".format(i),
                        dist.Bernoulli(p).expand_by([outer_dim, num_particles]),
                    )

    def guide():
        # Mean-field guide mirroring the model; each site carries its own
        # enumeration strategy and optional Monte Carlo sample count.
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample(
                "x",
                dist.Bernoulli(q).expand_by([num_particles]),
                infer={"enumerate": enumerate1, "num_samples": num_samples},
            )
            with pyro.plate("outer", outer_dim):
                pyro.sample(
                    "y",
                    dist.Bernoulli(q).expand_by([outer_dim, num_particles]),
                    infer={"enumerate": enumerate2, "num_samples": num_samples},
                )
                for i in pyro.plate("inner", inner_dim):
                    pyro.sample(
                        "z_{}".format(i),
                        dist.Bernoulli(q).expand_by([outer_dim, num_particles]),
                        infer={"enumerate": enumerate3, "num_samples": num_samples},
                    )

    # Analytic reference: one KL term for "x" plus outer_dim * (1 + inner_dim)
    # identical terms for "y" and the "z_i" sites.
    kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence(
        dist.Bernoulli(q), dist.Bernoulli(p)
    )
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]

    # Estimate with TraceEnum_ELBO, normalized by the particle count.
    elbo = TraceEnum_ELBO(
        strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3])
    )
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param("q").grad / num_particles

    assert_equal(
        actual_loss,
        expected_loss,
        prec=0.1,
        msg="".join(
            [
                "\nexpected loss = {}".format(expected_loss),
                "\n actual loss = {}".format(actual_loss),
            ]
        ),
    )
    assert_equal(
        actual_grad,
        expected_grad,
        prec=0.1,
        msg="".join(
            [
                "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
                "\n actual grad = {}".format(actual_grad.detach().cpu().numpy()),
            ]
), ) @pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"]) @pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"]) @pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"]) @pytest.mark.parametrize("inner_dim", [2]) @pytest.mark.parametrize("outer_dim", [2]) def test_elbo_iplate_plate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3): pyro.clear_param_store() num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 50000 q = pyro.param("q", torch.tensor(0.75, requires_grad=True)) p = 0.2693204236205713 # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5 def model(): with pyro.plate("particles", num_particles): pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles])) inner_plate = pyro.plate("inner", inner_dim) for i in pyro.plate("outer", outer_dim): pyro.sample( "y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]) ) with inner_plate: pyro.sample( "z_{}".format(i), dist.Bernoulli(p).expand_by([inner_dim, num_particles]), ) def guide(): q = pyro.param("q") with pyro.plate("particles", num_particles): pyro.sample( "x", dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate1}, ) inner_plate = pyro.plate("inner", inner_dim) for i in pyro.plate("outer", outer_dim): pyro.sample( "y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate2}, ) with inner_plate: pyro.sample( "z_{}".format(i), dist.Bernoulli(q).expand_by([inner_dim, num_particles]), infer={"enumerate": enumerate3}, ) kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence( dist.Bernoulli(q), dist.Bernoulli(p) ) expected_loss = kl.item() expected_grad = grad(kl, [q])[0] elbo = TraceEnum_ELBO( strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]) ) actual_loss = elbo.loss_and_grads(model, guide) / num_particles actual_grad = pyro.param("q").grad / num_particles assert_equal( actual_loss, expected_loss, prec=0.1, msg="".join( [ "\nexpected loss = 
{}".format(expected_loss), "\n actual loss = {}".format(actual_loss), ] ), ) assert_equal( actual_grad, expected_grad, prec=0.1, msg="".join( [ "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()), "\n actual grad = {}".format(actual_grad.detach().cpu().numpy()), ] ), ) @pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"]) @pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"]) @pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"]) @pytest.mark.parametrize("inner_dim", [2]) @pytest.mark.parametrize("outer_dim", [2]) def test_elbo_iplate_iplate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3): pyro.clear_param_store() num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 150000 q = pyro.param("q", torch.tensor(0.75, requires_grad=True)) p = 0.2693204236205713 # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5 def model(): with pyro.plate("particles", num_particles): pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles])) inner_iplate = pyro.plate("inner", outer_dim) for i in pyro.plate("outer", inner_dim): pyro.sample( "y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]) ) for j in inner_iplate: pyro.sample( "z_{}_{}".format(i, j), dist.Bernoulli(p).expand_by([num_particles]), ) def guide(): q = pyro.param("q") with pyro.plate("particles", num_particles): pyro.sample( "x", dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate1}, ) inner_iplate = pyro.plate("inner", inner_dim) for i in pyro.plate("outer", outer_dim): pyro.sample( "y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate2}, ) for j in inner_iplate: pyro.sample( "z_{}_{}".format(i, j), dist.Bernoulli(q).expand_by([num_particles]), infer={"enumerate": enumerate3}, ) kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence( dist.Bernoulli(q), dist.Bernoulli(p) ) expected_loss = kl.item() expected_grad = grad(kl, [q])[0] elbo = TraceEnum_ELBO( 
strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]) ) actual_loss = elbo.loss_and_grads(model, guide) / num_particles actual_grad = pyro.param("q").grad / num_particles assert_equal( actual_loss, expected_loss, prec=0.1, msg="".join( [ "\nexpected loss = {}".format(expected_loss), "\n actual loss = {}".format(actual_loss), ] ), ) assert_equal( actual_grad, expected_grad, prec=0.2, msg="".join( [ "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()), "\n actual grad = {}".format(actual_grad.detach().cpu().numpy()), ] ), ) @pytest.mark.parametrize("pi1", [0.33, 0.43]) @pytest.mark.parametrize("pi2", [0.55, 0.27]) @pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"]) def test_non_mean_field_bern_bern_elbo_gradient(enumerate1, pi1, pi2): pyro.clear_param_store() num_particles = 1 if enumerate1 else 20000 def model(): with pyro.plate("particles", num_particles): y = pyro.sample("y", dist.Bernoulli(0.33).expand_by([num_particles])) pyro.sample("z", dist.Bernoulli(0.55 * y + 0.10)) def guide(): q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True)) q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True)) with pyro.plate("particles", num_particles): y = pyro.sample("y", dist.Bernoulli(q1).expand_by([num_particles])) pyro.sample("z", dist.Bernoulli(q2 * y + 0.10)) logger.info("Computing gradients using surrogate loss") elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1])) elbo.loss_and_grads(model, config_enumerate(guide, default=enumerate1)) actual_grad_q1 = pyro.param("q1").grad / num_particles actual_grad_q2 = pyro.param("q2").grad / num_particles logger.info("Computing analytic gradients") q1 = torch.tensor(pi1, requires_grad=True) q2 = torch.tensor(pi2, requires_grad=True) elbo = kl_divergence(dist.Bernoulli(q1), dist.Bernoulli(0.33)) elbo = elbo + q1 * kl_divergence(dist.Bernoulli(q2 + 0.10), dist.Bernoulli(0.65)) elbo = elbo + (1.0 - q1) * kl_divergence(dist.Bernoulli(0.10), 
dist.Bernoulli(0.10)) expected_grad_q1, expected_grad_q2 = grad(elbo, [q1, q2]) prec = 0.03 if enumerate1 is None else 0.001 assert_equal( actual_grad_q1, expected_grad_q1, prec=prec, msg="".join( [ "\nq1 expected = {}".format(expected_grad_q1.data.cpu().numpy()), "\nq1 actual = {}".format(actual_grad_q1.data.cpu().numpy()), ] ), ) assert_equal( actual_grad_q2, expected_grad_q2, prec=prec, msg="".join( [ "\nq2 expected = {}".format(expected_grad_q2.data.cpu().numpy()), "\nq2 actual = {}".format(actual_grad_q2.data.cpu().numpy()), ] ), ) @pytest.mark.parametrize("pi1", [0.33, 0.44]) @pytest.mark.parametrize("pi2", [0.55, 0.39]) @pytest.mark.parametrize("pi3", [0.22, 0.29]) @pytest.mark.parametrize( "enumerate1,num_samples", [ (None, None), ("sequential", None), ("parallel", None), ("parallel", 2), ], ) def test_non_mean_field_bern_normal_elbo_gradient( enumerate1, pi1, pi2, pi3, num_samples ): pyro.clear_param_store() include_z = True num_particles = 10000 def model(): with pyro.plate("particles", num_particles): q3 = pyro.param("q3", torch.tensor(pi3, requires_grad=True)) y = pyro.sample("y", dist.Bernoulli(q3).expand_by([num_particles])) if include_z: pyro.sample("z", dist.Normal(0.55 * y + q3, 1.0)) def guide(): q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True)) q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True)) with pyro.plate("particles", num_particles): y = pyro.sample( "y", dist.Bernoulli(q1).expand_by([num_particles]), infer={"enumerate": enumerate1}, ) if include_z: pyro.sample("z", dist.Normal(q2 * y + 0.10, 1.0)) logger.info("Computing gradients using surrogate loss") elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1])) elbo.loss_and_grads(model, guide) actual_grad_q1 = pyro.param("q1").grad / num_particles if include_z: actual_grad_q2 = pyro.param("q2").grad / num_particles actual_grad_q3 = pyro.param("q3").grad / num_particles logger.info("Computing analytic gradients") q1 = torch.tensor(pi1, requires_grad=True) q2 = 
torch.tensor(pi2, requires_grad=True) q3 = torch.tensor(pi3, requires_grad=True) elbo = kl_divergence(dist.Bernoulli(q1), dist.Bernoulli(q3)) if include_z: elbo = elbo + q1 * kl_divergence( dist.Normal(q2 + 0.10, 1.0), dist.Normal(q3 + 0.55, 1.0) ) elbo = elbo + (1.0 - q1) * kl_divergence( dist.Normal(0.10, 1.0), dist.Normal(q3, 1.0) ) expected_grad_q1, expected_grad_q2, expected_grad_q3 = grad(elbo, [q1, q2, q3]) else: expected_grad_q1, expected_grad_q3 = grad(elbo, [q1, q3]) prec = 0.04 if enumerate1 is None else 0.02 assert_equal( actual_grad_q1, expected_grad_q1, prec=prec, msg="".join( [ "\nq1 expected = {}".format(expected_grad_q1.data.cpu().numpy()), "\nq1 actual = {}".format(actual_grad_q1.data.cpu().numpy()), ] ), ) if include_z: assert_equal( actual_grad_q2, expected_grad_q2, prec=prec, msg="".join( [ "\nq2 expected = {}".format(expected_grad_q2.data.cpu().numpy()), "\nq2 actual = {}".format(actual_grad_q2.data.cpu().numpy()), ] ), ) assert_equal( actual_grad_q3, expected_grad_q3, prec=prec, msg="".join( [ "\nq3 expected = {}".format(expected_grad_q3.data.cpu().numpy()), "\nq3 actual = {}".format(actual_grad_q3.data.cpu().numpy()), ] ), ) @pytest.mark.parametrize("pi1", [0.33, 0.41]) @pytest.mark.parametrize("pi2", [0.44, 0.17]) @pytest.mark.parametrize("pi3", [0.22, 0.29]) def test_non_mean_field_normal_bern_elbo_gradient(pi1, pi2, pi3): def model(num_particles): with pyro.plate("particles", num_particles): q3 = pyro.param("q3", torch.tensor(pi3, requires_grad=True)) q4 = pyro.param("q4", torch.tensor(0.5 * (pi1 + pi2), requires_grad=True)) z = pyro.sample("z", dist.Normal(q3, 1.0).expand_by([num_particles])) zz = torch.exp(z) / (1.0 + torch.exp(z)) pyro.sample("y", dist.Bernoulli(q4 * zz)) def guide(num_particles): q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True)) q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True)) with pyro.plate("particles", num_particles): z = pyro.sample("z", dist.Normal(q2, 1.0).expand_by([num_particles])) zz = 
torch.exp(z) / (1.0 + torch.exp(z)) pyro.sample("y", dist.Bernoulli(q1 * zz)) qs = ["q1", "q2", "q3", "q4"] results = {} for ed, num_particles in zip( [None, "parallel", "sequential"], [30000, 20000, 20000] ): pyro.clear_param_store() elbo = TraceEnum_ELBO(strict_enumeration_warning=any([ed])) elbo.loss_and_grads(model, config_enumerate(guide, default=ed), num_particles) results[str(ed)] = {} for q in qs: results[str(ed)]["actual_grad_%s" % q] = ( pyro.param(q).grad.detach().cpu().numpy() / num_particles ) prec = 0.03 for ed in ["parallel", "sequential"]: logger.info("\n*** {} ***".format(ed)) for q in qs: logger.info("[{}] actual: {}".format(q, results[ed]["actual_grad_%s" % q])) assert_equal( results[ed]["actual_grad_%s" % q], results["None"]["actual_grad_%s" % q], prec=prec, msg="".join( [ "\nexpected (MC estimate) = {}".format( results["None"]["actual_grad_%s" % q] ), "\n actual ({} estimate) = {}".format( ed, results[ed]["actual_grad_%s" % q] ), ] ), ) @pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"]) def test_elbo_rsvi(enumerate1): pyro.clear_param_store() num_particles = 40000 prec = 0.01 if enumerate1 else 0.022 q = pyro.param("q", torch.tensor(0.5, requires_grad=True)) a = pyro.param("a", torch.tensor(1.5, requires_grad=True)) kl1 = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(0.25)) kl2 = kl_divergence(dist.Gamma(a, 1.0), dist.Gamma(0.5, 1.0)) def model(): with pyro.plate("particles", num_particles): pyro.sample("z", dist.Bernoulli(0.25).expand_by([num_particles])) pyro.sample("y", dist.Gamma(0.50, 1.0).expand_by([num_particles])) @config_enumerate(default=enumerate1) def guide(): q = pyro.param("q") a = pyro.param("a") with pyro.plate("particles", num_particles): pyro.sample("z", dist.Bernoulli(q).expand_by([num_particles])) pyro.sample( "y", ShapeAugmentedGamma(a, torch.tensor(1.0)).expand_by([num_particles]), ) elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1])) elbo.loss_and_grads(model, guide) actual_q = 
q.grad / num_particles expected_q = grad(kl1, [q])[0] assert_equal( actual_q, expected_q, prec=prec, msg="".join( [ "\nexpected q.grad = {}".format(expected_q.detach().cpu().numpy()), "\n actual q.grad = {}".format(actual_q.detach().cpu().numpy()), ] ), ) actual_a = a.grad / num_particles expected_a = grad(kl2, [a])[0] assert_equal( actual_a, expected_a, prec=prec, msg="".join( [ "\nexpected a.grad= {}".format(expected_a.detach().cpu().numpy()), "\n actual a.grad = {}".format(actual_a.detach().cpu().numpy()), ] ), ) @pytest.mark.parametrize( "enumerate1,num_steps,expand", [ ("sequential", 2, True), ("sequential", 2, False), ("sequential", 3, True), ("sequential", 3, False), ("parallel", 2, True), ("parallel", 2, False), ("parallel", 3, True), ("parallel", 3, False), ("parallel", 10, False), ("parallel", 20, False), _skip_cuda("parallel", 30, False), ], ) def test_elbo_hmm_in_model(enumerate1, num_steps, expand): pyro.clear_param_store() data = torch.ones(num_steps) init_probs = torch.tensor([0.5, 0.5]) def model(data): transition_probs = pyro.param( "transition_probs", torch.tensor([[0.9, 0.1], [0.1, 0.9]]), constraint=constraints.simplex, ) locs = pyro.param("obs_locs", torch.tensor([-1.0, 1.0])) scale = pyro.param( "obs_scale", torch.tensor(1.0), constraint=constraints.positive ) x = None for i, y in pyro.markov(enumerate(data)): probs = init_probs if x is None else transition_probs[x] x = pyro.sample("x_{}".format(i), dist.Categorical(probs)) pyro.sample("y_{}".format(i), dist.Normal(locs[x], scale), obs=y) @config_enumerate(default=enumerate1, expand=expand) def guide(data): mean_field_probs = pyro.param( "mean_field_probs", torch.ones(num_steps, 2) / 2, constraint=constraints.simplex, ) for i in pyro.markov(range(num_steps)): pyro.sample("x_{}".format(i), dist.Categorical(mean_field_probs[i])) elbo = TraceEnum_ELBO() elbo.loss_and_grads(model, guide, data) expected_unconstrained_grads = { "transition_probs": torch.tensor([[0.2, -0.2], [-0.2, 0.2]]) * 
(num_steps - 1), "obs_locs": torch.tensor([-num_steps, 0]), "obs_scale": torch.tensor(-num_steps), "mean_field_probs": torch.tensor([[0.5, -0.5]] * num_steps), } for name, value in pyro.get_param_store().named_parameters(): actual = value.grad expected = expected_unconstrained_grads[name] assert_equal( actual, expected, msg="".join( [ "\nexpected {}.grad = {}".format(name, expected.cpu().numpy()), "\n actual {}.grad = {}".format( name, actual.detach().cpu().numpy() ), ] ), ) @pytest.mark.parametrize( "enumerate1,num_steps,expand", [ ("sequential", 2, True), ("sequential", 2, False), ("sequential", 3, True), ("sequential", 3, False), ("parallel", 2, True), ("parallel", 2, False), ("parallel", 3, True), ("parallel", 3, False), ("parallel", 10, False), ("parallel", 20, False), _skip_cuda("parallel", 30, False), _skip_cuda("parallel", 40, False), _skip_cuda("parallel", 50, False), ], ) def test_elbo_hmm_in_guide(enumerate1, num_steps, expand): pyro.clear_param_store() data = torch.ones(num_steps) init_probs = torch.tensor([0.5, 0.5]) def model(data): transition_probs = pyro.param( "transition_probs", torch.tensor([[0.75, 0.25], [0.25, 0.75]]), constraint=constraints.simplex, ) emission_probs = pyro.param( "emission_probs", torch.tensor([[0.75, 0.25], [0.25, 0.75]]), constraint=constraints.simplex, ) x = None for i, y in pyro.markov(enumerate(data)): probs = init_probs if x is None else transition_probs[x] x = pyro.sample("x_{}".format(i), dist.Categorical(probs)) pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=y) @config_enumerate(default=enumerate1, expand=expand) def guide(data): transition_probs = pyro.param( "transition_probs", torch.tensor([[0.75, 0.25], [0.25, 0.75]]), constraint=constraints.simplex, ) x = None for i, y in pyro.markov(enumerate(data)): probs = init_probs if x is None else transition_probs[x] x = pyro.sample("x_{}".format(i), dist.Categorical(probs)) elbo = TraceEnum_ELBO() elbo.loss_and_grads(model, guide, data) # These 
golden values simply test agreement between parallel and sequential. expected_grads = { 2: { "transition_probs": [[0.1029949, -0.1029949], [0.1029949, -0.1029949]], "emission_probs": [[0.75, -0.75], [0.25, -0.25]], }, 3: { "transition_probs": [[0.25748726, -0.25748726], [0.25748726, -0.25748726]], "emission_probs": [[1.125, -1.125], [0.375, -0.375]], }, 10: { "transition_probs": [[1.64832076, -1.64832076], [1.64832076, -1.64832076]], "emission_probs": [[3.75, -3.75], [1.25, -1.25]], }, 20: { "transition_probs": [[3.70781687, -3.70781687], [3.70781687, -3.70781687]], "emission_probs": [[7.5, -7.5], [2.5, -2.5]], }, 22: { "transition_probs": [[4.11979618, -4.11979618], [4.11979618, -4.11979618]], "emission_probs": [[8.25, -8.25], [2.75, -2.75]], }, 30: { "transition_probs": [[5.76771452, -5.76771452], [5.76771452, -5.76771452]], "emission_probs": [[11.25, -11.25], [3.75, -3.75]], }, } if num_steps not in expected_grads: return for name, value in pyro.get_param_store().named_parameters(): actual = value.grad expected = torch.tensor(expected_grads[num_steps][name]) assert_equal( actual, expected, msg="".join( [ "\nexpected {}.grad = {}".format(name, expected.cpu().numpy()), "\n actual {}.grad = {}".format( name, actual.detach().cpu().numpy() ), ] ), ) @pytest.mark.parametrize("num_steps", [2, 3, 4, 5, 10, 20, _skip_cuda(30)]) def test_hmm_enumerate_model(num_steps): data = dist.Categorical(torch.tensor([0.5, 0.5])).sample((num_steps,)) @config_enumerate def model(data): transition_probs = pyro.param( "transition_probs", torch.tensor([[0.75, 0.25], [0.25, 0.75]]), constraint=constraints.simplex, ) emission_probs = pyro.param( "emission_probs", torch.tensor([[0.75, 0.25], [0.25, 0.75]]), constraint=constraints.simplex, ) x = 0 for t, y in pyro.markov(enumerate(data)): x = pyro.sample("x_{}".format(t), dist.Categorical(transition_probs[x])) pyro.sample("y_{}".format(t), dist.Categorical(emission_probs[x]), obs=y) logger.debug("{}\t{}".format(t, tuple(x.shape))) def 
guide(data): pass elbo = TraceEnum_ELBO() elbo.differentiable_loss(model, guide, data) @pytest.mark.parametrize("num_steps", [2, 3, 4, 5, 10, 20, _skip_cuda(30)]) def test_hmm_enumerate_model_and_guide(num_steps): data = dist.Categorical(torch.tensor([0.5, 0.5])).sample((num_steps,)) def model(data): transition_probs = pyro.param( "transition_probs", torch.tensor([[0.75, 0.25], [0.25, 0.75]]), constraint=constraints.simplex, ) emission_probs = pyro.param( "emission_probs", torch.tensor([[0.75, 0.25], [0.25, 0.75]]), constraint=constraints.simplex, ) x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5]))) logger.debug("-1\t{}".format(tuple(x.shape))) for t, y in pyro.markov(enumerate(data)): x = pyro.sample( "x_{}".format(t), dist.Categorical(transition_probs[x]), infer={"enumerate": "parallel"}, ) pyro.sample("y_{}".format(t), dist.Categorical(emission_probs[x]), obs=y) logger.debug("{}\t{}".format(t, tuple(x.shape))) def guide(data): init_probs = pyro.param( "init_probs", torch.tensor([0.75, 0.25]), constraint=constraints.simplex ) pyro.sample("x", dist.Categorical(init_probs), infer={"enumerate": "parallel"}) elbo = TraceEnum_ELBO() elbo.differentiable_loss(model, guide, data) def _check_loss_and_grads(expected_loss, actual_loss): assert_equal( actual_loss, expected_loss, msg="Expected:\n{}\nActual:\n{}".format( expected_loss.detach().cpu().numpy(), actual_loss.detach().cpu().numpy() ), ) names = pyro.get_param_store().keys() params = [pyro.param(name).unconstrained() for name in names] actual_grads = grad(actual_loss, params, allow_unused=True, retain_graph=True) expected_grads = grad(expected_loss, params, allow_unused=True, retain_graph=True) for name, actual_grad, expected_grad in zip(names, actual_grads, expected_grads): if actual_grad is None or expected_grad is None: continue assert_equal( actual_grad, expected_grad, msg="{}\nExpected:\n{}\nActual:\n{}".format( name, expected_grad.detach().cpu().numpy(), actual_grad.detach().cpu().numpy(), ), ) 
@pytest.mark.parametrize("scale", [1, 10])
def test_elbo_enumerate_1(scale):
    """Enumerating y in the model should match a hand model that simply
    drops y (y has no downstream dependents, so it marginalizes to 1)."""
    pyro.param(
        "guide_probs_x", torch.tensor([0.1, 0.9]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_x", torch.tensor([0.4, 0.6]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_y",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_z", torch.tensor([0.3, 0.7]), constraint=constraints.simplex
    )

    @poutine.scale(scale=scale)
    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        pyro.sample("y", dist.Categorical(probs_y[x]), infer={"enumerate": "parallel"})
        pyro.sample("z", dist.Categorical(probs_z), obs=torch.tensor(0))

    @poutine.scale(scale=scale)
    def hand_model():
        # y marginalized out by hand: since nothing depends on y, it vanishes.
        probs_x = pyro.param("model_probs_x")
        probs_z = pyro.param("model_probs_z")
        pyro.sample("x", dist.Categorical(probs_x))
        pyro.sample("z", dist.Categorical(probs_z), obs=torch.tensor(0))

    @config_enumerate
    @poutine.scale(scale=scale)
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    # hand_model contains no enumerated model-side sites, so suppress the
    # strict enumeration warning.
    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
def test_elbo_enumerate_2(scale):
    """Enumerating y in the model should match hand-marginalizing y out of
    the x -> y -> z chain via the matrix product probs_y @ probs_z."""
    pyro.param(
        "guide_probs_x", torch.tensor([0.1, 0.9]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_x", torch.tensor([0.4, 0.6]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_y",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_z",
        torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
        constraint=constraints.simplex,
    )

    @poutine.scale(scale=scale)
    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        y = pyro.sample(
            "y", dist.Categorical(probs_y[x]), infer={"enumerate": "parallel"}
        )
        pyro.sample("z", dist.Categorical(probs_z[y]), obs=torch.tensor(0))

    @poutine.scale(scale=scale)
    def hand_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        # Analytic marginalization of y: p(z|x) = sum_y p(y|x) p(z|y).
        probs_yz = probs_y.mm(probs_z)
        x = pyro.sample("x", dist.Categorical(probs_x))
        pyro.sample("z", dist.Categorical(probs_yz[x]), obs=torch.tensor(0))

    @config_enumerate
    @poutine.scale(scale=scale)
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
def test_elbo_enumerate_3(scale):
    """Like test_elbo_enumerate_2 but the scale is applied by an inner
    poutine.scale context rather than a decorator (so x is unscaled)."""
    pyro.param(
        "guide_probs_x", torch.tensor([0.1, 0.9]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_x", torch.tensor([0.4, 0.6]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_y",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_z",
        torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
        constraint=constraints.simplex,
    )

    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            y = pyro.sample(
                "y", dist.Categorical(probs_y[x]), infer={"enumerate": "parallel"}
            )
            pyro.sample("z", dist.Categorical(probs_z[y]), obs=torch.tensor(0))

    def hand_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        # Analytic marginalization of y, as in test_elbo_enumerate_2.
        probs_yz = probs_y.mm(probs_z)
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            pyro.sample("z",
                        dist.Categorical(probs_yz[x]), obs=torch.tensor(0))

    @config_enumerate
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    # hand_model contains no enumerated model-side sites.
    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
@pytest.mark.parametrize(
    "num_samples,num_masked",
    [(1, 1), (2, 2), (3, 2)],
    ids=["single", "batch", "masked"],
)
def test_elbo_enumerate_plate_1(num_samples, num_masked, scale):
    """Vectorized plate over z should match a sequential plate, with
    optional masking of the tail of the data."""
    #             +---------+
    #  x ----> y ----> z    |
    #             |       N |
    #             +---------+
    pyro.param(
        "guide_probs_x", torch.tensor([0.1, 0.9]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_x", torch.tensor([0.4, 0.6]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_y",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_z",
        torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
        constraint=constraints.simplex,
    )

    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            y = pyro.sample(
                "y", dist.Categorical(probs_y[x]), infer={"enumerate": "parallel"}
            )
            if num_masked == num_samples:
                # No masking needed: observe the whole batch.
                with pyro.plate("data", len(data)):
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
            else:
                # Mask out the tail beyond num_masked.
                with pyro.plate("data", len(data)):
                    with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                        pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)

    def hand_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            y = pyro.sample(
                "y", dist.Categorical(probs_y[x]), infer={"enumerate": "parallel"}
            )
            # Sequential plate only over the unmasked prefix.
            for i in pyro.plate("data", num_masked):
                pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])

    @config_enumerate
    def guide(data):
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    # The vectorized model needs one plate dim; the sequential one needs none.
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
@pytest.mark.parametrize(
    "num_samples,num_masked",
    [(1, 1), (2, 2), (3, 2)],
    ids=["single", "batch", "masked"],
)
def test_elbo_enumerate_plate_2(num_samples, num_masked, scale):
    """Vectorized plate over both y and z should match a sequential plate."""
    #      +-----------------+
    #  x ----> y ----> z     |
    #      |               N |
    #      +-----------------+
    pyro.param(
        "guide_probs_x", torch.tensor([0.1, 0.9]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_x", torch.tensor([0.4, 0.6]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_y",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_z",
        torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
        constraint=constraints.simplex,
    )

    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            with pyro.plate("data", len(data)):
                if num_masked == num_samples:
                    y = pyro.sample(
                        "y",
                        dist.Categorical(probs_y[x]),
                        infer={"enumerate": "parallel"},
                    )
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
                else:
                    with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                        y = pyro.sample(
                            "y",
                            dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"},
                        )
                        pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)

    def hand_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x",
                        dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            # Sequential version: one (y_i, z_i) pair per unmasked datum.
            for i in pyro.plate("data", num_masked):
                y = pyro.sample(
                    "y_{}".format(i),
                    dist.Categorical(probs_y[x]),
                    infer={"enumerate": "parallel"},
                )
                pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])

    @config_enumerate
    def guide(data):
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
@pytest.mark.parametrize(
    "num_samples,num_masked",
    [(1, 1), (2, 2), (3, 2)],
    ids=["single", "batch", "masked"],
)
def test_elbo_enumerate_plate_3(num_samples, num_masked, scale):
    """Guide-side x inside the plate: the whole x -> y -> z chain lives in
    one plate, vectorized vs sequential."""
    #  +-----------------------+
    #  |  x ----> y ----> z    |
    #  |                  N    |
    #  +-----------------------+
    # This plate should remain unreduced since all enumeration is in a single plate.
    pyro.param(
        "guide_probs_x", torch.tensor([0.1, 0.9]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_x", torch.tensor([0.4, 0.6]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_y",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_z",
        torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
        constraint=constraints.simplex,
    )

    @poutine.scale(scale=scale)
    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        with pyro.plate("data", len(data)):
            if num_masked == num_samples:
                x = pyro.sample("x", dist.Categorical(probs_x))
                y = pyro.sample(
                    "y", dist.Categorical(probs_y[x]), infer={"enumerate": "parallel"}
                )
                pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
            else:
                with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                    x = pyro.sample("x", dist.Categorical(probs_x))
                    y = pyro.sample(
                        "y",
                        dist.Categorical(probs_y[x]),
                        infer={"enumerate": "parallel"},
                    )
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)

    @poutine.scale(scale=scale)
    @config_enumerate
    def auto_guide(data):
        probs_x = pyro.param("guide_probs_x")
        with pyro.plate("data", len(data)):
            if num_masked == num_samples:
                pyro.sample("x", dist.Categorical(probs_x))
            else:
                with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                    pyro.sample("x", dist.Categorical(probs_x))

    @poutine.scale(scale=scale)
    def hand_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        for i in pyro.plate("data", num_masked):
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs_x))
            y = pyro.sample(
                "y_{}".format(i),
                dist.Categorical(probs_y[x]),
                infer={"enumerate": "parallel"},
            )
            pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])

    @poutine.scale(scale=scale)
    @config_enumerate
    def hand_guide(data):
        probs_x = pyro.param("guide_probs_x")
        for i in pyro.plate("data", num_masked):
            pyro.sample("x_{}".format(i), dist.Categorical(probs_x))

    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
@pytest.mark.parametrize(
    "outer_obs,inner_obs", [(False, True), (True, False), (True, True)]
)
def test_elbo_enumerate_plate_4(outer_obs, inner_obs, scale):
    """Enumerated a (outside the plate) and b (inside) with Normal
    observations both outside and inside the plate."""
    #    a ---> outer_obs
    #      \
    #  +-----\------------------+
    #  |       \                |
    #  |    b ---> inner_obs N=2|
    #  +------------------------+
    # This tests two different observations, one outside and one inside a plate.
    pyro.param("probs_a", torch.tensor([0.4, 0.6]), constraint=constraints.simplex)
    pyro.param("probs_b", torch.tensor([0.6, 0.4]), constraint=constraints.simplex)
    pyro.param("locs", torch.tensor([-1.0, 1.0]))
    pyro.param("scales", torch.tensor([1.0, 2.0]), constraint=constraints.positive)
    outer_data = torch.tensor(2.0)
    inner_data = torch.tensor([0.5, 1.5])

    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        locs = pyro.param("locs")
        scales = pyro.param("scales")
        a = pyro.sample("a", dist.Categorical(probs_a), infer={"enumerate": "parallel"})
        if outer_obs:
            pyro.sample("outer_obs", dist.Normal(0.0, scales[a]), obs=outer_data)
        with pyro.plate("inner", 2):
            b = pyro.sample(
                "b", dist.Categorical(probs_b), infer={"enumerate": "parallel"}
            )
            if inner_obs:
                pyro.sample(
                    "inner_obs", dist.Normal(locs[b], scales[a]), obs=inner_data
                )

    @poutine.scale(scale=scale)
    def hand_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        locs = pyro.param("locs")
        scales = pyro.param("scales")
        a = pyro.sample("a", dist.Categorical(probs_a), infer={"enumerate": "parallel"})
        if outer_obs:
            pyro.sample("outer_obs", dist.Normal(0.0, scales[a]), obs=outer_data)
        for i in pyro.plate("inner", 2):
            b = pyro.sample(
                "b_{}".format(i),
                dist.Categorical(probs_b),
                infer={"enumerate": "parallel"},
            )
            if inner_obs:
                pyro.sample(
                    "inner_obs_{}".format(i),
                    dist.Normal(locs[b], scales[a]),
                    obs=inner_data[i],
                )

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)


def test_elbo_enumerate_plate_5():
    """Guide enumerates b inside a plate while the model enumerates the more
    global a; this ordering is unsupported and must raise."""
    #        Guide   Model
    #                  a
    #  +---------------|--+
    #  | M=2           V  |
    #  |       b ----> c  |
    #  +------------------+
    pyro.param(
        "model_probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_b", torch.tensor([0.6, 0.4]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_c",
        torch.tensor(
            [[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]], [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]
        ),
        constraint=constraints.simplex,
    )
    pyro.param(
        "guide_probs_b", torch.tensor([0.8, 0.2]), constraint=constraints.simplex
    )
    data = torch.tensor([1, 2])

    @config_enumerate
    def model_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("b_axis", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            pyro.sample("c", dist.Categorical(Vindex(probs_c)[a, b]), obs=data)

    @config_enumerate
    def guide_plate():
        probs_b = pyro.param("guide_probs_b")
        with pyro.plate("b_axis", 2):
            pyro.sample("b", dist.Categorical(probs_b))

    @config_enumerate
    def model_iplate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("b_axis", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            pyro.sample(
                "c_{}".format(i), dist.Categorical(Vindex(probs_c)[a, b]), obs=data[i]
            )

    @config_enumerate
    def guide_iplate():
        probs_b = pyro.param("guide_probs_b")
        for i in pyro.plate("b_axis", 2):
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b))

    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate, guide_iplate)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    with pytest.raises(
        ValueError, match="Expected model enumeration to be no more global than guide"
    ):
        actual_loss = elbo.differentiable_loss(model_plate, guide_plate)
        # This never gets run because we don't support this yet.
        _check_loss_and_grads(expected_loss, actual_loss)


@pytest.mark.parametrize("enumerate1", ["parallel", "sequential"])
def test_elbo_enumerate_plate_6(enumerate1):
    """Guide-enumerated b feeding a plated c, vectorized vs sequential."""
    #     Guide           Model
    #           +-------+
    #   b ----> c <---- a
    #           |  M=2  |
    #           +-------+
    # This tests that sequential enumeration over b works, even though
    # model-side enumeration moves c into b's plate via contraction.
    pyro.param(
        "model_probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_b", torch.tensor([0.6, 0.4]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_c",
        torch.tensor(
            [[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]], [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]
        ),
        constraint=constraints.simplex,
    )
    pyro.param(
        "guide_probs_b", torch.tensor([0.8, 0.2]), constraint=constraints.simplex
    )
    data = torch.tensor([1, 2])

    @config_enumerate
    def model_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b))
        with pyro.plate("b_axis", 2):
            pyro.sample("c", dist.Categorical(Vindex(probs_c)[a, b]), obs=data)

    @config_enumerate
    def model_iplate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b))
        for i in pyro.plate("b_axis", 2):
            pyro.sample(
                "c_{}".format(i),
                dist.Categorical(Vindex(probs_c)[a, b]), obs=data[i]
            )

    @config_enumerate(default=enumerate1)
    def guide():
        probs_b = pyro.param("guide_probs_b")
        pyro.sample("b", dist.Categorical(probs_b))

    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    actual_loss = elbo.differentiable_loss(model_plate, guide)
    _check_loss_and_grads(expected_loss, actual_loss)


@pytest.mark.parametrize("scale", [1, 10])
def test_elbo_enumerate_plate_7(scale):
    """Mixed model/guide enumeration across a plate, vectorized vs
    sequential: a and c come from the guide, b and d from the model."""
    #  Guide    Model
    #    a -----> b
    #    |        |
    #  +-|--------|----------------+
    #  | V        V                |
    #  | c -----> d -----> e   N=2 |
    #  +---------------------------+
    # This tests a mixture of model and guide enumeration.
    pyro.param(
        "model_probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_b",
        torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_c",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_d",
        torch.tensor([[[0.4, 0.6], [0.3, 0.7]], [[0.3, 0.7], [0.2, 0.8]]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "model_probs_e",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    # NOTE(review): [0.35, 0.64] sums to 0.99; the simplex constraint maps it
    # onto the simplex — presumably intentional, verify against history.
    pyro.param(
        "guide_probs_a", torch.tensor([0.35, 0.64]), constraint=constraints.simplex
    )
    pyro.param(
        "guide_probs_c",
        torch.tensor([[0.0, 1.0], [1.0, 0.0]]),  # deterministic
        constraint=constraints.simplex,
    )

    @poutine.scale(scale=scale)
    def auto_model(data):
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        probs_d = pyro.param("model_probs_d")
        probs_e = pyro.param("model_probs_e")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample(
            "b", dist.Categorical(probs_b[a]), infer={"enumerate": "parallel"}
        )
        with pyro.plate("data", 2):
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
            d = pyro.sample(
                "d",
                dist.Categorical(Vindex(probs_d)[b, c]),
                infer={"enumerate": "parallel"},
            )
            pyro.sample("obs", dist.Categorical(probs_e[d]), obs=data)

    @poutine.scale(scale=scale)
    def auto_guide(data):
        probs_a = pyro.param("guide_probs_a")
        probs_c = pyro.param("guide_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a), infer={"enumerate": "parallel"})
        with pyro.plate("data", 2):
            pyro.sample("c", dist.Categorical(probs_c[a]))

    @poutine.scale(scale=scale)
    def hand_model(data):
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        probs_d = pyro.param("model_probs_d")
        probs_e = pyro.param("model_probs_e")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample(
            "b", dist.Categorical(probs_b[a]), infer={"enumerate": "parallel"}
        )
        for i in pyro.plate("data", 2):
            c = pyro.sample("c_{}".format(i), dist.Categorical(probs_c[a]))
            d = pyro.sample(
                "d_{}".format(i),
                dist.Categorical(Vindex(probs_d)[b, c]),
                infer={"enumerate": "parallel"},
            )
            pyro.sample("obs_{}".format(i), dist.Categorical(probs_e[d]), obs=data[i])

    @poutine.scale(scale=scale)
    def hand_guide(data):
        probs_a = pyro.param("guide_probs_a")
        probs_c = pyro.param("guide_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a), infer={"enumerate": "parallel"})
        for i in pyro.plate("data", 2):
            pyro.sample("c_{}".format(i), dist.Categorical(probs_c[a]))

    data = torch.tensor([0, 0])
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
def test_elbo_enumerate_plates_1(scale):
    """Two unrelated plates, vectorized vs sequential."""
    #  +-----------------+
    #  | a ----> b   M=2 |
    #  +-----------------+
    #  +-----------------+
    #  | c ----> d   N=3 |
    #  +-----------------+
    # This tests two unrelated plates.
    # Each should remain uncontracted.
pyro.param("probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex) pyro.param( "probs_b", torch.tensor([[0.6, 0.4], [0.4, 0.6]]), constraint=constraints.simplex, ) pyro.param("probs_c", torch.tensor([0.75, 0.25]), constraint=constraints.simplex) pyro.param( "probs_d", torch.tensor([[0.4, 0.6], [0.3, 0.7]]), constraint=constraints.simplex, ) b_data = torch.tensor([0, 1]) d_data = torch.tensor([0, 0, 1]) @config_enumerate @poutine.scale(scale=scale) def auto_model(): probs_a = pyro.param("probs_a") probs_b = pyro.param("probs_b") probs_c = pyro.param("probs_c") probs_d = pyro.param("probs_d") with pyro.plate("a_axis", 2): a = pyro.sample("a", dist.Categorical(probs_a)) pyro.sample("b", dist.Categorical(probs_b[a]), obs=b_data) with pyro.plate("c_axis", 3): c = pyro.sample("c", dist.Categorical(probs_c)) pyro.sample("d", dist.Categorical(probs_d[c]), obs=d_data) @config_enumerate @poutine.scale(scale=scale) def hand_model(): probs_a = pyro.param("probs_a") probs_b = pyro.param("probs_b") probs_c = pyro.param("probs_c") probs_d = pyro.param("probs_d") for i in pyro.plate("a_axis", 2): a = pyro.sample("a_{}".format(i), dist.Categorical(probs_a)) pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]), obs=b_data[i]) for j in pyro.plate("c_axis", 3): c = pyro.sample("c_{}".format(j), dist.Categorical(probs_c)) pyro.sample("d_{}".format(j), dist.Categorical(probs_d[c]), obs=d_data[j]) def guide(): pass elbo = TraceEnum_ELBO(max_plate_nesting=1) auto_loss = elbo.differentiable_loss(auto_model, guide) elbo = TraceEnum_ELBO(max_plate_nesting=0) hand_loss = elbo.differentiable_loss(hand_model, guide) _check_loss_and_grads(hand_loss, auto_loss) @pytest.mark.parametrize("scale", [1, 10]) def test_elbo_enumerate_plates_2(scale): # +---------+ +---------+ # | b <---- a ----> c | # | M=2 | | N=3 | # +---------+ +---------+ # This tests two different plates with recycled dimension. 
pyro.param("probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex) pyro.param( "probs_b", torch.tensor([[0.6, 0.4], [0.4, 0.6]]), constraint=constraints.simplex, ) pyro.param( "probs_c", torch.tensor([[0.75, 0.25], [0.55, 0.45]]), constraint=constraints.simplex, ) b_data = torch.tensor([0, 1]) c_data = torch.tensor([0, 0, 1]) @config_enumerate @poutine.scale(scale=scale) def auto_model(): probs_a = pyro.param("probs_a") probs_b = pyro.param("probs_b") probs_c = pyro.param("probs_c") a = pyro.sample("a", dist.Categorical(probs_a)) with pyro.plate("b_axis", 2): pyro.sample("b", dist.Categorical(probs_b[a]), obs=b_data) with pyro.plate("c_axis", 3): pyro.sample("c", dist.Categorical(probs_c[a]), obs=c_data) @config_enumerate @poutine.scale(scale=scale) def hand_model(): probs_a = pyro.param("probs_a") probs_b = pyro.param("probs_b") probs_c = pyro.param("probs_c") a = pyro.sample("a", dist.Categorical(probs_a)) for i in pyro.plate("b_axis", 2): pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]), obs=b_data[i]) for j in pyro.plate("c_axis", 3): pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a]), obs=c_data[j]) def guide(): pass elbo = TraceEnum_ELBO(max_plate_nesting=1) auto_loss = elbo.differentiable_loss(auto_model, guide) elbo = TraceEnum_ELBO(max_plate_nesting=0) hand_loss = elbo.differentiable_loss(hand_model, guide) _check_loss_and_grads(hand_loss, auto_loss) @pytest.mark.parametrize("scale", [1, 10]) def test_elbo_enumerate_plates_3(scale): # +--------------------+ # | +----------+ | # a -------> b | | # | | N=2 | | # | +----------+ M=2 | # +--------------------+ # This is tests the case of multiple plate contractions in # a single step. 
pyro.param("probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex) pyro.param( "probs_b", torch.tensor([[0.6, 0.4], [0.4, 0.6]]), constraint=constraints.simplex, ) data = torch.tensor([[0, 1], [0, 0]]) @config_enumerate @poutine.scale(scale=scale) def auto_model(): probs_a = pyro.param("probs_a") probs_b = pyro.param("probs_b") a = pyro.sample("a", dist.Categorical(probs_a)) with pyro.plate("outer", 2): with pyro.plate("inner", 2): pyro.sample("b", dist.Categorical(probs_b[a]), obs=data) @config_enumerate @poutine.scale(scale=scale) def hand_model(): probs_a = pyro.param("probs_a") probs_b = pyro.param("probs_b") inner = pyro.plate("inner", 2) a = pyro.sample("a", dist.Categorical(probs_a)) for i in pyro.plate("outer", 2): for j in inner: pyro.sample( "b_{}_{}".format(i, j), dist.Categorical(probs_b[a]), obs=data[i, j] ) def guide(): pass elbo = TraceEnum_ELBO(max_plate_nesting=2) auto_loss = elbo.differentiable_loss(auto_model, guide) elbo = TraceEnum_ELBO(max_plate_nesting=0) hand_loss = elbo.differentiable_loss(hand_model, guide) _check_loss_and_grads(hand_loss, auto_loss) @pytest.mark.parametrize("scale", [1, 10]) def test_elbo_enumerate_plates_4(scale): # +--------------------+ # | +----------+ | # a ----> b ----> c | | # | | N=2 | | # | M=2 +----------+ | # +--------------------+ pyro.param("probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex) pyro.param( "probs_b", torch.tensor([[0.6, 0.4], [0.4, 0.6]]), constraint=constraints.simplex, ) pyro.param( "probs_c", torch.tensor([[0.4, 0.6], [0.3, 0.7]]), constraint=constraints.simplex, ) @config_enumerate @poutine.scale(scale=scale) def auto_model(data): probs_a = pyro.param("probs_a") probs_b = pyro.param("probs_b") probs_c = pyro.param("probs_c") a = pyro.sample("a", dist.Categorical(probs_a)) with pyro.plate("outer", 2): b = pyro.sample("b", dist.Categorical(probs_b[a])) with pyro.plate("inner", 2): pyro.sample("c", dist.Categorical(probs_c[b]), obs=data) @config_enumerate 
    # Sequential counterpart of auto_model above (its @config_enumerate
    # decorator is in the preceding chunk); loops replace vectorized plates.
    @poutine.scale(scale=scale)
    def hand_model(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]))
            for j in inner:
                pyro.sample(
                    "c_{}_{}".format(i, j), dist.Categorical(probs_c[b]), obs=data[i, j]
                )

    def guide(data):
        pass

    data = torch.tensor([[0, 1], [0, 0]])
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    auto_loss = elbo.differentiable_loss(auto_model, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
def test_elbo_enumerate_plates_5(scale):
    # a feeds b directly and also feeds c jointly with b (via Vindex):
    #     a
    #     | \
    #  +--|---\------------+
    #  |  V   +-\--------+ |
    #  |  b ----> c      | |
    #  |      |      N=2 | |
    #  | M=2  +----------+ |
    #  +-------------------+
    pyro.param("probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex)
    pyro.param(
        "probs_b",
        torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "probs_c",
        torch.tensor([[[0.4, 0.6], [0.3, 0.7]], [[0.2, 0.8], [0.1, 0.9]]]),
        constraint=constraints.simplex,
    )
    data = torch.tensor([[0, 1], [0, 0]])

    # Vectorized variant; Vindex handles the joint (a, b) advanced index.
    @config_enumerate
    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
            with pyro.plate("inner", 2):
                pyro.sample("c", dist.Categorical(Vindex(probs_c)[a, b]), obs=data)

    # Sequential variant with unrolled loops.
    @config_enumerate
    @poutine.scale(scale=scale)
    def hand_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]))
            for j in inner:
                pyro.sample(
                    "c_{}_{}".format(i, j),
                    dist.Categorical(Vindex(probs_c)[a, b]),
                    obs=data[i, j],
                )

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize("scale", [1, 10])
def test_elbo_enumerate_plates_6(scale):
    #         +----------+
    #         |      M=2 |
    #     a ----> b      |
    #     |   |   |      |
    #  +--|-------|--+   |
    #  |  V   |   V  |   |
    #  |  c ----> d  |   |
    #  |      |      |   |
    #  | N=2  +------|---+
    #  +-------------+
    # This tests different ways of mixing two independence contexts,
    # where each can be either sequential or vectorized plate.
    pyro.param("probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex)
    pyro.param(
        "probs_b",
        torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "probs_c",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "probs_d",
        torch.tensor([[[0.4, 0.6], [0.3, 0.7]], [[0.3, 0.7], [0.2, 0.8]]]),
        constraint=constraints.simplex,
    )

    # Both axes sequential (the reference implementation).
    @config_enumerate
    @poutine.scale(scale=scale)
    def model_iplate_iplate(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis
        ]
        c = [
            pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis
        ]
        for i in b_axis:
            for j in c_axis:
                pyro.sample(
                    "d_{}_{}".format(i, j),
                    dist.Categorical(Vindex(probs_d)[b[i], c[j]]),
                    obs=data[i, j],
                )

    # b sequential, c vectorized.
    @config_enumerate
    @poutine.scale(scale=scale)
    def model_iplate_plate(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis
        ]
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        for i in b_axis:
            with c_axis:
                pyro.sample(
                    "d_{}".format(i),
                    dist.Categorical(Vindex(probs_d)[b[i], c]),
                    obs=data[i],
                )

    # b vectorized, c sequential.
    @config_enumerate
    @poutine.scale(scale=scale)
    def model_plate_iplate(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        c = [
            pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis
        ]
        with b_axis:
            for j in c_axis:
                pyro.sample(
                    "d_{}".format(j),
                    dist.Categorical(Vindex(probs_d)[b, c[j]]),
                    obs=data[:, j],
                )

    # Both vectorized: d then depends on two enumerated variables from two
    # non-nested plates, which is not tree-structured plate nesting.
    @config_enumerate
    @poutine.scale(scale=scale)
    def model_plate_plate(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2, dim=-1)
        c_axis = pyro.plate("c_axis", 2, dim=-2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        with b_axis, c_axis:
            pyro.sample("d", dist.Categorical(Vindex(probs_d)[b, c]), obs=data)

    def guide(data):
        pass

    # Check that either one of the sequential plates can be promoted to be vectorized.
    data = torch.tensor([[0, 1], [0, 0]])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    loss_iplate_iplate = elbo.differentiable_loss(model_iplate_iplate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    loss_plate_iplate = elbo.differentiable_loss(model_plate_iplate, guide, data)
    loss_iplate_plate = elbo.differentiable_loss(model_iplate_plate, guide, data)
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_iplate)
    _check_loss_and_grads(loss_iplate_iplate, loss_iplate_plate)

    # But promoting both to plates should result in an error.
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    with pytest.raises(
        NotImplementedError, match="Expected tree-structured plate nesting.*"
    ):
        elbo.differentiable_loss(model_plate_plate, guide, data)


@pytest.mark.parametrize("scale", [1, 10])
def test_elbo_enumerate_plates_7(scale):
    #            +-------------+
    #            |         N=2 |
    #     a -------> c         |
    #     |      |   |         |
    #  +--|----------|--+      |
    #  |  |      |   V  |      |
    #  |  V      |   e  |      |
    #  |  b ----> d     |      |
    #  |         |      |      |
    #  | M=2     +------|------+
    #  +----------------+
    # This tests tree-structured dependencies among variables but
    # non-tree dependencies among plate nestings.
    # Parameters shared by all four model variants below.
    pyro.param("probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex)
    pyro.param(
        "probs_b",
        torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "probs_c",
        torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "probs_d",
        torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
        constraint=constraints.simplex,
    )
    pyro.param(
        "probs_e",
        torch.tensor([[0.4, 0.6], [0.3, 0.7]]),
        constraint=constraints.simplex,
    )

    # Both axes sequential (the reference implementation).
    @config_enumerate
    @poutine.scale(scale=scale)
    def model_iplate_iplate(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis
        ]
        c = [
            pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis
        ]
        for i in b_axis:
            for j in c_axis:
                pyro.sample(
                    "d_{}_{}".format(i, j),
                    dist.Categorical(probs_d[b[i]]),
                    obs=data[i, j],
                )
                pyro.sample(
                    "e_{}_{}".format(i, j),
                    dist.Categorical(probs_e[c[j]]),
                    obs=data[i, j],
                )

    # b sequential, c vectorized.
    @config_enumerate
    @poutine.scale(scale=scale)
    def model_iplate_plate(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis
        ]
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        for i in b_axis:
            with c_axis:
                pyro.sample(
                    "d_{}".format(i), dist.Categorical(probs_d[b[i]]), obs=data[i]
                )
                pyro.sample("e_{}".format(i), dist.Categorical(probs_e[c]), obs=data[i])

    # b vectorized, c sequential.
    @config_enumerate
    @poutine.scale(scale=scale)
    def model_plate_iplate(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        c = [
            pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis
        ]
        with b_axis:
            for j in c_axis:
                pyro.sample(
                    "d_{}".format(j), dist.Categorical(probs_d[b]), obs=data[:, j]
                )
                pyro.sample(
                    "e_{}".format(j), dist.Categorical(probs_e[c[j]]), obs=data[:, j]
                )

    # Both vectorized; here d and e each depend on a single plate-local
    # variable, so the vectorized-vectorized case is checked for equality
    # against the others (see the loss comparisons in the next statements).
    @config_enumerate
    @poutine.scale(scale=scale)
    def model_plate_plate(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2, dim=-1)
        c_axis = pyro.plate("c_axis", 2, dim=-2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        with b_axis, c_axis:
            pyro.sample("d", dist.Categorical(probs_d[b]), obs=data)
            pyro.sample("e", dist.Categorical(probs_e[c]), obs=data)

    def guide(data):
        pass

    # Check that any combination of sequential plates can be promoted to be vectorized.
    data = torch.tensor([[0, 1], [0, 0]])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    loss_iplate_iplate = elbo.differentiable_loss(model_iplate_iplate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    loss_plate_iplate = elbo.differentiable_loss(model_plate_iplate, guide, data)
    loss_iplate_plate = elbo.differentiable_loss(model_iplate_plate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    loss_plate_plate = elbo.differentiable_loss(model_plate_plate, guide, data)
    # All promotions must agree with the fully sequential reference.
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_iplate)
    _check_loss_and_grads(loss_iplate_iplate, loss_iplate_plate)
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_plate)


@pytest.mark.parametrize("guide_scale", [1])
@pytest.mark.parametrize("model_scale", [1])
@pytest.mark.parametrize(
    "outer_vectorized,inner_vectorized,xfail",
    [(False, True, False), (True, False, True), (True, True, True)],
    ids=["iplate-plate", "plate-iplate", "plate-plate"],
)
def test_elbo_enumerate_plates_8(
    model_scale, guide_scale, inner_vectorized, outer_vectorized, xfail
):
    #      Guide   Model
    #                a
    #      +-----------|--------+
    #      | M=2   +---|------+ |
    #      |       |   V  N=2 | |
    #      |   b ----> c      | |
    #      |       +----------+ |
    #      +--------------------+
    pyro.param(
        "model_probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_b", torch.tensor([0.6, 0.4]), constraint=constraints.simplex
    )
    pyro.param(
        "model_probs_c",
        torch.tensor(
            [[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]], [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]
        ),
        constraint=constraints.simplex,
    )
    pyro.param(
        "guide_probs_b", torch.tensor([0.8, 0.2]), constraint=constraints.simplex
    )
    data = torch.tensor([[0, 1], [0, 2]])

    # Both plates vectorized.
    @config_enumerate
    @poutine.scale(scale=model_scale)
    def model_plate_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            with pyro.plate("inner", 2):
                # (body of the "inner" plate opened in the previous chunk)
                pyro.sample("c", dist.Categorical(Vindex(probs_c)[a, b]), obs=data)

    # Outer sequential, inner vectorized.
    @config_enumerate
    @poutine.scale(scale=model_scale)
    def model_iplate_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            with inner:
                pyro.sample(
                    "c_{}".format(i),
                    dist.Categorical(Vindex(probs_c)[a, b]),
                    obs=data[:, i],
                )

    # Outer vectorized, inner sequential.
    @config_enumerate
    @poutine.scale(scale=model_scale)
    def model_plate_iplate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            for j in pyro.plate("inner", 2):
                pyro.sample(
                    "c_{}".format(j),
                    dist.Categorical(Vindex(probs_c)[a, b]),
                    obs=data[j],
                )

    # Both sequential: the reference implementation.
    @config_enumerate
    @poutine.scale(scale=model_scale)
    def model_iplate_iplate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            for j in inner:
                pyro.sample(
                    "c_{}_{}".format(i, j),
                    dist.Categorical(Vindex(probs_c)[a, b]),
                    obs=data[j, i],
                )

    # Guides enumerate b in the outer plate only.
    @config_enumerate
    @poutine.scale(scale=guide_scale)
    def guide_plate():
        probs_b = pyro.param("guide_probs_b")
        with pyro.plate("outer", 2):
            pyro.sample("b", dist.Categorical(probs_b))

    @config_enumerate
    @poutine.scale(scale=guide_scale)
    def guide_iplate():
        probs_b = pyro.param("guide_probs_b")
        for i in pyro.plate("outer", 2):
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b))

    # Fully sequential reference loss.
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate_iplate, guide_iplate)
    with ExitStack() as stack:
        if xfail:
            # xfail cases enumerate a model variable more globally than the
            # guide's enumeration, which TraceEnum_ELBO must reject.
            stack.enter_context(
                pytest.raises(
                    ValueError,
                    match="Expected model enumeration to be no more global than guide",
                )
            )
        if inner_vectorized:
            if outer_vectorized:
                elbo = TraceEnum_ELBO(max_plate_nesting=2)
                actual_loss = elbo.differentiable_loss(model_plate_plate, guide_plate)
            else:
                elbo = TraceEnum_ELBO(max_plate_nesting=1)
                actual_loss = elbo.differentiable_loss(model_iplate_plate, guide_iplate)
        else:
            elbo = TraceEnum_ELBO(max_plate_nesting=1)
            actual_loss = elbo.differentiable_loss(model_plate_iplate, guide_plate)
        _check_loss_and_grads(expected_loss, actual_loss)


def test_elbo_scale():
    # Consider a mixture model with two components, toggled by `which`.
    def component_model(data, which, suffix=""):
        loc = pyro.param("locs", torch.tensor([-1.0, 1.0]))[which]
        with pyro.plate("data" + suffix, len(data)):
            pyro.sample("obs" + suffix, dist.Normal(loc, 1.0), obs=data)

    pyro.param(
        "mixture_probs", torch.tensor([0.25, 0.75]), constraint=constraints.simplex
    )

    # We can implement this in two ways.
    # First consider automatic enumeration in the guide.
    def auto_model(data):
        mixture_probs = pyro.param("mixture_probs")
        which = pyro.sample("which", dist.Categorical(mixture_probs))
        component_model(data, which)

    def auto_guide(data):
        mixture_probs = pyro.param("mixture_probs")
        pyro.sample(
            "which", dist.Categorical(mixture_probs), infer={"enumerate": "parallel"}
        )

    # Second consider explicit enumeration in the model, where we
    # marginalize out the `which` variable by hand.
    # Each component is down-weighted by its mixture probability via
    # poutine.scale, implementing the marginalization over `which` by hand.
    def hand_model(data):
        mixture_probs = pyro.param("mixture_probs")
        for which in pyro.plate("which", len(mixture_probs)):
            with pyro.poutine.scale(scale=mixture_probs[which]):
                component_model(data, which, suffix="_{}".format(which))

    def hand_guide(data):
        pass

    data = dist.Normal(0.0, 2.0).sample((3,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)


def test_elbo_hmm_growth():
    # Profiles how enumeration cache size and compile/reuse times grow with
    # the length of a discrete HMM; results are logged, not asserted.
    pyro.clear_param_store()
    init_probs = torch.tensor([0.5, 0.5])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)

    def model(data):
        transition_probs = pyro.param(
            "transition_probs",
            torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
            constraint=constraints.simplex,
        )
        emission_probs = pyro.param(
            "emission_probs",
            torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
            constraint=constraints.simplex,
        )
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=y)

    @config_enumerate
    def guide(data):
        transition_probs = pyro.param(
            "transition_probs",
            torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
            constraint=constraints.simplex,
        )
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))

    # GROWTH_SIZE controls the largest chain length profiled.
    sizes = range(3, 1 + int(os.environ.get("GROWTH_SIZE", 15)))
    costs = []
    times1 = []
    times2 = []
    for size in sizes:
        data = torch.ones(size)
        time0 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # compiles paths
        time1 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # reuses compiled path
        time2 = timeit.default_timer()
        times1.append(time1 - time0)
        times2.append(time2 - time1)
        costs.append(LAST_CACHE_SIZE[0])
    collated_costs = defaultdict(list)
    for counts in costs:
        for key, cost in counts.items():
            collated_costs[key].append(cost)
    logger.debug(
        "\n".join(
            [
                "HMM Growth:",
                "sizes = {}".format(repr(sizes)),
                "costs = {}".format(repr(dict(collated_costs))),
                "times1 = {}".format(repr(times1)),
                "times2 = {}".format(repr(times2)),
            ]
        )
    )


@pytest.mark.skipif(
    "CUDA_TEST" in os.environ, reason="https://github.com/pyro-ppl/pyro/issues/1380"
)
def test_elbo_dbn_growth():
    # Same growth profiling as test_elbo_hmm_growth, but for a dynamic
    # Bayes net with an enumerating guide; results are logged, not asserted.
    pyro.clear_param_store()
    elbo = TraceEnum_ELBO(max_plate_nesting=0)

    def model(data):
        uniform = torch.tensor([0.5, 0.5])
        probs_z = pyro.param(
            "probs_z",
            torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
            constraint=constraints.simplex,
        )
        for i, z in pyro.markov(enumerate(data)):
            pyro.sample("x_{}".format(i), dist.Categorical(uniform))
            y = pyro.sample("y_{}".format(i), dist.Categorical(uniform))
            pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=z)

    @config_enumerate
    def guide(data):
        probs_x = pyro.param(
            "probs_x",
            torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
            constraint=constraints.simplex,
        )
        probs_y = pyro.param(
            "probs_y",
            torch.tensor([[[0.75, 0.25], [0.45, 0.55]], [[0.55, 0.45], [0.25, 0.75]]]),
            constraint=constraints.simplex,
        )
        x = 0
        y = 0
        for i in pyro.markov(range(len(data))):
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs_x[x]))
            y = pyro.sample("y_{}".format(i), dist.Categorical(probs_y[x, y]))

    sizes = range(3, 1 + int(os.environ.get("GROWTH_SIZE", 15)))
    costs = []
    times1 = []
    times2 = []
    for size in sizes:
        data = torch.ones(size)
        time0 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # compiles paths
        time1 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # reuses compiled path
        time2 = timeit.default_timer()
        times1.append(time1 - time0)
        times2.append(time2 - time1)
        costs.append(LAST_CACHE_SIZE[0])
    collated_costs = defaultdict(list)
    for counts in costs:
        for key, cost in counts.items():
            collated_costs[key].append(cost)
    logger.debug(
        "\n".join(
            [
                "DBN Growth:",
                "sizes = {}".format(repr(sizes)),
                "costs = {}".format(repr(dict(collated_costs))),
                "times1 = {}".format(repr(times1)),
                "times2 = {}".format(repr(times2)),
            ]
        )
    )


@pytest.mark.parametrize("pi_a", [0.33])
@pytest.mark.parametrize("pi_b", [0.51, 0.77])
@pytest.mark.parametrize("pi_c", [0.37])
@pytest.mark.parametrize("N_b", [3, 4])
@pytest.mark.parametrize("N_c", [5, 6])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("expand", [True, False])
def test_bernoulli_pyramid_elbo_gradient(
    enumerate1, N_b, N_c, pi_a, pi_b, pi_c, expand
):
    # Compares TraceEnum_ELBO gradients against hand-derived KL gradients
    # for a three-level Bernoulli model a -> b -> c with nested plates.
    pyro.clear_param_store()

    def model():
        a = pyro.sample("a", dist.Bernoulli(0.33))
        with pyro.plate("b_plate", N_b):
            b = pyro.sample("b", dist.Bernoulli(0.25 * a + 0.50))
            with pyro.plate("c_plate", N_c):
                pyro.sample("c", dist.Bernoulli(0.15 * a + 0.20 * b + 0.32))

    def guide():
        qa = pyro.param("qa", torch.tensor(pi_a, requires_grad=True))
        qb = pyro.param("qb", torch.tensor(pi_b, requires_grad=True))
        qc = pyro.param("qc", torch.tensor(pi_c, requires_grad=True))
        pyro.sample("a", dist.Bernoulli(qa))
        with pyro.plate("b_plate", N_b):
            pyro.sample("b", dist.Bernoulli(qb).expand_by([N_b]))
            with pyro.plate("c_plate", N_c):
                pyro.sample("c", dist.Bernoulli(qc).expand_by([N_c, N_b]))

    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(max_plate_nesting=2, strict_enumeration_warning=True)
    elbo.loss_and_grads(
        model, config_enumerate(guide, default=enumerate1, expand=expand)
    )
    actual_grad_qa = pyro.param("qa").grad
    actual_grad_qb = pyro.param("qb").grad
    actual_grad_qc = pyro.param("qc").grad

    logger.info("Computing analytic gradients")
    qa = torch.tensor(pi_a, requires_grad=True)
    qb = torch.tensor(pi_b, requires_grad=True)
    qc = torch.tensor(pi_c, requires_grad=True)
    # Analytic ELBO: a sum of KL terms weighted by upstream probabilities.
    elbo = kl_divergence(dist.Bernoulli(qa), dist.Bernoulli(0.33))
    elbo = elbo + N_b * qa * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.75))
    elbo = elbo + N_b * (1.0 - qa) * kl_divergence(
        dist.Bernoulli(qb), dist.Bernoulli(0.50)
    )
    elbo = elbo + N_c * N_b * qa * qb * kl_divergence(
        dist.Bernoulli(qc), dist.Bernoulli(0.67)
    )
    elbo = elbo + N_c * N_b * (1.0 - qa) * qb * kl_divergence(
        dist.Bernoulli(qc), dist.Bernoulli(0.52)
    )
    elbo = elbo + N_c * N_b * qa * (1.0 - qb) * kl_divergence(
        dist.Bernoulli(qc), dist.Bernoulli(0.47)
    )
    elbo = elbo + N_c * N_b * (1.0 - qa) * (1.0 - qb) * kl_divergence(
        dist.Bernoulli(qc), dist.Bernoulli(0.32)
    )
    expected_grad_qa, expected_grad_qb, expected_grad_qc = grad(elbo, [qa, qb, qc])

    prec = 0.001
    assert_equal(
        actual_grad_qa,
        expected_grad_qa,
        prec=prec,
        msg="".join(
            [
                "\nqa expected = {}".format(expected_grad_qa.data.cpu().numpy()),
                "\nqa actual = {}".format(actual_grad_qa.data.cpu().numpy()),
            ]
        ),
    )
    assert_equal(
        actual_grad_qb,
        expected_grad_qb,
        prec=prec,
        msg="".join(
            [
                "\nqb expected = {}".format(expected_grad_qb.data.cpu().numpy()),
                "\nqb actual = {}".format(actual_grad_qb.data.cpu().numpy()),
            ]
        ),
    )
    assert_equal(
        actual_grad_qc,
        expected_grad_qc,
        prec=prec,
        msg="".join(
            [
                "\nqc expected = {}".format(expected_grad_qc.data.cpu().numpy()),
                "\nqc actual = {}".format(actual_grad_qc.data.cpu().numpy()),
            ]
        ),
    )


@pytest.mark.parametrize("pi_a", [0.33])
@pytest.mark.parametrize("pi_b", [0.51])
@pytest.mark.parametrize("pi_c", [0.37])
@pytest.mark.parametrize("pi_d", [0.29])
@pytest.mark.parametrize("b_factor", [0.03, 0.04])
@pytest.mark.parametrize("c_factor", [0.04, 0.06])
@pytest.mark.parametrize("d_offset", [0.32])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("expand", [True, False])
def test_bernoulli_non_tree_elbo_gradient(
    enumerate1,
    b_factor,
    c_factor,
    pi_a,
    pi_b,
    pi_c,
    pi_d,
    expand,
    d_offset,
    N_b=2,
    N_c=2,
):
    # Like the pyramid test above, but d depends on both b and c (a
    # non-tree dependency); gradients are again checked against a
    # hand-derived ELBO.
    pyro.clear_param_store()

    def model():
        a = pyro.sample("a", dist.Bernoulli(0.33))
        b = pyro.sample("b", dist.Bernoulli(0.25 * a + 0.50))
        c = pyro.sample("c", dist.Bernoulli(0.25 * a + 0.10 * b + 0.50))
        pyro.sample("d", dist.Bernoulli(b_factor * b + c_factor * c + d_offset))

    def guide():
        qa = pyro.param("qa", torch.tensor(pi_a, requires_grad=True))
        qb = pyro.param("qb", torch.tensor(pi_b, requires_grad=True))
        qc = pyro.param("qc", torch.tensor(pi_c, requires_grad=True))
        qd = pyro.param("qd", torch.tensor(pi_d, requires_grad=True))
        pyro.sample("a", dist.Bernoulli(qa))
        pyro.sample("b", dist.Bernoulli(qb))
        pyro.sample("c", dist.Bernoulli(qc))
        pyro.sample("d", dist.Bernoulli(qd))

    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(max_plate_nesting=2, strict_enumeration_warning=True)
    elbo.loss_and_grads(
        model, config_enumerate(guide, default=enumerate1, expand=expand)
    )
    actual_grad_qa = pyro.param("qa").grad
    actual_grad_qb = pyro.param("qb").grad
    actual_grad_qc = pyro.param("qc").grad
    actual_grad_qd = pyro.param("qd").grad

    logger.info("Computing analytic gradients")
    qa = torch.tensor(pi_a, requires_grad=True)
    qb = torch.tensor(pi_b, requires_grad=True)
    qc = torch.tensor(pi_c, requires_grad=True)
    qd = torch.tensor(pi_d, requires_grad=True)
    # Analytic ELBO: KL terms weighted by the mean-field guide marginals.
    elbo = kl_divergence(dist.Bernoulli(qa), dist.Bernoulli(0.33))
    elbo = elbo + qa * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.75))
    elbo = elbo + (1.0 - qa) * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.50))
    elbo = elbo + qa * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.85))
    elbo = elbo + (1.0 - qa) * qb * kl_divergence(
        dist.Bernoulli(qc), dist.Bernoulli(0.60)
    )
    elbo = elbo + qa * (1.0 - qb) * kl_divergence(
        dist.Bernoulli(qc), dist.Bernoulli(0.75)
    )
    elbo = elbo + (1.0 - qa) * (1.0 - qb) * kl_divergence(
        dist.Bernoulli(qc), dist.Bernoulli(0.50)
    )
    elbo = elbo + qb * qc * kl_divergence(
        dist.Bernoulli(qd), dist.Bernoulli(b_factor + c_factor + d_offset)
    )
    elbo = elbo + (1.0 - qb) * qc * kl_divergence(
        dist.Bernoulli(qd), dist.Bernoulli(c_factor + d_offset)
    )
    elbo = elbo + qb * (1.0 - qc) * kl_divergence(
        dist.Bernoulli(qd), dist.Bernoulli(b_factor + d_offset)
    )
    elbo = elbo + (1.0 - qb) * (1.0 - qc) * kl_divergence(
        dist.Bernoulli(qd), dist.Bernoulli(d_offset)
    )
    expected_grad_qa, expected_grad_qb, expected_grad_qc, expected_grad_qd = grad(
        elbo, [qa, qb, qc, qd]
    )

    prec = 0.0001
    assert_equal(
        actual_grad_qa,
        expected_grad_qa,
        prec=prec,
        msg="".join(
            [
                "\nqa expected = {}".format(expected_grad_qa.data.cpu().numpy()),
                "\nqa actual = {}".format(actual_grad_qa.data.cpu().numpy()),
            ]
        ),
    )
    assert_equal(
        actual_grad_qb,
        expected_grad_qb,
        prec=prec,
        msg="".join(
            [
                "\nqb expected = {}".format(expected_grad_qb.data.cpu().numpy()),
                "\nqb actual = {}".format(actual_grad_qb.data.cpu().numpy()),
            ]
        ),
    )
    assert_equal(
        actual_grad_qc,
        expected_grad_qc,
        prec=prec,
        msg="".join(
            [
                "\nqc expected = {}".format(expected_grad_qc.data.cpu().numpy()),
                "\nqc actual = {}".format(actual_grad_qc.data.cpu().numpy()),
            ]
        ),
    )
    assert_equal(
        actual_grad_qd,
        expected_grad_qd,
        prec=prec,
        msg="".join(
            [
                "\nqd expected = {}".format(expected_grad_qd.data.cpu().numpy()),
                "\nqd actual = {}".format(actual_grad_qd.data.cpu().numpy()),
            ]
        ),
    )


@pytest.mark.parametrize("gate", [0.1, 0.25, 0.5, 0.75, 0.9])
@pytest.mark.parametrize("rate", [0.1, 1.0, 3.0])
def test_elbo_zip(gate, rate):
    # test for ZIP distribution
    def zip_model(data):
        gate = pyro.param("gate")
        rate = pyro.param("rate")
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.ZeroInflatedPoisson(rate, gate=gate), obs=data)

    # Equivalent mixture: an enumerated Bernoulli mask selects between
    # Delta(0) (inflated zeros) and Poisson(rate).
    def composite_model(data):
        gate = pyro.param("gate")
        rate = pyro.param("rate")
        dist1 = dist.Delta(torch.tensor(0.0))
        dist0 = dist.Poisson(rate)
        with pyro.plate("data", len(data)):
            mask = pyro.sample(
                "mask", dist.Bernoulli(gate), infer={"enumerate": "parallel"}
            ).bool()
            pyro.sample("obs", dist.MaskedMixture(mask, dist0, dist1), obs=data)

    def guide(data):
        pass

    pyro.param("gate", torch.tensor(gate), constraint=constraints.unit_interval)
    pyro.param("rate", torch.tensor(rate), constraint=constraints.positive)
    data = torch.tensor([0.0, 1.0, 2.0])
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    zip_loss = elbo.differentiable_loss(zip_model, guide, data)
    composite_loss = elbo.differentiable_loss(composite_model, guide, data)
    _check_loss_and_grads(zip_loss, composite_loss)


@pytest.mark.parametrize(
    "mixture,scale",
    [
        (dist.MixtureOfDiagNormals, [[2.0, 1.0], [1.0, 2], [4.0, 4.0]]),
        (dist.MixtureOfDiagNormalsSharedCovariance, [2.0, 1.0]),
    ],
)
def test_mixture_of_diag_normals(mixture, scale):
    # K = 3, D = 2
    pyro.param("locs", torch.tensor([[0.0, 0.0], [0.0, 1.0], [0.0, 10.0]]))
    pyro.param("coord_scale", torch.tensor(scale), constraint=constraints.positive)
    pyro.param("component_logits", torch.tensor([0.0, -1.0, 2.0]))
    data = torch.tensor([[0.0, 0.0], [1.0, 1.0], [2.0, 3.0], [1.0, 11.0]])

    def auto_model():
        locs = pyro.param("locs")
        coord_scale = pyro.param("coord_scale")
        component_logits = pyro.param("component_logits")
        with pyro.plate("data", len(data)):
            pyro.sample("obs", mixture(locs, coord_scale, component_logits), obs=data)

    # Hand-rolled mixture: enumerate the component index and mask out all
    # but the selected component of a per-component Normal.
    def hand_model():
        locs = pyro.param("locs")
        coord_scale = pyro.param("coord_scale")
        component_logits = pyro.param("component_logits")
        with pyro.plate("data", len(data), dim=-2):
            which = pyro.sample(
                "mask",
                dist.Categorical(logits=component_logits),
                infer={"enumerate": "parallel"},
            )
            with pyro.plate(
                "components", len(component_logits), dim=-1
            ) as component_ind:
                with poutine.mask(mask=(which == component_ind)):
                    pyro.sample(
                        "obs",
                        dist.Normal(locs, coord_scale).to_event(1),
                        obs=data.unsqueeze(-2),
                    )

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=2, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)


@pytest.mark.parametrize(
    "Dist, prior",
    [
        (dist.Bernoulli, 0.2),
        (dist.Categorical, [0.2, 0.8]),
        (dist.Categorical, [0.2, 0.3, 0.5]),
        (dist.Categorical, [0.2, 0.3, 0.3, 0.2]),
        (dist.OneHotCategorical, [0.2, 0.8]),
        (dist.OneHotCategorical, [0.2, 0.3, 0.5]),
        (dist.OneHotCategorical, [0.2, 0.3, 0.3, 0.2]),
    ],
)
def test_compute_marginals_single(Dist, prior):
    prior = torch.tensor(prior)
    data = torch.tensor([0.0, 0.1, 0.2, 0.9, 1.0, 1.1])

    @config_enumerate
    def model():
        locs = torch.tensor([-1.0, 0.0, 1.0, 2.0])
        x = pyro.sample("x", Dist(prior))
        # Normalize the sample to an integer index into locs.
        if Dist is dist.Bernoulli:
            x = x.long()
        elif Dist is dist.OneHotCategorical:
            x = x.max(-1)[1]
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.Normal(locs[x], 1.0), obs=data)

    # First compute marginals using an empty guide.
    def empty_guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    marginals = elbo.compute_marginals(model, empty_guide)
    assert len(marginals) == 1
    assert type(marginals["x"]) is Dist
    probs = marginals["x"].probs
    assert probs.shape == prior.shape

    # Next insert the computed marginals in an enumerating guide
    # and ensure that they are exact, or at least locally optimal.
    pyro.param("probs", probs)

    @config_enumerate
    def exact_guide():
        probs = pyro.param("probs")
        pyro.sample("x", Dist(probs))

    # At the exact marginals the ELBO gradient w.r.t. probs must vanish.
    loss = elbo.differentiable_loss(model, exact_guide)
    assert_equal(grad(loss, [pyro.param("probs")])[0], torch.zeros_like(probs))


@pytest.mark.parametrize(
    "ok,enumerate_guide,num_particles,vectorize_particles",
    [
        (True, None, 1, False),
        (False, "sequential", 1, False),
        (False, "parallel", 1, False),
        (False, None, 2, False),
        (False, None, 2, True),
    ],
)
def test_compute_marginals_restrictions(
    ok, enumerate_guide, num_particles, vectorize_particles
):
    # compute_marginals is expected to work only for a non-enumerating,
    # single-particle guide; other configurations must raise.
    @config_enumerate
    def model():
        w = pyro.sample("w", dist.Bernoulli(0.1))
        x = pyro.sample("x", dist.Bernoulli(0.2))
        y = pyro.sample("y", dist.Bernoulli(0.3))
        z = pyro.sample("z", dist.Bernoulli(0.4))
        pyro.sample("obs", dist.Normal(0.0, 1.0), obs=w + x + y + z)

    @config_enumerate(default=enumerate_guide)
    def guide():
        pyro.sample("w", dist.Bernoulli(0.4))
        pyro.sample("y", dist.Bernoulli(0.7))

    # Check that the ELBO works fine.
    elbo = TraceEnum_ELBO(
        max_plate_nesting=0,
        num_particles=num_particles,
        vectorize_particles=vectorize_particles,
    )
    loss = elbo.loss(model, guide)
    assert not torch_isnan(loss)
    if ok:
        marginals = elbo.compute_marginals(model, guide)
        # w and y are sampled by the guide, so only x and z have marginals.
        assert set(marginals.keys()) == {"x", "z"}
    else:
        with pytest.raises(NotImplementedError, match="compute_marginals"):
            elbo.compute_marginals(model, guide)


@pytest.mark.parametrize("size", [1, 2, 3, 4, 10, 20, _skip_cuda(30)])
def test_compute_marginals_hmm(size):
    # Marginals of the latent chain of a small two-state HMM whose final
    # transition is observed; shapes are checked here and monotonicity of
    # the marginals just below.
    @config_enumerate
    def model(data):
        transition_probs = torch.tensor([[0.75, 0.25], [0.25, 0.75]])
        emission_probs = torch.tensor([[0.75, 0.25], [0.25, 0.75]])
        x = torch.tensor(0)
        for i in pyro.markov(range(len(data) + 1)):
            if i < len(data):
                x = pyro.sample("x_{}".format(i), dist.Categorical(transition_probs[x]))
                pyro.sample(
                    "y_{}".format(i), dist.Categorical(emission_probs[x]), obs=data[i]
                )
            else:
                # Final step: the transition target is observed to be state 1.
                pyro.sample(
                    "x_{}".format(i),
                    dist.Categorical(transition_probs[x]),
                    obs=torch.tensor(1),
                )

    def guide(data):
        pass

    data = torch.zeros(size, dtype=torch.long)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    marginals = elbo.compute_marginals(model, guide, data)
    assert set(marginals.keys()) == {"x_{}".format(i) for i in range(size)}
    for i in range(size):
        d = marginals["x_{}".format(i)]
        assert d.batch_shape == ()

    # The x's should be monotonically increasing, since we've observed x[-1]==0
    # and x[size]==1, and since the y's are constant.
for i in range(size - 1): d1 = marginals["x_{}".format(i)] d2 = marginals["x_{}".format(i + 1)] assert d1.probs[0] > d2.probs[0] assert d1.probs[1] < d2.probs[1] @pytest.mark.parametrize("observed", ["", "a", "b", "ab"]) def test_marginals_2678(observed): @config_enumerate def model(a=None, b=None): a = pyro.sample("a", dist.Bernoulli(0.75), obs=a) pyro.sample("b", dist.Bernoulli(1 - 0.25 * a), obs=b) def guide(a=None, b=None): pass kwargs = {name: torch.tensor(1.0) for name in observed} elbo = TraceEnum_ELBO(strict_enumeration_warning=False) elbo.compute_marginals(model, guide, **kwargs) @pytest.mark.parametrize( "data", [ [None, None], [torch.tensor(0.0), None], [None, torch.tensor(0.0)], [torch.tensor(0.0), torch.tensor(0)], ], ) def test_backwardsample_posterior_smoke(data): @config_enumerate def model(data): xs = list(data) zs = [] for i in range(2): K = i + 2 # number of mixture components zs.append(pyro.sample("z_{}".format(i), dist.Categorical(torch.ones(K)))) if i == 0: loc = pyro.param("loc", torch.randn(K))[zs[i]] xs[i] = pyro.sample( "x_{}".format(i), dist.Normal(loc, 1.0), obs=data[i] ) elif i == 1: logits = pyro.param("logits", torch.randn(K, 2))[zs[i]] xs[i] = pyro.sample( "x_{}".format(i), dist.Categorical(logits=logits), obs=data[i] ) z12 = zs[0] + 2 * zs[1] pyro.sample("z_12", dist.Categorical(torch.arange(6.0)), obs=z12) return xs, zs def guide(data): pass elbo = TraceEnum_ELBO(max_plate_nesting=1) xs, zs = elbo.sample_posterior(model, guide, data) for x, datum in zip(xs, data): assert datum is None or datum is x for z in zs: assert z.shape == () def test_backwardsample_posterior_2(): num_particles = 10000 @config_enumerate def model(data): with pyro.plate("particles", num_particles): p_z = torch.tensor([0.1, 0.9]) x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5]))) z = pyro.sample("z", dist.Bernoulli(p_z[x]), obs=data) return x, z def guide(data): pass elbo = TraceEnum_ELBO(max_plate_nesting=1) x, z = elbo.sample_posterior(model, 
guide, data=torch.zeros(num_particles)) expected = 0.9 actual = (x.type_as(z) == z).float().mean().item() assert abs(expected - actual) < 0.05 def test_backwardsample_posterior_3(): num_particles = 10000 @config_enumerate def model(data): with pyro.plate("particles", num_particles): p_z = torch.tensor([[0.9, 0.1], [0.1, 0.9]]) x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5]))) y = pyro.sample("y", dist.Categorical(torch.tensor([0.5, 0.5]))) z = pyro.sample("z", dist.Bernoulli(p_z[x, y]), obs=data) return x, y, z def guide(data): pass elbo = TraceEnum_ELBO(max_plate_nesting=1) x, y, z = elbo.sample_posterior(model, guide, data=torch.ones(num_particles)) expected = 0.9 actual = (x == y).float().mean().item() assert abs(expected - actual) < 0.05 x, y, z = elbo.sample_posterior(model, guide, data=torch.zeros(num_particles)) expected = 0.1 actual = (x == y).float().mean().item() assert abs(expected - actual) < 0.05 @pytest.mark.parametrize( "ok,enumerate_guide,num_particles,vectorize_particles", [ (True, None, 1, False), (False, "sequential", 1, False), (False, "parallel", 1, False), (False, None, 2, False), (False, None, 2, True), ], ) def test_backwardsample_posterior_restrictions( ok, enumerate_guide, num_particles, vectorize_particles ): @config_enumerate def model(): w = pyro.sample("w", dist.Bernoulli(0.1)) x = pyro.sample("x", dist.Bernoulli(0.2)) y = pyro.sample("y", dist.Bernoulli(0.3)) z = pyro.sample("z", dist.Bernoulli(0.4)) pyro.sample("obs", dist.Normal(0.0, 1.0), obs=w + x + y + z) return w, x, y, z @config_enumerate(default=enumerate_guide) def guide(): pyro.sample("w", dist.Bernoulli(0.4)) pyro.sample("y", dist.Bernoulli(0.7)) # Check that the ELBO works fine. 
elbo = TraceEnum_ELBO( max_plate_nesting=0, num_particles=num_particles, vectorize_particles=vectorize_particles, ) loss = elbo.loss(model, guide) assert not torch_isnan(loss) if ok: w, x, y, z = elbo.sample_posterior(model, guide) assert w.shape == () assert x.shape == () assert y.shape == () assert z.shape == () else: with pytest.raises(NotImplementedError, match="sample_posterior"): elbo.sample_posterior(model, guide) @pytest.mark.parametrize("num_samples", [10000, 100000]) def test_vectorized_importance(num_samples): pyro.param( "model_probs_a", torch.tensor([0.45, 0.55]), constraint=constraints.simplex ) pyro.param( "model_probs_b", torch.tensor([0.6, 0.4]), constraint=constraints.simplex ) pyro.param( "model_probs_c", torch.tensor( [[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]], [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]] ), constraint=constraints.simplex, ) pyro.param( "guide_probs_a", torch.tensor([0.33, 0.67]), constraint=constraints.simplex ) pyro.param( "guide_probs_b", torch.tensor([0.8, 0.2]), constraint=constraints.simplex ) data = torch.tensor([[0, 1], [0, 2]]) def model(): probs_a = pyro.param("model_probs_a") probs_b = pyro.param("model_probs_b") probs_c = pyro.param("model_probs_c") a = pyro.sample("a", dist.Categorical(probs_a)) with pyro.plate("outer", 2): b = pyro.sample("b", dist.Categorical(probs_b)) with pyro.plate("inner", 2): pyro.sample("c", dist.Categorical(Vindex(probs_c)[a, b]), obs=data) def guide(): probs_a = pyro.param("guide_probs_a") pyro.sample("a", dist.Categorical(probs_a)) probs_b = pyro.param("guide_probs_b") with pyro.plate("outer", 2): pyro.sample("b", dist.Categorical(probs_b)) vectorized_weights, _, _ = vectorized_importance_weights( model, guide, max_plate_nesting=4, num_samples=num_samples ) elbo = Trace_ELBO(vectorize_particles=True, num_particles=num_samples).loss( model, guide ) assert_equal(vectorized_weights.sum().item() / num_samples, -elbo, prec=0.02) def test_multi_dependence_enumeration(): """ This test checks whether enumeration 
works correctly in the case where multiple downstream variables are coupled to the same random discrete variable. This is based on [issue 2223](https://github.com/pyro-ppl/pyro/issues/2223), and should pass when it has been resolved """ K = 5 d = 2 N_obs = 3 @config_enumerate def model(N=1): with pyro.plate("data_plate", N, dim=-2): mixing_weights = pyro.param( "pi", torch.ones(K) / K, constraint=constraints.simplex ) means = pyro.sample( "mu", dist.Normal(torch.zeros(K, d), torch.ones(K, d)).to_event(2) ) with pyro.plate("observations", N_obs, dim=-1): s = pyro.sample("s", dist.Categorical(mixing_weights)) pyro.sample("x", dist.Normal(Vindex(means)[..., s, :], 0.1).to_event(1)) pyro.sample("y", dist.Normal(Vindex(means)[..., s, :], 0.1).to_event(1)) x = poutine.trace(model).get_trace(N=2).nodes["x"]["value"] pyro.clear_param_store() conditioned_model = pyro.condition(model, data={"x": x}) guide = infer.autoguide.AutoDelta(poutine.block(conditioned_model, hide=["s"])) elbo = infer.TraceEnum_ELBO(max_plate_nesting=2) elbo.loss_and_grads(conditioned_model, guide, x.size(0)) assert pyro.get_param_store()._params["pi"].grad is not None
{ "content_hash": "733344d6d19b91d6c773aedd4f2df91e", "timestamp": "", "source": "github", "line_count": 4105, "max_line_length": 94, "avg_line_length": 35.44652862362972, "alnum_prop": 0.5454957803007395, "repo_name": "uber/pyro", "id": "503d773d3c40e64c47e59cc18f87e3e17f28fea4", "size": "145597", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "tests/infer/test_enum.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "6121" }, { "name": "CSS", "bytes": "478" }, { "name": "Dockerfile", "bytes": "1635" }, { "name": "Makefile", "bytes": "6857" }, { "name": "Python", "bytes": "3388193" }, { "name": "Shell", "bytes": "6465" }, { "name": "TeX", "bytes": "3649" } ], "symlink_target": "" }
import os import sys import logging from importlib import import_module # 3rd party import click import yaml from flask_appbuilder import const as fab_const LOGGER = logging.getLogger(__name__) FLASK_PROJECT_DIRECTORY = '/opt/trans_passports/code/trans_passports' def import_application(app_package, appbuilder): sys.path.append(os.getcwd()) try: _app = import_module(app_package) except Exception as e: click.echo(click.style('Was unable to import {0} Error: {1}'.format(app_package, e), fg='red')) exit(3) if hasattr(_app, 'appbuilder'): return getattr(_app, appbuilder) else: click.echo(click.style('There in no appbuilder var on your package, you can use appbuilder parameter to config', fg='red')) exit(3) def create_flask_admin(**params): curdir = os.path.curdir os.chdir(FLASK_PROJECT_DIRECTORY) try: # TBD: Put in a yaml file params_dflt = { 'app': 'trans_passports', 'appbuilder': u'appbuilder', 'username': u'admin', 'firstname': u'admin', 'lastname': u'user', 'email': u'mjr.berends@gmail.com', 'password': u'admin' } # Update defaults and rename as 'params' for clarity params_dflt.update(params) params = params_dflt _appbuilder = import_application(params['app'], params['appbuilder']) # Describe auth method auth_type = {fab_const.AUTH_DB:"Database Authentications", fab_const.AUTH_OID:"OpenID Authentication", fab_const.AUTH_LDAP:"LDAP Authentication", fab_const.AUTH_REMOTE_USER:"WebServer REMOTE_USER Authentication", fab_const.AUTH_OAUTH:"OAuth Authentication"} click.echo(click.style('Recognized auth method {0}.'.format(auth_type.get(_appbuilder.sm.auth_type,'No Auth method')), fg='green')) # Create roles role_admin = _appbuilder.sm.find_role(_appbuilder.sm.auth_role_admin) user = _appbuilder.sm.add_user( params['username'], params['firstname'], params['lastname'], params['email'], role_admin, params['password']) if user: click.echo(click.style('Admin User {0} created.'.format(params['username']), fg='green')) else: click.echo(click.style('No user created an error 
occured', fg='red')) finally: # Change back to the original directory os.chdir(curdir) if __name__ == '__main__': create_flask_admin()
{ "content_hash": "cc8bfd6366e78d0e6e2c84fcddaa8777", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 139, "avg_line_length": 35.94444444444444, "alnum_prop": 0.6093508500772797, "repo_name": "approximatelylinear/trans-passports-docker", "id": "09240a4521a25e4dd80a6845088ed5da60ec7a3a", "size": "2624", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bin/create_flask_admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "6477" }, { "name": "HTML", "bytes": "128" }, { "name": "Makefile", "bytes": "9526" }, { "name": "Nginx", "bytes": "1133" }, { "name": "Python", "bytes": "1548921" }, { "name": "Shell", "bytes": "1604" } ], "symlink_target": "" }
"""A middleware that turns exceptions into parsable string. Inspired by Cinder's faultwrapper """ import sys import traceback from oslo.config import cfg import webob from murano.common import wsgi from murano.packages import exceptions as pkg_exc cfg.CONF.import_opt('debug', 'murano.openstack.common.log') class HTTPExceptionDisguise(Exception): """Disguises HTTP exceptions so they can be handled by the webob fault application in the wsgi pipeline. """ def __init__(self, exception): self.exc = exception self.tb = sys.exc_info()[2] class Fault(object): def __init__(self, error): self.error = error @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if req.content_type == 'application/xml': serializer = wsgi.XMLDictSerializer() else: serializer = wsgi.JSONDictSerializer() resp = webob.Response(request=req) default_webob_exc = webob.exc.HTTPInternalServerError() resp.status_code = self.error.get('code', default_webob_exc.code) serializer.default(resp, self.error) return resp class FaultWrapper(wsgi.Middleware): """Replace error body with something the client can parse.""" @classmethod def factory(cls, global_conf, **local_conf): def filter(app): return cls(app) return filter error_map = { 'ValueError': webob.exc.HTTPBadRequest, 'LookupError': webob.exc.HTTPNotFound, 'PackageClassLoadError': webob.exc.HTTPBadRequest, 'PackageUILoadError': webob.exc.HTTPBadRequest, 'PackageLoadError': webob.exc.HTTPBadRequest, 'PackageFormatError': webob.exc.HTTPBadRequest, } def _map_exception_to_error(self, class_exception): if class_exception == Exception: return webob.exc.HTTPInternalServerError if class_exception.__name__ not in self.error_map: return self._map_exception_to_error(class_exception.__base__) return self.error_map[class_exception.__name__] def _error(self, ex): trace = None webob_exc = None if isinstance(ex, HTTPExceptionDisguise): # An HTTP exception was disguised so it could make it here # let's remove the disguise and set the original HTTP 
exception if cfg.CONF.debug: trace = ''.join(traceback.format_tb(ex.tb)) ex = ex.exc webob_exc = ex ex_type = ex.__class__.__name__ full_message = unicode(ex) if full_message.find('\n') > -1: message, msg_trace = full_message.split('\n', 1) else: msg_trace = traceback.format_exc() message = full_message if isinstance(ex, pkg_exc.PackageException): message = ex.message if cfg.CONF.debug and not trace: trace = msg_trace if not webob_exc: webob_exc = self._map_exception_to_error(ex.__class__) error = { 'code': webob_exc.code, 'title': webob_exc.title, 'explanation': webob_exc.explanation, 'error': { 'message': message, 'type': ex_type, 'traceback': trace, } } return error def process_request(self, req): try: return req.get_response(self.application) except Exception as exc: return req.get_response(Fault(self._error(exc)))
{ "content_hash": "ec0176b5e90c628c319822525df826f0", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 75, "avg_line_length": 29.73109243697479, "alnum_prop": 0.6031656302996043, "repo_name": "sergmelikyan/murano", "id": "278c73fe45690174db8faed9b069acfcabc85df7", "size": "4084", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "murano/api/middleware/fault.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "PowerShell", "bytes": "8634" }, { "name": "Python", "bytes": "873914" }, { "name": "Shell", "bytes": "5656" } ], "symlink_target": "" }
import urllib try: import simplejson as json except ImportError: import json import os import httplib import mimetypes import re import csv # This is the main interface class. You can see an example of it in use # below, implementing a command-line tool, but you basically just instantiate # dstk = DSTK() # and then call the method you want # coordinates = dstk.ip2coordinates('12.34.56.78') # The full documentation is at http://www.datasciencetoolkit.org/developerdocs class DSTK: api_base = None def __init__(self, options=None): if options is None: options = {} defaultOptions = { 'apiBase': 'http://www.datasciencetoolkit.org', 'checkVersion': True } if 'DSTK_API_BASE' in os.environ: defaultOptions['apiBase'] = os.environ['DSTK_API_BASE'] for key, value in defaultOptions.items(): if key not in options: options[key] = value self.api_base = options['apiBase'] if options['checkVersion']: self.check_version() def check_version(self): required_version = 40 api_url = self.api_base+'/info' try: response_string = urllib.urlopen(api_url).read() response = json.loads(response_string) except: raise Exception('The server at "'+self.api_base+'" doesn\'t seem to be running DSTK, no version information found.') actual_version = response['version'] if actual_version < required_version: raise Exception('DSTK: Version '+str(actual_version)+' found at "'+api_url+'" but '+str(required_version)+' is required') def ip2coordinates(self, ips): if not isinstance(ips, (list, tuple)): ips = [ips] api_url = self.api_base+'/ip2coordinates' api_body = json.dumps(ips) response_string = urllib.urlopen(api_url, api_body).read() response = json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response def street2coordinates(self, addresses): if not isinstance(addresses, (list, tuple)): addresses = [addresses] api_url = self.api_base+'/street2coordinates' api_body = json.dumps(addresses) response_string = urllib.urlopen(api_url, api_body).read() response = 
json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response def coordinates2politics(self, coordinates): api_url = self.api_base+'/coordinates2politics' api_body = json.dumps(coordinates) response_string = urllib.urlopen(api_url, api_body).read() response = json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response def text2places(self, text): api_url = self.api_base+'/text2places' api_body = text response_string = urllib.urlopen(api_url, api_body).read() response = json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response def file2text(self, file_name, file_data): host = self.api_base.replace('http://', '') response = post_multipart(host, '/file2text',[],[('inputfile', file_name, file_data)]) return response def text2sentences(self, text): api_url = self.api_base+'/text2sentences' api_body = text response_string = urllib.urlopen(api_url, api_body).read() response = json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response def html2text(self, html): api_url = self.api_base+'/html2text' api_body = html response_string = urllib.urlopen(api_url, api_body).read() response = json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response def html2story(self, html): api_url = self.api_base+'/html2story' api_body = html response_string = urllib.urlopen(api_url, api_body).read() response = json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response def text2people(self, text): api_url = self.api_base+'/text2people' api_body = text response_string = urllib.urlopen(api_url, api_body).read() response = json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response def text2times(self, text): api_url = self.api_base+'/text2times' api_body = text response_string = 
urllib.urlopen(api_url, api_body).read() response = json.loads(response_string) if 'error' in response: raise Exception(response['error']) return response # We need to post files as multipart form data, and Python has no native function for # that, so these utility functions implement what we need. # See http://code.activestate.com/recipes/146306/ def post_multipart(host, selector, fields, files): """ Post fields and files to an http host as multipart/form-data. fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return the server's response page. """ content_type, body = encode_multipart_formdata(fields, files) h = httplib.HTTP(host) h.putrequest('POST', selector) h.putheader('content-type', content_type) h.putheader('content-length', str(len(body))) h.endheaders() h.send(body) errcode, errmsg, headers = h.getreply() return h.file.read() def encode_multipart_formdata(fields, files): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) for (key, filename, value) in files: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)) L.append('Content-Type: %s' % guess_content_type(filename)) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body def guess_content_type(filename): return mimetypes.guess_type(filename)[0] or 'application/octet-stream' # End of the interface. 
The rest of this file is an example implementation of a # command line client. def ip2coordinates_cli(dstk, options, inputs, output): writer = csv.writer(sys.stdout) input_ips = [] for input_line in inputs: ip_match = re.match(r'[12]?\d?\d\.[12]?\d?\d\.[12]?\d?\d\.[12]?\d?\d', input_line) if ip_match is not None: input_ips.append(ip_match.group(0)) else: print 'No match' result = dstk.ip2coordinates(input_ips) if options['showHeaders']: for ip, info in result.items(): if info is None: continue row = ['ip_address'] for key, value in info.items(): row.append(str(key)) writer.writerow(row) break for ip, info in result.items(): if info is None: info = {} row = [ip] for key, value in info.items(): row.append(str(value)) writer.writerow(row) return def street2coordinates_cli(dstk, options, inputs, output): writer = csv.writer(sys.stdout) result = dstk.street2coordinates(inputs) if options['showHeaders']: for ip, info in result.items(): if info is None: continue row = ['address'] for key, value in info.items(): row.append(str(key)) writer.writerow(row) break for ip, info in result.items(): if info is None: info = {} row = [ip] for key, value in info.items(): row.append(str(value)) writer.writerow(row) return def coordinates2politics_cli(dstk, options, inputs, output): writer = csv.writer(output) coordinates_list = [] for input in inputs: coordinates = input.split(',') if len(coordinates)!=2: output.write('You must enter coordinates as a series of comma-separated pairs, eg 37.76,-122.42') exit(-1) coordinates_list.append([coordinates[0], coordinates[1]]) result = dstk.coordinates2politics(coordinates_list) if options['showHeaders']: row = ['latitude', 'longitude', 'name', 'code', 'type', 'friendly_type'] writer.writerow(row) for info in result: location = info['location'] politics = info['politics'] for politic in politics: row = [location['latitude'], location['longitude'], politic['name'], politic['code'], politic['type'], politic['friendly_type'], ] writer.writerow(row) 
return def file2text_cli(dstk, options, inputs, output): for file_name in inputs: if os.path.isdir(file_name): children = os.listdir(file_name) full_children = [] for child in children: full_children.append(os.path.join(file_name, child)) file2text_cli(dstk, options, full_children) else: file_data = get_file_or_url_contents(file_name) if options['showHeaders']: output.write('--File--: '+file_name+"\n") result = dstk.file2text(file_name, file_data) print result return def text2places_cli(dstk, options, inputs, output): writer = csv.writer(output) if options['showHeaders']: row = ['latitude', 'longitude', 'name', 'type', 'start_index', 'end_index', 'matched_string', 'file_name'] writer.writerow(row) options['showHeaders'] = False if options['from_stdin']: result = dstk.text2places("\n".join(inputs)) text2places_format(result, 'stdin', writer) return for file_name in inputs: if os.path.isdir(file_name): children = os.listdir(file_name) full_children = [] for child in children: full_children.append(os.path.join(file_name, child)) text2places_cli(dstk, options, full_children, output) else: file_data = get_file_or_url_contents(file_name) result = dstk.text2places(file_data) text2places_format(result, file_name, writer) return def text2places_format(result, file_name, writer): for info in result: row = [info['latitude'], info['longitude'], info['name'], info['type'], info['start_index'], info['end_index'], info['matched_string'], file_name ] writer.writerow(row) return def html2text_cli(dstk, options, inputs, output): if options['from_stdin']: result = dstk.html2text("\n".join(inputs)) print result['text'] return for file_name in inputs: if os.path.isdir(file_name): children = os.listdir(file_name) full_children = [] for child in children: full_children.append(os.path.join(file_name, child)) html2text_cli(dstk, options, full_children, output) else: file_data = get_file_or_url_contents(file_name) if options['showHeaders']: output.write('--File--: '+file_name+"\n") result = 
dstk.html2text(file_data) print result['text'] return def text2sentences_cli(dstk, options, inputs, output): if options['from_stdin']: result = dstk.text2sentences("\n".join(inputs)) print result['sentences'] return for file_name in inputs: if os.path.isdir(file_name): children = os.listdir(file_name) full_children = [] for child in children: full_children.append(os.path.join(file_name, child)) text2sentences_cli(dstk, options, full_children, output) else: file_data = get_file_or_url_contents(file_name) if options['showHeaders']: output.write('--File--: '+file_name+"\n") result = dstk.text2sentences(file_data) print result['sentences'] return def html2story_cli(dstk, options, inputs, output): if options['from_stdin']: result = dstk.html2story("\n".join(inputs)) print result['story'] return for file_name in inputs: if os.path.isdir(file_name): children = os.listdir(file_name) full_children = [] for child in children: full_children.append(os.path.join(file_name, child)) html2story_cli(dstk, options, full_children, output) else: file_data = get_file_or_url_contents(file_name) if options['showHeaders']: output.write('--File--: '+file_name+"\n") result = dstk.html2story(file_data) print result['story'] return def text2people_cli(dstk, options, inputs, output): writer = csv.writer(sys.stdout) if options['showHeaders']: row = ['matched_string', 'first_name', 'surnames', 'title', 'gender', 'start_index', 'end_index', 'file_name'] writer.writerow(row) options['showHeaders'] = False if options['from_stdin']: result = dstk.text2people("\n".join(inputs)) text2people_format(result, 'stdin', writer) return for file_name in inputs: if os.path.isdir(file_name): children = os.listdir(file_name) full_children = [] for child in children: full_children.append(os.path.join(file_name, child)) text2places_cli(dstk, options, full_children, output) else: file_data = get_file_or_url_contents(file_name) result = dstk.text2people(file_data) text2people_format(result, file_name, writer) return 
def text2people_format(result, file_name, writer): for info in result: row = [ info['matched_string'], info['first_name'], info['surnames'], info['title'], info['gender'], str(info['start_index']), str(info['end_index']), file_name ] writer.writerow(row) return def text2times_cli(dstk, options, inputs, output): writer = csv.writer(sys.stdout) if options['showHeaders']: row = ['matched_string', 'time_string', 'time_seconds', 'is_relative', 'start_index', 'end_index', 'file_name'] writer.writerow(row) options['showHeaders'] = False if options['from_stdin']: result = dstk.text2times("\n".join(inputs)) text2times_format(result, 'stdin', writer) return for file_name in inputs: if os.path.isdir(file_name): children = os.listdir(file_name) full_children = [] for child in children: full_children.append(os.path.join(file_name, child)) text2times_cli(dstk, options, full_children, output) else: file_data = get_file_or_url_contents(file_name) result = dstk.text2times(file_data) text2times_format(result, file_name, writer) return def text2times_format(result, file_name, writer): for info in result: row = [ info['matched_string'], info['time_string'], info['time_seconds'], info['is_relative'], str(info['start_index']), str(info['end_index']), file_name ] writer.writerow(row) return def get_file_or_url_contents(file_name): if re.match(r'http://', file_name): file_data = urllib.urlopen(file_name).read() else: file_data = open(file_name).read() return file_data def print_usage(message=''): print message print "Usage:" print "python dstk.py <command> [-a/--api_base 'http://yourhost.com'] [-h/--show_headers] <inputs>" print "Where <command> is one of:" print " ip2coordinates (lat/lons for IP addresses)" print " street2coordinates (lat/lons for postal addresses)" print " coordinates2politics (country/state/county/constituency/etc for lat/lon)" print " text2places (lat/lons for places mentioned in unstructured text)" print " file2text (PDF/Excel/Word to text, and OCR on PNG/Jpeg/Tiff 
images)" print " text2sentences (parts of the text that look like proper sentences)" print " html2text (text version of the HTML document)" print " html2story (text version of the HTML with no boilerplate)" print " text2people (gender for people mentioned in unstructured text)" print " text2times (times and dates mentioned in unstructured text)" print "If no inputs are specified, then standard input will be read and used" print "See http://www.datasciencetoolkit.org/developerdocs for more details" print "Examples:" print "python dstk.py ip2coordinates 67.169.73.113" print "python dstk.py street2coordinates \"2543 Graystone Place, Simi Valley, CA 93065\"" print "python dstk.py file2text scanned.jpg" exit(-1) if __name__ == '__main__': import sys commands = { 'ip2coordinates': { 'handler': ip2coordinates_cli }, 'street2coordinates': { 'handler': street2coordinates_cli }, 'coordinates2politics': { 'handler': coordinates2politics_cli }, 'text2places': { 'handler': text2places_cli }, 'file2text': { 'handler': file2text_cli }, 'text2sentences': { 'handler': text2sentences_cli }, 'html2text': { 'handler': html2text_cli }, 'html2story': { 'handler': html2story_cli }, 'text2people': { 'handler': text2people_cli }, 'text2times': { 'handler': text2times_cli }, } switches = { 'api_base': True, 'show_headers': True } command = None options = {'showHeaders': False} inputs = [] ignore_next = False for index, arg in enumerate(sys.argv[1:]): if ignore_next: ignore_next = False continue if arg[0]=='-' and len(arg)>1: if len(arg) == 2: letter = arg[1] if letter == 'a': option = 'api_base' elif letter == 'h': option = 'show_headers' else: option = arg[2:] if option not in switches: print_usage('Unknown option "'+arg+'"') if option == 'api_base': if (index+2) >= len(sys.argv): print_usage('Missing argument for option "'+arg+'"') options['apiBase'] = sys.argv[index+2] ignore_next = True elif option == 'show_headers': options['showHeaders'] = True else: if command is None: command = arg 
if command not in commands: print_usage('Unknown command "'+arg+'"') else: inputs.append(arg) if command is None: print_usage('No command specified') if len(inputs)<1: options['from_stdin'] = True inputs = sys.stdin.readlines() else: options['from_stdin'] = False command_info = commands[command] dstk = DSTK(options) command_info['handler'](dstk, options, inputs, sys.stdout)
{ "content_hash": "f4ad18ab2d0fb2948ad14dabcafb7b24", "timestamp": "", "source": "github", "line_count": 658, "max_line_length": 133, "avg_line_length": 31.416413373860184, "alnum_prop": 0.5784152476780186, "repo_name": "SwoopSearch/pyaddress", "id": "a2bc28143176b37e1b0d938c2c9925590d59cbf6", "size": "21529", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "address/dstk.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "63859" } ], "symlink_target": "" }
from setuptools import setup import pymesh setup( name=pymesh.__title__, packages=[pymesh.__title__], version=pymesh.__version__, author=pymesh.__author__, author_email="taxpon@gmail.com", description="Library for manipulating (Translate, Rotate and Scale) 3D data using numpy.", url=pymesh.__url__, license=pymesh.__license__, classifiers=[ 'License :: OSI Approved :: MIT License', "Programming Language :: Python", ], install_requires=[ 'numpy' ], )
{ "content_hash": "12bc4bd6f6dbcaa75f42a8ad13343c8a", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 94, "avg_line_length": 25.095238095238095, "alnum_prop": 0.6204933586337761, "repo_name": "taxpon/pymesh", "id": "1fc38170772a629e22919dff2ed0bedc6ab6bd70", "size": "527", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "19674" }, { "name": "Shell", "bytes": "43" } ], "symlink_target": "" }
import time
from datetime import datetime

from flask import render_template, jsonify, redirect, \
    url_for, flash, request
from flask_login import login_required, login_user,\
    logout_user, current_user

from src import app, db, login_manager
from src.forms import TaskForm, LoginForm, SignInForm
from src.models import User, Task


@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User by its (stringified) primary key."""
    return User.query.get(int(user_id))


@app.route('/')
@app.route('/index')
def index():
    """Index view.

    Shows the logged-in user's own tasks, or the five newest tasks for
    anonymous visitors.
    """
    user = current_user
    # The anonymous-user proxy has no 'id' attribute, so this doubles as
    # an "is authenticated" check.
    if hasattr(user, 'id'):
        return render_template('index.html',
                               new_tasks=Task.get_tasks_for_user(user.id))
    return render_template('index.html', new_tasks=Task.newest(5))


@app.route('/signup', methods=['GET', 'POST'])
def sign_up():
    """Registration view: create a new User, then redirect to login."""
    form = SignInForm()
    if form.validate_on_submit():
        user = User(
            name=form.name.data,
            password=form.password.data,
            email=form.email.data,
        )
        db.session.add(user)
        db.session.commit()
        # Flash message (Polish): "Registration completed successfully!"
        flash('Rejestracja zakończyła się pomyślnie!')
        return redirect(url_for('login'))
    return render_template('sign_up.html', form=form)


@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login view: authenticate by name/password and start a session."""
    form = LoginForm()
    if form.validate_on_submit():
        name = form.name.data
        password = form.password.data
        user = User.get_by_name(name)
        if user is not None and user.check_password(password):
            should_stay_logged = form.remember_me.data
            login_user(user, should_stay_logged)
            # Flash message (Polish): "Logged in <name>."
            flash('Zalogowano {}.'.format(user.name))
            # Honour the ?next= target set e.g. by @login_required.
            # NOTE(review): 'next' is not validated, which is a potential
            # open-redirect -- confirm it should be restricted to local URLs.
            next_page = request.args.get('next')
            return redirect(next_page or url_for('index'))
        # Flash message (Polish): "Invalid user or password."
        flash('Niepoprawny użytkownik lub hasło.')
    return render_template('login.html', form=form)


@app.route('/logout')
def logout():
    """Logout view: end the session and return to the index page."""
    logout_user()
    return redirect(url_for('index'))


@app.route('/task', methods=['GET', 'POST'])
@login_required
def add_task():
    """View for adding a new task for the logged-in user."""
    form = TaskForm()
    if form.validate_on_submit():
        description = form.description.data
        date_due = form.date_due.data
        task = Task(user=current_user,
                    description=description,
                    date_due=date_due)
        db.session.add(task)
        db.session.commit()
        # Flash message (Polish): "Task saved: <description>"
        flash('Zapisano zadanie: {}'.format(description))
        return redirect(url_for('index'))
    return render_template('add_task.html', form=form)


@app.route('/some_json')
def some_json():
    """Json example: returns the current UTC datetime and Unix epoch time."""
    json_ = {
        'date': datetime.utcnow(),
        'epoch_time': time.time(),
    }
    return jsonify(json_)
{ "content_hash": "f8cb335d03ba42bcbcf26654754e5063", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 74, "avg_line_length": 28.887755102040817, "alnum_prop": 0.6026139173436948, "repo_name": "hs3city/pythonhacking-flask", "id": "7e4a1fef4652e5fb3d880213fcc9e85fcff108b5", "size": "2837", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "6356" }, { "name": "Makefile", "bytes": "486" }, { "name": "Python", "bytes": "9910" } ], "symlink_target": "" }
import ujson
from typing import Mapping, Any, Tuple, Optional

from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse

from zerver.decorator import api_key_only_webhook_view, return_success_on_head_request
from zerver.lib.response import json_success, json_error
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.webhooks.common import check_send_webhook_message, \
    UnexpectedWebhookEventType
from zerver.models import UserProfile

from .card_actions import SUPPORTED_CARD_ACTIONS, process_card_action
from .board_actions import SUPPORTED_BOARD_ACTIONS, process_board_action
from .exceptions import UnsupportedAction


@api_key_only_webhook_view('Trello')
@return_success_on_head_request
@has_request_variables
def api_trello_webhook(request: HttpRequest, user_profile: UserProfile,
                       payload: Mapping[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Handle an incoming Trello webhook.

    Dispatches supported card/board actions to a Zulip message; unknown
    action types are reported as an unexpected event type.
    """
    # Re-parse the raw body; the REQ parameter above ensures a body was
    # supplied, but the request body is the authoritative payload source.
    payload = ujson.loads(request.body)
    action_type = payload['action'].get('type')
    try:
        message = get_subject_and_body(payload, action_type)
        if message is None:
            # Supported action that intentionally produces no notification.
            return json_success()
        else:
            subject, body = message
    except UnsupportedAction:
        raise UnexpectedWebhookEventType('Trello', action_type)

    check_send_webhook_message(request, user_profile, subject, body)
    return json_success()


def get_subject_and_body(payload: Mapping[str, Any], action_type: str) -> Optional[Tuple[str, str]]:
    """Return the (subject, body) for a Trello action, or None to skip.

    Raises UnsupportedAction when the action type is neither a supported
    card action nor a supported board action.
    """
    if action_type in SUPPORTED_CARD_ACTIONS:
        return process_card_action(payload, action_type)
    if action_type in SUPPORTED_BOARD_ACTIONS:
        return process_board_action(payload, action_type)

    # Fixed error-message typo: previously read "{} if not supported".
    raise UnsupportedAction('{} is not supported'.format(action_type))
{ "content_hash": "7a6d2e82ed82741b5273076c91883782", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 100, "avg_line_length": 43.166666666666664, "alnum_prop": 0.7302813017098732, "repo_name": "jackrzhang/zulip", "id": "d600c2966460fecabd9e81bc26a815013f182d7d", "size": "1851", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "zerver/webhooks/trello/view/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "428151" }, { "name": "Emacs Lisp", "bytes": "158" }, { "name": "HTML", "bytes": "660198" }, { "name": "JavaScript", "bytes": "2910049" }, { "name": "Pascal", "bytes": "1113" }, { "name": "Perl", "bytes": "398747" }, { "name": "Puppet", "bytes": "90611" }, { "name": "Python", "bytes": "6065880" }, { "name": "Ruby", "bytes": "249744" }, { "name": "Shell", "bytes": "112340" }, { "name": "TypeScript", "bytes": "9543" } ], "symlink_target": "" }
"""Packaging configuration for the threethings distribution."""
try:
    from setuptools import setup
except ImportError:
    # Fall back to the stdlib installer when setuptools is unavailable.
    from distutils.core import setup


def _read_file(path):
    """Return the full text of *path*, closing the handle afterwards."""
    # The previous open(...).read() pattern leaked the file descriptor.
    with open(path) as handle:
        return handle.read()


readme = _read_file('README.rst')
history = _read_file('HISTORY.rst').replace('.. :changelog:', '')

# Runtime dependencies.
requirements = [
    'argh',
    'mako',
    'mandrill',
    'sqlalchemy',
    'pytz',
    'zope.sqlalchemy',
    'transaction',
    'iso8601',
    'python-dateutil',
    'pyramid',
    'pyramid_tm',
    'pyramid_mailer',
    'repoze.sendmail == 4.1',
]

# Dependencies needed only by the test suite.
test_requirements = [
    'testing.postgresql',
    'nose',
    'pyramid_debugtoolbar',
]

setup(
    name='threethings',
    version='0.1.0',
    description='Simple status updates for automated teamwork',
    long_description=readme + '\n\n' + history,
    author='Gavin Carothers',
    author_email='gavin@carothers.name',
    url='https://github.com/gcarothers/threethings',
    packages=[
        'threethings',
    ],
    package_dir={'threethings': 'threethings'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='threethings',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    entry_points="""
    [console_scripts]
    3things=threethings.cli:main
    [paste.app_factory]
    main = threethings.web:main
    """,
)
{ "content_hash": "26b7ec8973eff049be69e3c76edc611c", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 66, "avg_line_length": 25.014492753623188, "alnum_prop": 0.6019698725376593, "repo_name": "gcarothers/threethings", "id": "0c89ae482b47f708a73ea90bd7b7305a56501311", "size": "1774", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "1515" }, { "name": "Python", "bytes": "25966" }, { "name": "Shell", "bytes": "55" } ], "symlink_target": "" }
import sys

from PySide import QtCore, QtGui

import pixelator_rc


# Logical edge length (in printer units) of one rendered cell when printing.
ItemSize = 256


class PixelDelegate(QtGui.QAbstractItemDelegate):
    """Delegate rendering each cell as a black dot whose radius encodes the
    pixel's darkness (brightness 255 draws nothing)."""

    def __init__(self, parent=None):
        super(PixelDelegate, self).__init__(parent)
        self.pixelSize = 12

    def paint(self, painter, option, index):
        """Draw one cell: a filled circle sized by the inverse brightness."""
        if option.state & QtGui.QStyle.State_Selected:
            painter.fillRect(option.rect, option.palette.highlight())

        size = min(option.rect.width(), option.rect.height())
        brightness = index.model().data(index, QtCore.Qt.DisplayRole)
        radius = (size / 2.0) - (brightness / 255.0 * size / 2.0)
        if radius == 0.0:
            # Fully bright pixel: nothing to draw.
            return

        painter.save()
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        painter.setPen(QtCore.Qt.NoPen)

        if option.state & QtGui.QStyle.State_Selected:
            painter.setBrush(option.palette.highlightedText())
        else:
            painter.setBrush(QtGui.QBrush(QtCore.Qt.black))

        painter.drawEllipse(QtCore.QRectF(
            option.rect.x() + option.rect.width() / 2 - radius,
            option.rect.y() + option.rect.height() / 2 - radius,
            2 * radius, 2 * radius))

        painter.restore()

    def sizeHint(self, option, index):
        """Every cell is a square with the configured pixel size."""
        return QtCore.QSize(self.pixelSize, self.pixelSize)

    def setPixelSize(self, size):
        """Set the edge length (pixels) used by sizeHint for each cell."""
        self.pixelSize = size


class ImageModel(QtCore.QAbstractTableModel):
    """Read-only table model exposing an image as a grid of gray levels."""

    def __init__(self, parent=None):
        super(ImageModel, self).__init__(parent)
        self.modelImage = QtGui.QImage()

    def setImage(self, image):
        """Replace the backing image and reset the model."""
        self.modelImage = QtGui.QImage(image)
        self.reset()

    def rowCount(self, parent):
        return self.modelImage.height()

    def columnCount(self, parent):
        return self.modelImage.width()

    def data(self, index, role):
        """Return the gray level (0-255) of the pixel at *index*."""
        if not index.isValid() or role != QtCore.Qt.DisplayRole:
            return None
        return QtGui.qGray(self.modelImage.pixel(index.column(), index.row()))

    def headerData(self, section, orientation, role):
        # Headers are hidden by the view; keep their size hint minimal.
        if role == QtCore.Qt.SizeHintRole:
            return QtCore.QSize(1, 1)
        return None


class MainWindow(QtGui.QMainWindow):
    """Main window: image chooser, pixelated table view, and printing."""

    def __init__(self):
        super(MainWindow, self).__init__()

        self.currentPath = QtCore.QDir.homePath()
        self.model = ImageModel(self)

        centralWidget = QtGui.QWidget()

        self.view = QtGui.QTableView()
        self.view.setShowGrid(False)
        self.view.horizontalHeader().hide()
        self.view.verticalHeader().hide()
        self.view.horizontalHeader().setMinimumSectionSize(1)
        self.view.verticalHeader().setMinimumSectionSize(1)
        self.view.setModel(self.model)

        delegate = PixelDelegate(self)
        self.view.setItemDelegate(delegate)

        pixelSizeLabel = QtGui.QLabel("Pixel size:")
        pixelSizeSpinBox = QtGui.QSpinBox()
        pixelSizeSpinBox.setMinimum(4)
        pixelSizeSpinBox.setMaximum(32)
        pixelSizeSpinBox.setValue(12)

        fileMenu = QtGui.QMenu("&File", self)
        openAction = fileMenu.addAction("&Open...")
        openAction.setShortcut("Ctrl+O")

        # Printing is disabled until an image has been loaded successfully.
        self.printAction = fileMenu.addAction("&Print...")
        self.printAction.setEnabled(False)
        self.printAction.setShortcut("Ctrl+P")

        quitAction = fileMenu.addAction("E&xit")
        quitAction.setShortcut("Ctrl+Q")

        helpMenu = QtGui.QMenu("&Help", self)
        aboutAction = helpMenu.addAction("&About")

        self.menuBar().addMenu(fileMenu)
        self.menuBar().addSeparator()
        self.menuBar().addMenu(helpMenu)

        openAction.triggered.connect(self.chooseImage)
        self.printAction.triggered.connect(self.printImage)
        quitAction.triggered.connect(QtGui.qApp.quit)
        aboutAction.triggered.connect(self.showAboutBox)
        pixelSizeSpinBox.valueChanged[int].connect(delegate.setPixelSize)
        pixelSizeSpinBox.valueChanged[int].connect(self.updateView)

        controlsLayout = QtGui.QHBoxLayout()
        controlsLayout.addWidget(pixelSizeLabel)
        controlsLayout.addWidget(pixelSizeSpinBox)
        controlsLayout.addStretch(1)

        mainLayout = QtGui.QVBoxLayout()
        mainLayout.addWidget(self.view)
        mainLayout.addLayout(controlsLayout)
        centralWidget.setLayout(mainLayout)

        self.setCentralWidget(centralWidget)

        self.setWindowTitle("Pixelator")
        self.resize(640, 480)

    def chooseImage(self):
        """Prompt for an image file and open it."""
        fileName, _ = QtGui.QFileDialog.getOpenFileName(self,
                "Choose an Image", self.currentPath, '*')
        if fileName:
            self.openImage(fileName)

    def openImage(self, fileName):
        """Load *fileName* into the model and enable printing on success."""
        image = QtGui.QImage()
        if image.load(fileName):
            self.model.setImage(image)
            # Resource paths (':/...') are not real filesystem locations, so
            # they neither update the current path nor the window title.
            if not fileName.startswith(':/'):
                self.currentPath = fileName
                self.setWindowTitle("%s - Pixelator" % self.currentPath)
            self.printAction.setEnabled(True)
            self.updateView()

    def printImage(self):
        """Render every model cell through the delegate onto a printer."""
        if self.model.rowCount(QtCore.QModelIndex()) * self.model.columnCount(
                QtCore.QModelIndex()) > 90000:
            answer = QtGui.QMessageBox.question(self, "Large Image Size",
                    "The printed image may be very large. Are you sure that "
                    "you want to print it?",
                    QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
            if answer == QtGui.QMessageBox.No:
                return

        printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)

        dlg = QtGui.QPrintDialog(printer, self)
        dlg.setWindowTitle("Print Image")
        if dlg.exec_() != QtGui.QDialog.Accepted:
            return

        painter = QtGui.QPainter()
        painter.begin(printer)

        rows = self.model.rowCount(QtCore.QModelIndex())
        columns = self.model.columnCount(QtCore.QModelIndex())
        sourceWidth = (columns + 1) * ItemSize
        sourceHeight = (rows + 1) * ItemSize

        painter.save()

        # Scale uniformly so the whole grid fits the printable page.
        xscale = printer.pageRect().width() / float(sourceWidth)
        yscale = printer.pageRect().height() / float(sourceHeight)
        scale = min(xscale, yscale)

        painter.translate(
            printer.pageRect().x() + printer.pageRect().width() / 2,
            printer.pageRect().y() + printer.pageRect().height() / 2)
        painter.scale(scale, scale)
        # Fixed: was "-sourceWidt / 2", a NameError at print time.
        painter.translate(-sourceWidth / 2, -sourceHeight / 2)

        option = QtGui.QStyleOptionViewItem()
        parent = QtCore.QModelIndex()

        progress = QtGui.QProgressDialog("Printing...", "Cancel", 0, rows,
                                         self)
        y = ItemSize / 2.0
        for row in range(rows):
            progress.setValue(row)
            QtGui.qApp.processEvents()
            if progress.wasCanceled():
                break

            x = ItemSize / 2.0

            for col in range(columns):
                option.rect = QtCore.QRect(x, y, ItemSize, ItemSize)
                # Fixed: itemDelegate is a method (needed call parens), and
                # the loop variable is "col", not "column".
                self.view.itemDelegate().paint(painter, option,
                        self.model.index(row, col, parent))
                x = x + ItemSize

            y = y + ItemSize

        progress.setValue(rows)

        painter.restore()
        painter.end()

        if progress.wasCanceled():
            QtGui.QMessageBox.information(self, "Printing canceled",
                    "The printing process was canceled.",
                    QtGui.QMessageBox.Cancel)

    def showAboutBox(self):
        """Show the About dialog for the example."""
        QtGui.QMessageBox.about(self, "About the Pixelator example",
                "This example demonstrates how a standard view and a custom\n"
                "delegate can be used to produce a specialized "
                "representation\nof data in a simple custom model.")

    def updateView(self):
        """Resize rows/columns so cells match the delegate's size hint."""
        self.view.resizeColumnsToContents()
        self.view.resizeRowsToContents()


if __name__ == '__main__':

    import sys

    app = QtGui.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    window.openImage(':/images/qt.png')
    sys.exit(app.exec_())
{ "content_hash": "c91d323b79efdf428dc1e5c4e24cf31b", "timestamp": "", "source": "github", "line_count": 253, "max_line_length": 105, "avg_line_length": 33.37549407114624, "alnum_prop": 0.5900047370914259, "repo_name": "mmanhertz/elopic", "id": "f2d45745ddfd9273572b5f0deb74c93b961ef273", "size": "9517", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sample_code/pixelator.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Makefile", "bytes": "63" }, { "name": "Python", "bytes": "41403" } ], "symlink_target": "" }
#!/usr/bin/env python3

# Tool that assists in upgrading the Envoy source tree to the latest API.
# Internally, Envoy uses the latest vN or vNalpha for a given package. Envoy
# will perform a reflection based version upgrade on any older protos that are
# presented to it in configuration at ingestion time.
#
# Usage (from a clean tree):
#
# api_boost.py --generate_compilation_database --build_api_booster

import argparse
import functools
import json
import os
import multiprocessing as mp
import pathlib
import re
import shlex
import subprocess as sp

# Detect API #includes.
API_INCLUDE_REGEX = re.compile('#include "(envoy/.*)/[^/]+\.pb\.(validate\.)?h"')

# Needed for CI to pass down bazel options.
BAZEL_BUILD_OPTIONS = shlex.split(os.environ.get('BAZEL_BUILD_OPTIONS', ''))


# Obtain the directory containing a path prefix, e.g. ./foo/bar.txt is ./foo,
# ./foo/ba is ./foo, ./foo/bar/ is ./foo/bar.
def PrefixDirectory(path_prefix):
  """Return path_prefix itself when it is a directory, else its dirname."""
  return path_prefix if os.path.isdir(path_prefix) else os.path.dirname(path_prefix)


# Update a C++ file to the latest API.
def ApiBoostFile(llvm_include_path, debug_log, path):
  """Run the api_booster clang tool on one C++ file.

  Returns the sorted, de-duplicated list of inferred API header includes
  (read from the tool's stdout), or None when the file is opted out via an
  API_NO_BOOST_FILE marker.
  """
  print('Processing %s' % path)
  # Files may opt out of boosting by containing this marker anywhere.
  if 'API_NO_BOOST_FILE' in pathlib.Path(path).read_text():
    if debug_log:
      print('Not boosting %s due to API_NO_BOOST_FILE\n' % path)
    return None
  # Run the booster
  try:
    result = sp.run([
        './bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster',
        '--extra-arg-before=-xc++',
        '--extra-arg=-isystem%s' % llvm_include_path,
        '--extra-arg=-Wno-undefined-internal',
        '--extra-arg=-Wno-old-style-cast',
        path,
    ],
                    capture_output=True,
                    check=True)
  except sp.CalledProcessError as e:
    print('api_booster failure for %s: %s %s' % (path, e, e.stderr.decode('utf-8')))
    raise
  if debug_log:
    print(result.stderr.decode('utf-8'))

  # Consume stdout containing the list of inferred API headers.
  return sorted(set(result.stdout.decode('utf-8').splitlines()))


# Rewrite API includes to the inferred headers. Currently this is handled
# outside of the clang-ast-replacements. In theory we could either integrate
# with this or with clang-include-fixer, but it's pretty simply to handle as done
# below, we have more control over special casing as well, so ¯\_(ツ)_/¯.
def RewriteIncludes(args):
  """Replace a file's API #includes with the inferred header list.

  args is a (path, api_includes) pair as produced by zipping the file list
  with the ApiBoostFile results.
  """
  path, api_includes = args

  # Files with API_NO_BOOST_FILE will have None returned by ApiBoostFile.
  if api_includes is None:
    return

  # We just dump the inferred API header includes at the start of the #includes
  # in the file and remove all the present API header includes. This does not
  # match Envoy style; we rely on later invocations of fix_format.sh to take
  # care of this alignment.
  output_lines = []
  include_lines = ['#include "%s"' % f for f in api_includes]
  input_text = pathlib.Path(path).read_text()
  for line in input_text.splitlines():
    # Emit the inferred includes once, at the first #include encountered.
    if include_lines and line.startswith('#include'):
      output_lines.extend(include_lines)
      include_lines = None
    # Exclude API includes, except for a special case related to v2alpha
    # ext_authz; this is needed to include the service descriptor in the build
    # and is a hack that will go away when we remove v2.
    if re.match(API_INCLUDE_REGEX, line) and 'envoy/service/auth/v2alpha' not in line:
      continue
    output_lines.append(line)
  # Rewrite file.
  pathlib.Path(path).write_text('\n'.join(output_lines) + '\n')


# Update the Envoy source tree the latest API.
def ApiBoostTree(target_paths,
                 generate_compilation_database=False,
                 build_api_booster=False,
                 debug_log=False,
                 sequential=False):
  """Boost every eligible C++ file under the given path prefixes.

  Optionally (re)builds the compilation database and the api_booster tool
  first, then runs the booster over all matching files (in parallel unless
  sequential=True) and rewrites their API includes.
  """
  dep_build_targets = ['//%s/...' % PrefixDirectory(prefix) for prefix in target_paths]

  # Optional setup of state. We need the compilation database and api_booster
  # tool in place before we can start boosting.
  if generate_compilation_database:
    print('Building compilation database for %s' % dep_build_targets)
    sp.run(['./tools/gen_compilation_database.py', '--include_headers'] + dep_build_targets,
           check=True)

  if build_api_booster:
    # Similar to gen_compilation_database.py, we only need the cc_library for
    # setup. The long term fix for this is in
    # https://github.com/bazelbuild/bazel/issues/9578.
    #
    # Figure out some cc_libraries that cover most of our external deps. This is
    # the same logic as in gen_compilation_database.py.
    query = 'kind(cc_library, {})'.format(' union '.join(dep_build_targets))
    dep_lib_build_targets = sp.check_output(['bazel', 'query', query]).decode().splitlines()
    # We also need some misc. stuff such as test binaries for setup of benchmark
    # dep.
    query = 'attr("tags", "compilation_db_dep", {})'.format(' union '.join(dep_build_targets))
    dep_lib_build_targets.extend(sp.check_output(['bazel', 'query', query]).decode().splitlines())
    extra_api_booster_args = []
    if debug_log:
      extra_api_booster_args.append('--copt=-DENABLE_DEBUG_LOG')

    # Slightly easier to debug when we build api_booster on its own.
    sp.run([
        'bazel',
        'build',
        '--strip=always',
        '@envoy_dev//clang_tools/api_booster',
    ] + BAZEL_BUILD_OPTIONS + extra_api_booster_args,
           check=True)
    sp.run([
        'bazel',
        'build',
        '--config=libc++',
        '--strip=always',
    ] + BAZEL_BUILD_OPTIONS + dep_lib_build_targets,
           check=True)

  # Figure out where the LLVM include path is. We need to provide this
  # explicitly as the api_booster is built inside the Bazel cache and doesn't
  # know about this path.
  # TODO(htuch): this is fragile and depends on Clang version, should figure out
  # a cleaner approach.
  llvm_include_path = os.path.join(
      sp.check_output([os.getenv('LLVM_CONFIG'), '--libdir']).decode().rstrip(),
      'clang/9.0.0/include')

  # Determine the files in the target dirs eligible for API boosting, based on
  # known files in the compilation database.
  file_paths = set([])
  for entry in json.loads(pathlib.Path('compile_commands.json').read_text()):
    file_path = entry['file']
    if any(file_path.startswith(prefix) for prefix in target_paths):
      file_paths.add(file_path)
  # Ensure a determinstic ordering if we are going to process sequentially.
  if sequential:
    file_paths = sorted(file_paths)

  # The API boosting is file local, so this is trivially parallelizable, use
  # multiprocessing pool with default worker pool sized to cpu_count(), since
  # this is CPU bound.
  try:
    with mp.Pool(processes=1 if sequential else None) as p:
      # We need multiple phases, to ensure that any dependency on files being modified
      # in one thread on consumed transitive headers on the other thread isn't an
      # issue. This also ensures that we complete all analysis error free before
      # any mutation takes place.
      # TODO(htuch): we should move to run-clang-tidy.py once the headers fixups
      # are Clang-based.
      api_includes = p.map(functools.partial(ApiBoostFile, llvm_include_path, debug_log),
                           file_paths)
      # Apply Clang replacements before header fixups, since the replacements
      # are all relative to the original file.
      for prefix_dir in set(map(PrefixDirectory, target_paths)):
        sp.run(['clang-apply-replacements', prefix_dir], check=True)
      # Fixup headers.
      p.map(RewriteIncludes, zip(file_paths, api_includes))
  finally:
    # Cleanup any stray **/*.clang-replacements.yaml.
    for prefix in target_paths:
      clang_replacements = pathlib.Path(
          PrefixDirectory(prefix)).glob('**/*.clang-replacements.yaml')
      for path in clang_replacements:
        path.unlink()


if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='Update Envoy tree to the latest API')
  parser.add_argument('--generate_compilation_database', action='store_true')
  parser.add_argument('--build_api_booster', action='store_true')
  parser.add_argument('--debug_log', action='store_true')
  parser.add_argument('--sequential', action='store_true')
  parser.add_argument('paths', nargs='*', default=['source', 'test', 'include'])
  args = parser.parse_args()

  ApiBoostTree(args.paths,
               generate_compilation_database=args.generate_compilation_database,
               build_api_booster=args.build_api_booster,
               debug_log=args.debug_log,
               sequential=args.sequential)
{ "content_hash": "6de232f017be9cd718d2ac48bc4f2ab7", "timestamp": "", "source": "github", "line_count": 200, "max_line_length": 98, "avg_line_length": 42.685, "alnum_prop": 0.6785756120417008, "repo_name": "envoyproxy/envoy-wasm", "id": "eda6eaf94088136f84eef73eb98206c87cfa2b59", "size": "8541", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tools/api_boost/api_boost.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "34272" }, { "name": "C++", "bytes": "18917650" }, { "name": "Dockerfile", "bytes": "245" }, { "name": "Emacs Lisp", "bytes": "966" }, { "name": "Go", "bytes": "695" }, { "name": "JavaScript", "bytes": "1760" }, { "name": "Makefile", "bytes": "1985" }, { "name": "PowerShell", "bytes": "5725" }, { "name": "PureBasic", "bytes": "472" }, { "name": "Python", "bytes": "392412" }, { "name": "Rust", "bytes": "3471" }, { "name": "Shell", "bytes": "115198" }, { "name": "Starlark", "bytes": "1134460" }, { "name": "Thrift", "bytes": "748" } ], "symlink_target": "" }
import errno
import os

import ddt
import mock
from oslo_config import cfg
from oslo_utils import importutils

from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.glusterfs import layout
from manila import test
from manila.tests import fake_share
from manila.tests import fake_utils


CONF = cfg.CONF

fake_local_share_path = '/mnt/nfs/testvol/fakename'

fake_path_to_private_key = '/fakepath/to/privatekey'
fake_remote_server_password = 'fakepassword'


def fake_access(kwargs):
    """Build a fake access rule whose to_dict() returns its raw values."""
    fake_access_rule = fake_share.fake_access(**kwargs)
    fake_access_rule.to_dict = lambda: fake_access_rule.values
    return fake_access_rule


# Minimal concrete subclass used to exercise GlusterfsShareDriverBase.
class GlusterfsFakeShareDriver(layout.GlusterfsShareDriverBase):

    supported_layouts = ('layout_fake.FakeLayout',
                         'layout_something.SomeLayout')
    # NOTE(review): ('NFS,') is a plain string, not a one-element tuple
    # (missing trailing comma outside the quotes) -- confirm intended.
    supported_protocols = ('NFS,')
    _supported_access_types = ('ip',)
    _supported_access_levels = ('rw',)


@ddt.ddt
class GlusterfsShareDriverBaseTestCase(test.TestCase):
    """Tests GlusterfsShareDriverBase."""

    def setUp(self):
        super(GlusterfsShareDriverBaseTestCase, self).setUp()
        CONF.set_default('driver_handles_share_servers', False)
        fake_conf, __ = self._setup()
        self._driver = GlusterfsFakeShareDriver(False, configuration=fake_conf)
        self.fake_share = mock.Mock(name='fake_share')
        self.fake_context = mock.Mock(name='fake_context')
        self.fake_access = mock.Mock(name='fake_access')

    def _setup(self):
        """Return (configuration, mocked layout) with import_object stubbed."""
        fake_conf = config.Configuration(None)
        fake_layout = mock.Mock()
        self.mock_object(importutils, "import_object",
                         mock.Mock(return_value=fake_layout))
        return fake_conf, fake_layout

    def test_init(self):
        self.assertRaises(IndexError, layout.GlusterfsShareDriverBase, False,
                          configuration=config.Configuration(None))

    @ddt.data({'has_snap': None, 'layout_name': None},
              {'has_snap': False, 'layout_name': 'layout_fake.FakeLayout'},
              {'has_snap': True, 'layout_name': 'layout_something.SomeLayout'})
    @ddt.unpack
    def test_init_subclass(self, has_snap, layout_name):
        conf, _layout = self._setup()
        if layout_name is not None:
            conf.glusterfs_share_layout = layout_name
        if has_snap is None:
            del(_layout._snapshots_are_supported)
        else:
            _layout._snapshots_are_supported = has_snap

        _driver = GlusterfsFakeShareDriver(False, configuration=conf)

        # None means "unset": snapshots default to False, layout to the first
        # supported layout.
        snap_result = {None: False}.get(has_snap, has_snap)
        layout_result = {None: 'layout_fake.FakeLayout'}.get(layout_name,
                                                             layout_name)

        importutils.import_object.assert_called_once_with(
            'manila.share.drivers.glusterfs.%s' % layout_result,
            _driver, configuration=conf)
        self.assertEqual(_layout, _driver.layout)
        self.assertEqual(snap_result, _driver.snapshots_are_supported)

    def test_init_nosupp_layout(self):
        conf = config.Configuration(None)
        conf.glusterfs_share_layout = 'nonsense_layout'
        self.assertRaises(exception.GlusterfsException,
                          GlusterfsFakeShareDriver, False, configuration=conf)

    def test_setup_via_manager(self):
        self.assertIsNone(self._driver._setup_via_manager(mock.Mock()))

    def test_supported_access_types(self):
        self.assertEqual(('ip',), self._driver.supported_access_types)

    def test_supported_access_levels(self):
        self.assertEqual(('rw',), self._driver.supported_access_levels)

    def test_access_rule_validator(self):
        rule = mock.Mock()
        abort = mock.Mock()
        valid = mock.Mock()
        self.mock_object(layout.ganesha_utils, 'validate_access_rule',
                         mock.Mock(return_value=valid))

        ret = self._driver._access_rule_validator(abort)(rule)

        self.assertEqual(valid, ret)
        layout.ganesha_utils.validate_access_rule.assert_called_once_with(
            ('ip',), ('rw',), rule, abort)

    # inset is (existing, add, delete) rules fed to update_access; outset is
    # the (add, delete) pair expected to reach _update_access_via_manager.
    @ddt.data({'inset': ([], ['ADD'], []), 'outset': (['ADD'], []),
               'recovery': False},
              {'inset': ([], [], ['DELETE']), 'outset': ([], ['DELETE']),
               'recovery': False},
              {'inset': (['EXISTING'], ['ADD'], ['DELETE']),
               'outset': (['ADD'], ['DELETE']), 'recovery': False},
              {'inset': (['EXISTING'], [], []), 'outset': (['EXISTING'], []),
               'recovery': True})
    @ddt.unpack
    def test_update_access(self, inset, outset, recovery):
        conf, _layout = self._setup()
        gluster_mgr = mock.Mock(name='gluster_mgr')
        self.mock_object(_layout, '_share_manager',
                         mock.Mock(return_value=gluster_mgr))
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)
        self.mock_object(_driver, '_update_access_via_manager', mock.Mock())
        rulemap = {t: fake_access({'access_type': "ip",
                                   'access_level': "rw",
                                   'access_to': t}) for t in (
            'EXISTING', 'ADD', 'DELETE')}
        in_rules, out_rules = (
            [
                [
                    rulemap[t] for t in r
                ] for r in rs
            ] for rs in (inset, outset))

        _driver.update_access(self.fake_context, self.fake_share, *in_rules)

        _layout._share_manager.assert_called_once_with(self.fake_share)
        _driver._update_access_via_manager.assert_called_once_with(
            gluster_mgr, self.fake_context, self.fake_share, *out_rules,
            recovery=recovery)

    def test_update_access_via_manager(self):
        self.assertRaises(NotImplementedError,
                          self._driver._update_access_via_manager,
                          mock.Mock(), self.fake_context,
                          self.fake_share, [self.fake_access],
                          [self.fake_access])

    @ddt.data('NFS', 'PROTATO')
    def test_check_proto_baseclass(self, proto):
        # The base class supports no protocols, so everything is rejected.
        self.assertRaises(exception.ShareBackendException,
                          layout.GlusterfsShareDriverBase._check_proto,
                          {'share_proto': proto})

    def test_check_proto(self):
        GlusterfsFakeShareDriver._check_proto({'share_proto': 'NFS'})

    def test_check_proto_notsupported(self):
        self.assertRaises(exception.ShareBackendException,
                          GlusterfsFakeShareDriver._check_proto,
                          {'share_proto': 'PROTATO'})

    @ddt.data('', '_from_snapshot')
    def test_create_share(self, variant):
        conf, _layout = self._setup()
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)
        self.mock_object(_driver, '_check_proto', mock.Mock())

        getattr(_driver, 'create_share%s' % variant)(self.fake_context,
                                                     self.fake_share)

        _driver._check_proto.assert_called_once_with(self.fake_share)
        getattr(_layout, 'create_share%s' % variant).assert_called_once_with(
            self.fake_context, self.fake_share)

    @ddt.data(True, False)
    def test_update_share_stats(self, internal_exception):
        data = mock.Mock()
        conf, _layout = self._setup()

        def raise_exception(*args, **kwargs):
            raise NotImplementedError

        layoutstats = mock.Mock()
        mock_kw = ({'side_effect': raise_exception} if internal_exception
                   else {'return_value': layoutstats})
        self.mock_object(_layout, '_update_share_stats', mock.Mock(**mock_kw))
        self.mock_object(driver.ShareDriver, '_update_share_stats',
                         mock.Mock())
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)

        _driver._update_share_stats(data)

        # A layout without stats support must not contribute to the data,
        # but the base driver stats update still runs either way.
        if internal_exception:
            self.assertFalse(data.update.called)
        else:
            data.update.assert_called_once_with(layoutstats)
        driver.ShareDriver._update_share_stats.assert_called_once_with(
            data)

    @ddt.data('do_setup', 'create_snapshot', 'delete_share', 'delete_snapshot',
              'ensure_share', 'manage_existing', 'unmanage', 'extend_share',
              'shrink_share')
    def test_delegated_methods(self, method):
        # These driver entry points pass straight through to the layout.
        conf, _layout = self._setup()
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)
        fake_args = (mock.Mock(), mock.Mock(), mock.Mock())

        getattr(_driver, method)(*fake_args)

        getattr(_layout, method).assert_called_once_with(*fake_args)


@ddt.ddt
class GlusterfsShareLayoutBaseTestCase(test.TestCase):
    """Tests GlusterfsShareLayoutBaseTestCase."""

    def setUp(self):
        super(GlusterfsShareLayoutBaseTestCase, self).setUp()
        fake_utils.stub_out_utils_execute(self)
        self._execute = fake_utils.fake_execute
        self.addCleanup(fake_utils.fake_execute_set_repliers, [])
        self.addCleanup(fake_utils.fake_execute_clear_log)
        self.fake_driver = mock.Mock()
        self.mock_object(self.fake_driver, '_execute',
                         self._execute)

    # Concrete layout implementing every abstract method as a no-op stub.
    class FakeLayout(layout.GlusterfsShareLayoutBase):

        def _share_manager(self, share):
            """Return GlusterManager object representing share's backend."""

        def do_setup(self, context):
            """Any initialization the share driver does while starting."""

        def create_share(self, context, share, share_server=None):
            """Is called to create share."""

        def create_share_from_snapshot(self, context, share, snapshot,
                                       share_server=None):
            """Is called to create share from snapshot."""

        def create_snapshot(self, context, snapshot, share_server=None):
            """Is called to create snapshot."""

        def delete_share(self, context, share, share_server=None):
            """Is called to remove share."""

        def delete_snapshot(self, context, snapshot, share_server=None):
            """Is called to remove snapshot."""

        def ensure_share(self, context, share, share_server=None):
            """Invoked to ensure that share is exported."""

        def manage_existing(self, share, driver_options):
            """Brings an existing share under Manila management."""

        def unmanage(self, share):
            """Removes the specified share from Manila management."""

        def extend_share(self, share, new_size, share_server=None):
            """Extends size of existing share."""

        def shrink_share(self, share, new_size, share_server=None):
            """Shrinks size of existing share."""

    def test_init_invalid(self):
        self.assertRaises(TypeError, layout.GlusterfsShareLayoutBase,
                          mock.Mock())

    def test_subclass(self):
        fake_conf = mock.Mock()
        _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)

        self.assertEqual(fake_conf, _layout.configuration)
        self.assertRaises(NotImplementedError, _layout._update_share_stats)

    def test_check_mount_glusterfs(self):
        fake_conf = mock.Mock()
        _driver = mock.Mock()
        _driver._execute = mock.Mock()
        _layout = self.FakeLayout(_driver, configuration=fake_conf)

        _layout._check_mount_glusterfs()

        _driver._execute.assert_called_once_with(
            'mount.glusterfs', check_exit_code=False)

    # ENOENT (binary missing) is translated to GlusterfsException; other
    # OSErrors such as EACCES propagate unchanged.
    @ddt.data({'_errno': errno.ENOENT,
               '_exception': exception.GlusterfsException},
              {'_errno': errno.EACCES, '_exception': OSError})
    @ddt.unpack
    def test_check_mount_glusterfs_not_installed(self, _errno, _exception):
        fake_conf = mock.Mock()
        _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)

        def exec_runner(*ignore_args, **ignore_kwargs):
            raise OSError(_errno, os.strerror(_errno))

        expected_exec = ['mount.glusterfs']
        fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])

        self.assertRaises(_exception, _layout._check_mount_glusterfs)
{ "content_hash": "a9db47b66ae5a51dde25e4544336d4a6", "timestamp": "", "source": "github", "line_count": 316, "max_line_length": 79, "avg_line_length": 38.9746835443038, "alnum_prop": 0.6066904839233518, "repo_name": "bswartz/manila", "id": "5582cee1f43434b1290c29593a020025bd81641c", "size": "12949", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "manila/tests/share/drivers/glusterfs/test_layout.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "953" }, { "name": "Python", "bytes": "9952105" }, { "name": "Shell", "bytes": "106606" } ], "symlink_target": "" }
"""Test Waze Travel Time sensors.""" from WazeRouteCalculator import WRCError import pytest from homeassistant.components.waze_travel_time.const import ( CONF_AVOID_FERRIES, CONF_AVOID_SUBSCRIPTION_ROADS, CONF_AVOID_TOLL_ROADS, CONF_REALTIME, CONF_UNITS, CONF_VEHICLE_TYPE, DOMAIN, ) from homeassistant.const import CONF_UNIT_SYSTEM_IMPERIAL from .const import MOCK_CONFIG from tests.common import MockConfigEntry @pytest.fixture(name="mock_config") async def mock_config_fixture(hass, data, options): """Mock a Waze Travel Time config entry.""" config_entry = MockConfigEntry( domain=DOMAIN, data=data, options=options, entry_id="test", ) config_entry.add_to_hass(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() @pytest.fixture(name="mock_update_wrcerror") def mock_update_wrcerror_fixture(mock_wrc): """Mock an update to the sensor failed with WRCError.""" obj = mock_wrc.return_value obj.calc_all_routes_info.side_effect = WRCError("test") yield @pytest.fixture(name="mock_update_keyerror") def mock_update_keyerror_fixture(mock_wrc): """Mock an update to the sensor failed with KeyError.""" obj = mock_wrc.return_value obj.calc_all_routes_info.side_effect = KeyError("test") yield @pytest.mark.parametrize( "data,options", [(MOCK_CONFIG, {})], ) @pytest.mark.usefixtures("mock_update", "mock_config") async def test_sensor(hass): """Test that sensor works.""" assert hass.states.get("sensor.waze_travel_time").state == "150" assert ( hass.states.get("sensor.waze_travel_time").attributes["attribution"] == "Powered by Waze" ) assert hass.states.get("sensor.waze_travel_time").attributes["duration"] == 150 assert hass.states.get("sensor.waze_travel_time").attributes["distance"] == 300 assert hass.states.get("sensor.waze_travel_time").attributes["route"] == "My route" assert ( hass.states.get("sensor.waze_travel_time").attributes["origin"] == "location1" ) assert ( hass.states.get("sensor.waze_travel_time").attributes["destination"] == 
"location2" ) assert ( hass.states.get("sensor.waze_travel_time").attributes["unit_of_measurement"] == "min" ) assert hass.states.get("sensor.waze_travel_time").attributes["icon"] == "mdi:car" @pytest.mark.parametrize( "data,options", [ ( MOCK_CONFIG, { CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_REALTIME: True, CONF_VEHICLE_TYPE: "car", CONF_AVOID_TOLL_ROADS: True, CONF_AVOID_SUBSCRIPTION_ROADS: True, CONF_AVOID_FERRIES: True, }, ) ], ) @pytest.mark.usefixtures("mock_update", "mock_config") async def test_imperial(hass): """Test that the imperial option works.""" assert hass.states.get("sensor.waze_travel_time").attributes[ "distance" ] == pytest.approx(186.4113) @pytest.mark.usefixtures("mock_update_wrcerror") async def test_sensor_failed_wrcerror(hass, caplog): """Test that sensor update fails with log message.""" config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id="test") config_entry.add_to_hass(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() assert hass.states.get("sensor.waze_travel_time").state == "unknown" assert "Error on retrieving data: " in caplog.text @pytest.mark.usefixtures("mock_update_keyerror") async def test_sensor_failed_keyerror(hass, caplog): """Test that sensor update fails with log message.""" config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id="test") config_entry.add_to_hass(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() assert hass.states.get("sensor.waze_travel_time").state == "unknown" assert "Error retrieving data from server" in caplog.text
{ "content_hash": "f7abdfef9a098210e772634dd968a531", "timestamp": "", "source": "github", "line_count": 126, "max_line_length": 87, "avg_line_length": 32.84920634920635, "alnum_prop": 0.6663445276636869, "repo_name": "nkgilley/home-assistant", "id": "67ba7c6e3115d508dc30d6ac2495419ba4b6174f", "size": "4139", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "tests/components/waze_travel_time/test_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "51597279" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
from __future__ import print_function
import os
import sys
import warnings

# Monkey-patching must happen before anything else is imported, so these
# command checks sit at the very top of the file.
if __name__ == '__main__' and sys.argv[1] == "gevent_nosetests":
    print("Running gevent tests")
    from gevent.monkey import patch_all
    patch_all()

if __name__ == '__main__' and sys.argv[1] == "eventlet_nosetests":
    print("Running eventlet tests")
    from eventlet import monkey_patch
    monkey_patch()

# Bootstrap setuptools before importing from it.
import ez_setup
ez_setup.use_setuptools()

from setuptools import setup
from distutils.command.build_ext import build_ext
from distutils.core import Extension
from distutils.errors import (CCompilerError, DistutilsPlatformError,
                              DistutilsExecError)
from distutils.cmd import Command

PY3 = sys.version_info[0] == 3

try:
    import subprocess
    has_subprocess = True
except ImportError:
    has_subprocess = False

from cassandra import __version__

long_description = ""
with open("README.rst") as f:
    long_description = f.read()

# nose is optional; the custom nosetests subcommands are only registered
# when it is importable.
try:
    from nose.commands import nosetests
except ImportError:
    gevent_nosetests = None
    eventlet_nosetests = None
else:
    class gevent_nosetests(nosetests):
        description = "run nosetests with gevent monkey patching"

    class eventlet_nosetests(nosetests):
        description = "run nosetests with eventlet monkey patching"

# Detect a stale standalone 'cqlengine' install so we can warn after setup.
has_cqlengine = False
if __name__ == '__main__' and sys.argv[1] == "install":
    try:
        import cqlengine
        has_cqlengine = True
    except ImportError:
        pass

PROFILING = False


class DocCommand(Command):
    """Custom 'doc' command: build Sphinx HTML docs or run doctests."""

    description = "generate or test documentation"

    user_options = [("test", "t",
                     "run doctests instead of generating documentation")]

    boolean_options = ["test"]

    def initialize_options(self):
        self.test = False

    def finalize_options(self):
        pass

    def run(self):
        if self.test:
            path = "docs/_build/doctest"
            mode = "doctest"
        else:
            path = "docs/_build/%s" % __version__
            mode = "html"

        try:
            os.makedirs(path)
        except:
            # Directory may already exist; any real failure will surface in
            # sphinx-build below.
            pass

        if has_subprocess:
            try:
                output = subprocess.check_output(
                    ["sphinx-build", "-b", mode, "docs", path],
                    stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as exc:
                raise RuntimeError("Documentation step '%s' failed: %s: %s" % (mode, exc, exc.output))
            else:
                print(output)

            print("")
            print("Documentation step '%s' performed, results here:" % mode)
            print("    file://%s/%s/index.html" %
                  (os.path.dirname(os.path.realpath(__file__)), path))


class BuildFailed(Exception):

    def __init__(self, ext):
        self.ext = ext


murmur3_ext = Extension('cassandra.cmurmur3',
                        sources=['cassandra/cmurmur3.c'])

libev_ext = Extension('cassandra.io.libevwrapper',
                      sources=['cassandra/io/libevwrapper.c'],
                      include_dirs=['/usr/include/libev', '/usr/local/include', '/opt/local/include'],
                      libraries=['ev'],
                      library_dirs=['/usr/local/lib', '/opt/local/lib'])

platform_unsupported_msg = \
"""
===============================================================================
The optional C extensions are not supported on this platform.
===============================================================================
"""

arch_unsupported_msg = \
"""
===============================================================================
The optional C extensions are not supported on big-endian systems.
===============================================================================
"""

pypy_unsupported_msg = \
"""
=================================================================================
Some optional C extensions are not supported in PyPy. Only murmur3 will be built.
=================================================================================
"""

is_windows = os.name == 'nt'

is_pypy = "PyPy" in sys.version
if is_pypy:
    sys.stderr.write(pypy_unsupported_msg)

is_supported_platform = sys.platform != "cli" and not sys.platform.startswith("java")
is_supported_arch = sys.byteorder != "big"
if not is_supported_platform:
    sys.stderr.write(platform_unsupported_msg)
elif not is_supported_arch:
    sys.stderr.write(arch_unsupported_msg)

# Extension opt-outs via command-line flags / environment variable.
try_extensions = "--no-extensions" not in sys.argv and is_supported_platform and is_supported_arch
try_murmur3 = try_extensions and "--no-murmur3" not in sys.argv
try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy and not is_windows
try_cython = try_extensions and "--no-cython" not in sys.argv and not is_pypy and not os.environ.get('CASS_DRIVER_NO_CYTHON')

# Strip our custom flags so distutils does not choke on them.
sys.argv = [a for a in sys.argv if a not in ("--no-murmur3", "--no-libev", "--no-cython", "--no-extensions")]

build_concurrency = int(os.environ.get('CASS_DRIVER_BUILD_CONCURRENCY', '0'))


class build_extensions(build_ext):
    """build_ext variant that treats all extensions as optional.

    Compilation failures are downgraded to warnings so a pure-Python install
    still succeeds.
    """

    error_message = """
===============================================================================
WARNING: could not compile %s.

The C extensions are not required for the driver to run, but they add support
for token-aware routing with the Murmur3Partitioner.

On Windows, make sure Visual Studio or an SDK is installed, and your environment
is configured to build for the appropriate architecture (matching your Python runtime).
This is often a matter of using vcvarsall.bat from your install directory, or running
from a command prompt in the Visual Studio Tools Start Menu.
===============================================================================
""" if is_windows else """
===============================================================================
WARNING: could not compile %s.

The C extensions are not required for the driver to run, but they add support
for libev and token-aware routing with the Murmur3Partitioner.

Linux users should ensure that GCC and the Python headers are available.

On Ubuntu and Debian, this can be accomplished by running:

    $ sudo apt-get install build-essential python-dev

On RedHat and RedHat-based systems like CentOS and Fedora:

    $ sudo yum install gcc python-devel

On OSX, homebrew installations of Python should provide the necessary headers.

libev Support
-------------

For libev support, you will also need to install libev and its headers.

On Debian/Ubuntu:

    $ sudo apt-get install libev4 libev-dev

On RHEL/CentOS/Fedora:

    $ sudo yum install libev libev-devel

On OSX, via homebrew:

    $ brew install libev
===============================================================================
"""

    def run(self):
        try:
            self._setup_extensions()
            build_ext.run(self)
        except DistutilsPlatformError as exc:
            sys.stderr.write('%s\n' % str(exc))
            warnings.warn(self.error_message % "C extensions.")

    def build_extensions(self):
        if build_concurrency > 1:
            self.check_extensions_list(self.extensions)

            import multiprocessing.pool
            multiprocessing.pool.ThreadPool(processes=build_concurrency).map(self.build_extension, self.extensions)
        else:
            build_ext.build_extensions(self)

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, DistutilsExecError,
                DistutilsPlatformError, IOError) as exc:
            # A single failing extension only produces a warning.
            sys.stderr.write('%s\n' % str(exc))
            name = "The %s extension" % (ext.name,)
            warnings.warn(self.error_message % (name,))

    def _setup_extensions(self):
        # We defer extension setup until this command to leverage
        # 'setup_requires' pulling in Cython before we attempt to import
        # anything.
        self.extensions = []

        if try_murmur3:
            self.extensions.append(murmur3_ext)

        if try_libev:
            self.extensions.append(libev_ext)

        if try_cython:
            try:
                from Cython.Build import cythonize
                cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes',
                                     'metadata', 'pool', 'protocol', 'query', 'util']
                compile_args = [] if is_windows else ['-Wno-unused-function']
                self.extensions.extend(cythonize(
                    [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m],
                               extra_compile_args=compile_args)
                     for m in cython_candidates],
                    nthreads=build_concurrency,
                    exclude_failures=True))

                self.extensions.extend(cythonize("cassandra/*.pyx", nthreads=build_concurrency))
            except Exception:
                sys.stderr.write("Failed to cythonize one or more modules. These will not be compiled as extensions (optional).\n")


def run_setup(extensions):

    kw = {'cmdclass': {'doc': DocCommand}}
    if gevent_nosetests is not None:
        kw['cmdclass']['gevent_nosetests'] = gevent_nosetests
    if eventlet_nosetests is not None:
        kw['cmdclass']['eventlet_nosetests'] = eventlet_nosetests

    kw['cmdclass']['build_ext'] = build_extensions
    kw['ext_modules'] = [Extension('DUMMY', [])]  # dummy extension makes sure build_ext is called for install

    if try_cython:
        # Cython is only fetched when we are actually going to cythonize.
        kw['setup_requires'] = ['Cython >=0.21']

    dependencies = ['six >=1.6']
    if not PY3:
        dependencies.append('futures')

    setup(
        name='cassandra-driver',
        version=__version__,
        description='Python driver for Cassandra',
        long_description=long_description,
        url='http://github.com/datastax/python-driver',
        author='Tyler Hobbs',
        author_email='tyler@datastax.com',
        packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine'],
        keywords='cassandra,cql,orm',
        include_package_data=True,
        install_requires=dependencies,
        tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'],
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: Apache Software License',
            'Natural Language :: English',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
            'Topic :: Software Development :: Libraries :: Python Modules'
        ],
        **kw)

run_setup(None)

if has_cqlengine:
    warnings.warn("\n#######\n'cqlengine' package is present on path: %s\n"
                  "cqlengine is now an integrated sub-package of this driver.\n"
                  "It is recommended to remove this package to reduce the chance for conflicting usage" % cqlengine.__file__)
{ "content_hash": "de7f4cf8892956279ad7b62d2b15d4b9", "timestamp": "", "source": "github", "line_count": 328, "max_line_length": 131, "avg_line_length": 34.38109756097561, "alnum_prop": 0.5816263190564867, "repo_name": "beobal/python-driver", "id": "b92063bfac5edd0753e635553bc1bf4581c7c702", "size": "11857", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "28918" }, { "name": "Python", "bytes": "1710751" } ], "symlink_target": "" }
import account_analytic_default # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
{ "content_hash": "b74c682f92ddc61526eacf6b7fc3245b", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 65, "avg_line_length": 33, "alnum_prop": 0.8282828282828283, "repo_name": "cristianquaglio/odoo", "id": "1734f02dc579fc775a6c575063c3f3d2564ae3aa", "size": "1087", "binary": false, "copies": "437", "ref": "refs/heads/master", "path": "addons/account_analytic_default/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "9611" }, { "name": "C++", "bytes": "108790" }, { "name": "CSS", "bytes": "671328" }, { "name": "HTML", "bytes": "212829" }, { "name": "JavaScript", "bytes": "5984109" }, { "name": "Makefile", "bytes": "12332" }, { "name": "Mako", "bytes": "561" }, { "name": "PHP", "bytes": "14033" }, { "name": "Python", "bytes": "8366254" }, { "name": "Ruby", "bytes": "220" }, { "name": "Shell", "bytes": "19163" }, { "name": "Vim script", "bytes": "406" }, { "name": "XSLT", "bytes": "92945" } ], "symlink_target": "" }
from mtTkinter import *
from threading import Thread, Lock
from datetime import datetime, timedelta
from time import sleep
import os


class GameClock(Thread):
    """Daemon thread that keeps a running game clock and paints it onto a
    scoreboard widget.

    The elapsed time is modelled as ``base`` (time accumulated while the
    clock was previously running) plus, while running, the wall-clock delta
    since ``ref``.  Optional warn/alarm thresholds (whole seconds) recolor
    the display and beep once each when crossed.
    """

    def __init__(self, scorebox):
        """:param scorebox: display object exposing ``set_clock_text(text,
        ms)`` and ``set_clock_color(color)``."""
        Thread.__init__(self)
        self.daemon = True
        self.active = True  # cleared by kill() to stop the run() loop
        self.l = Lock()  # guards clock state shared with callers
        self.win = scorebox
        # base time on clock (accumulated across previous run segments)
        self.base = timedelta(0, 0)
        # reference wall-clock instant of the last start
        self.ref = datetime.now()
        self.clock_running = False
        # thresholds in seconds; None disables the corresponding alarm
        self.warn_thresh = None
        self.alarm_thresh = None
        self.warn_fired = False
        self.alarm_fired = False
        self.alarm_callback = None  # optional hook invoked once on alarm
        self._update_display()

    def _reset_alarms(self):
        """Re-arm both one-shot alarms (used by clock_reset)."""
        self.warn_fired = False
        self.alarm_fired = False

    def _warn_alarm(self):
        """Fire the warning alarm once: orange clock plus a short beep."""
        if not self.warn_fired:
            self.warn_fired = True
            self.win.set_clock_color("orange")
            os.system("beep -f 1000 -l 50 &>/dev/null &")

    def _alarm_alarm(self):
        """Fire the main alarm once: red clock, beeps, optional callback."""
        if not self.alarm_fired:
            self.alarm_fired = True
            self.win.set_clock_color("red")
            os.system("beep -f 2000 -r 5 &>/dev/null &")
            if self.alarm_callback is not None:
                self.alarm_callback()

    def _update_display(self):
        """Render the current elapsed time and trigger threshold alarms."""
        cdt = self._get_delta()
        tot_secs = int(cdt.total_seconds())
        ms = cdt.microseconds / 1e3
        # divmod keeps minutes/seconds integral on both Python 2 and 3; the
        # previous "tot_secs / 60" became float true division under Python 3
        # and corrupted the MM:SS rendering (e.g. 90s showed as 01:00).
        mins, secs = divmod(tot_secs, 60)
        self.win.set_clock_text("%02d:%02d" % (mins, secs), "%03d" % ms)
        if self.alarm_thresh is not None and tot_secs >= self.alarm_thresh:
            self._alarm_alarm()
        elif self.warn_thresh is not None and tot_secs >= self.warn_thresh:
            self._warn_alarm()

    def clock_start(self):
        """Start (or resume) the clock; no-op if already running."""
        with self.l:
            if self.clock_running:
                return
            self.clock_running = True
            self.ref = datetime.now()
            self.win.set_clock_color("black")

    def clock_reset(self):
        """Zero the clock and re-arm the alarms (running state unchanged)."""
        with self.l:
            self.base = timedelta(0, 0)
            self.ref = datetime.now()
            self._reset_alarms()
            if self.clock_running:
                self.win.set_clock_color("black")

    def _get_delta(self):
        """Return the total elapsed time as a timedelta."""
        time = self.base
        if self.clock_running:
            dt = datetime.now() - self.ref
            time += dt
        return time

    def clock_stop(self):
        """Pause the clock, folding the running segment into ``base``."""
        with self.l:
            if not self.clock_running:
                return
            self.base = self._get_delta()
            self.clock_running = False
            self.win.set_clock_color("blue")

    def kill(self):
        """Ask the display loop in run() to exit."""
        self.active = False

    def run(self):
        """Repaint the display roughly 14 times per second until killed."""
        while self.active:
            with self.l:
                self._update_display()
            sleep(0.073)
{ "content_hash": "afd26d2757e554bd116512e8294d550f", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 75, "avg_line_length": 27.805825242718445, "alnum_prop": 0.5321229050279329, "repo_name": "jheidel/scrabble-opencv", "id": "6860c5ed64d3230c47b5408190c09c8cb5bb4e03", "size": "2864", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gameclock.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "2228" }, { "name": "JavaScript", "bytes": "1069" }, { "name": "Python", "bytes": "586254" } ], "symlink_target": "" }
import mock
import requests

from keystoneclient import httpclient
from keystoneclient.tests.unit import utils

# Canned successful JSON response shared by all tests below.
FAKE_RESPONSE = utils.TestResponse({
    "status_code": 200,
    "text": '{"hi": "there"}',
})
REQUEST_URL = 'https://127.0.0.1:5000/hi'
RESPONSE_BODY = '{"hi": "there"}'


def get_client():
    # Client configured with a CA bundle and a client cert/key pair so the
    # tests can verify they are forwarded to requests.
    cl = httpclient.HTTPClient(username="username",
                               password="password",
                               tenant_id="tenant",
                               auth_url="auth_test",
                               cacert="ca.pem",
                               key="key.pem",
                               cert="cert.pem")
    return cl


def get_authed_client():
    # Same client, pre-seeded with endpoint and token so requests go
    # straight to the management URL.
    cl = get_client()
    cl.management_url = "https://127.0.0.1:5000"
    cl.auth_token = "token"
    return cl


class ClientTest(utils.TestCase):

    def setUp(self):
        super(ClientTest, self).setUp()
        # NOTE(review): requests.request is patched here with a mox anything
        # mock AND again per-test by @mock.patch.object; the decorator patch
        # is the one actually asserted against — confirm the setUp patch is
        # still required (self.mox presumably comes from utils.TestCase).
        self.request_patcher = mock.patch.object(requests, 'request',
                                                 self.mox.CreateMockAnything())
        self.request_patcher.start()
        self.addCleanup(self.request_patcher.stop)

    @mock.patch.object(requests, 'request')
    def test_get(self, MOCK_REQUEST):
        MOCK_REQUEST.return_value = FAKE_RESPONSE
        cl = get_authed_client()

        resp, body = cl.get("/hi")

        # this may become too tightly couple later
        mock_args, mock_kwargs = MOCK_REQUEST.call_args
        self.assertEqual(mock_args[0], 'GET')
        self.assertEqual(mock_args[1], REQUEST_URL)
        self.assertEqual(mock_kwargs['headers']['X-Auth-Token'], 'token')
        # TLS material must be passed through to requests.
        self.assertEqual(mock_kwargs['cert'], ('cert.pem', 'key.pem'))
        self.assertEqual(mock_kwargs['verify'], 'ca.pem')

        # Automatic JSON parsing
        self.assertEqual(body, {"hi": "there"})

    @mock.patch.object(requests, 'request')
    def test_post(self, MOCK_REQUEST):
        MOCK_REQUEST.return_value = FAKE_RESPONSE
        cl = get_authed_client()

        cl.post("/hi", body=[1, 2, 3])

        # this may become too tightly couple later
        mock_args, mock_kwargs = MOCK_REQUEST.call_args
        self.assertEqual(mock_args[0], 'POST')
        self.assertEqual(mock_args[1], REQUEST_URL)
        # Body is JSON-serialized before being handed to requests.
        self.assertEqual(mock_kwargs['data'], '[1, 2, 3]')
        self.assertEqual(mock_kwargs['headers']['X-Auth-Token'], 'token')
        self.assertEqual(mock_kwargs['cert'], ('cert.pem', 'key.pem'))
        self.assertEqual(mock_kwargs['verify'], 'ca.pem')

    @mock.patch.object(requests, 'request')
    def test_post_auth(self, MOCK_REQUEST):
        # Same as test_post but constructs the client inline rather than via
        # the get_authed_client() helper.
        MOCK_REQUEST.return_value = FAKE_RESPONSE
        cl = httpclient.HTTPClient(
            username="username", password="password", tenant_id="tenant",
            auth_url="auth_test", cacert="ca.pem", key="key.pem",
            cert="cert.pem")
        cl.management_url = "https://127.0.0.1:5000"
        cl.auth_token = "token"
        cl.post("/hi", body=[1, 2, 3])

        # this may become too tightly couple later
        mock_args, mock_kwargs = MOCK_REQUEST.call_args
        self.assertEqual(mock_args[0], 'POST')
        self.assertEqual(mock_args[1], REQUEST_URL)
        self.assertEqual(mock_kwargs['data'], '[1, 2, 3]')
        self.assertEqual(mock_kwargs['headers']['X-Auth-Token'], 'token')
        self.assertEqual(mock_kwargs['cert'], ('cert.pem', 'key.pem'))
        self.assertEqual(mock_kwargs['verify'], 'ca.pem')
{ "content_hash": "8921bd784ac05e4d5203f9eef332fbd4", "timestamp": "", "source": "github", "line_count": 95, "max_line_length": 79, "avg_line_length": 35.05263157894737, "alnum_prop": 0.6015015015015015, "repo_name": "darren-wang/ksc", "id": "e574d375053f63c3aaaa11ee2f899aec654fef68", "size": "3903", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "keystoneclient/tests/unit/test_https.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1405295" }, { "name": "Shell", "bytes": "7107" } ], "symlink_target": "" }
from __future__ import print_function
import os, sys, argparse

from object_recognition_core.db import models
from object_recognition_core.db.tools import args_to_db_params
import object_recognition_core.db.tools as dbtools
import couchdb

# Python 2/3 compatible prompt: the original unconditional raw_input() call
# raises NameError on Python 3, which this file otherwise targets
# (print_function import).
try:
    _input = raw_input
except NameError:
    _input = input


def parse_args():
    """Build and evaluate the command line for adding an object to the DB.

    Returns the parsed argparse namespace; DB connection options (including
    db_root and commit) are appended by dbtools.add_db_arguments.
    """
    parser = argparse.ArgumentParser(description='Add an object to the db.',
                                     fromfile_prefix_chars='@')
    parser.add_argument('-n', '--object_name', metavar='OBJECT_NAME',
                        dest='object_name', type=str, default='')
    parser.add_argument('-d', '--description', metavar='DESCRIPTION',
                        dest='description', type=str, default='')
    parser.add_argument('-a', '--author', metavar='AUTHOR_NAME',
                        dest='author_name', type=str, default='')
    parser.add_argument('-e', '--email', metavar='EMAIL_ADDRESS',
                        dest='author_email', type=str, default='')
    parser.add_argument('tags', metavar='TAGS', type=str, nargs='*',
                        help='Tags to add to object description.')
    dbtools.add_db_arguments(parser)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    obj = models.Object(object_name=args.object_name,
                        description=args.description,
                        tags=args.tags,
                        author_name=args.author_name,
                        author_email=args.author_email,
                        )
    couch = couchdb.Server(args.db_root)
    db = dbtools.init_object_databases(couch)
    objects = db
    # Offer existing same-named objects for reuse before creating a
    # duplicate document.
    existing = models.Object.by_object_name(objects, key=obj.object_name)
    store_new = True
    if len(existing) > 0:
        print('It appears that there are %d object(s) with the same name.' %
              len(existing))
        for x in existing:
            print(x)
            print('Use the object id above? [y,n]')
            use_it = _input('')
            if 'y' in use_it.lower():
                store_new = False
                obj = x
                break
            else:
                store_new = True
    if store_new:
        if args.commit:
            obj.store(objects)
            print('Stored new object with id:', obj.id)
        else:
            # Dry-run by default: nothing is written without --commit.
            print('Use the --commit option to commit the proposed change.')
{ "content_hash": "4037163cf8da481af206ce428032cbc4", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 111, "avg_line_length": 41.490566037735846, "alnum_prop": 0.5898135516143702, "repo_name": "WalkingMachine/sara_commun", "id": "0855bb788c8807b62cdd5c0539981735b94e68b7", "size": "2267", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wm_ork/object_recognition_core/apps/dbscripts/object_add.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "CMake", "bytes": "6113" } ], "symlink_target": "" }
import tensorflow as tf

from ..utils import logger


def describe_model():
    """Log a summary of every trainable variable in the current graph,
    followed by the total parameter count and an approximate size in MB."""
    trainable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    lines = [""]
    total = 0
    for var in trainable:
        shape = var.get_shape()
        n_elem = shape.num_elements()
        total += n_elem
        lines.append(
            "{}: shape={}, dim={}".format(var.name, shape.as_list(), n_elem))
    # NOTE(review): "{:01f}" zero-pads to width 1 (i.e. default float
    # formatting); possibly "{:.1f}" was intended — left unchanged.
    size_mb = total * 4 / 1024.0**2
    lines.append(
        "Total param={} ({:01f} MB assuming all float32)".format(
            total, size_mb))
    logger.info("Model Parameters: {}".format('\n'.join(lines)))


def get_shape_str(tensors):
    """Describe tensor shapes as a string.

    :param tensors: a tensor or a list of tensors
    :returns: a string to describe the shape
    """
    if isinstance(tensors, (list, tuple)):
        for t in tensors:
            assert isinstance(t, (tf.Tensor, tf.Variable)), \
                "Not a tensor: {}".format(type(t))
        shape_str = ",".join(
            str(t.get_shape().as_list()) for t in tensors)
    else:
        assert isinstance(tensors, (tf.Tensor, tf.Variable)), \
            "Not a tensor: {}".format(type(tensors))
        shape_str = str(tensors.get_shape().as_list())
    return shape_str
{ "content_hash": "70d4d35fc6d0321168bcba97279cbbe3", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 102, "avg_line_length": 34.857142857142854, "alnum_prop": 0.5959016393442623, "repo_name": "yinglanma/AI-project", "id": "6b0046744b8c8868c476e83bc18d0f5e9420a567", "size": "1307", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tensorpack/tfutils/modelutils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "319384" } ], "symlink_target": "" }
import os
import sys

from azure import (
    WindowsAzureError,
    DEFAULT_HTTP_TIMEOUT,
    DEV_ACCOUNT_NAME,
    DEV_ACCOUNT_KEY,
    _ERROR_STORAGE_MISSING_INFO,
    )
from azure.http import HTTPError
from azure.http.httpclient import _HTTPClient
from azure.storage import _storage_error_handler

#--------------------------------------------------------------------------
# constants for azure app setting environment variables
AZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT'
AZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY'
EMULATED = 'EMULATED'

#--------------------------------------------------------------------------


class _StorageClient(object):

    '''
    This is the base class for BlobManager, TableManager and QueueManager.
    '''

    def __init__(self, account_name=None, account_key=None, protocol='https',
                 host_base='', dev_host='', timeout=DEFAULT_HTTP_TIMEOUT):
        '''
        account_name: your storage account name, required for all operations.
        account_key: your storage account key, required for all operations.
        protocol: Optional. Protocol. Defaults to http.
        host_base:
            Optional. Live host base url. Defaults to Azure url. Override this
            for on-premise.
        dev_host: Optional. Dev host url. Defaults to localhost.
        timeout: Optional. Timeout for the http request, in seconds.
        '''
        self.account_name = account_name
        self.account_key = account_key
        self.requestid = None
        self.protocol = protocol
        self.host_base = host_base
        self.dev_host = dev_host

        # the app is not run in azure emulator or use default development
        # storage account and key if app is run in emulator.
        self.use_local_storage = False

        # check whether it is run in emulator.
        if EMULATED in os.environ:
            self.is_emulated = os.environ[EMULATED].lower() != 'false'
        else:
            self.is_emulated = False

        # get account_name and account key. If they are not set when
        # constructing, get the account and key from environment variables if
        # the app is not run in azure emulator or use default development
        # storage account and key if app is run in emulator.
        if not self.account_name or not self.account_key:
            if self.is_emulated:
                self.account_name = DEV_ACCOUNT_NAME
                self.account_key = DEV_ACCOUNT_KEY
                self.protocol = 'http'
                self.use_local_storage = True
            else:
                self.account_name = os.environ.get(AZURE_STORAGE_ACCOUNT)
                self.account_key = os.environ.get(AZURE_STORAGE_ACCESS_KEY)

        if not self.account_name or not self.account_key:
            raise WindowsAzureError(_ERROR_STORAGE_MISSING_INFO)

        self._httpclient = _HTTPClient(
            service_instance=self,
            account_key=self.account_key,
            account_name=self.account_name,
            protocol=self.protocol,
            timeout=timeout)
        self._batchclient = None
        # Default request pipeline is just the raw worker; with_filter()
        # wraps additional filters around it.
        self._filter = self._perform_request_worker

    def with_filter(self, filter):
        '''
        Returns a new service which will process requests with the specified
        filter.  Filtering operations can include logging, automatic retrying,
        etc...  The filter is a lambda which receives the HTTPRequest and
        another lambda.  The filter can perform any pre-processing on the
        request, pass it off to the next lambda, and then perform any
        post-processing on the response.
        '''
        # Build a sibling client and chain the new filter in front of this
        # client's existing filter (closure over old_filter).
        res = type(self)(self.account_name, self.account_key, self.protocol,
                         self.host_base, self.dev_host,
                         self._httpclient.timeout)
        old_filter = self._filter

        def new_filter(request):
            return filter(request, old_filter)

        res._filter = new_filter
        return res

    def set_proxy(self, host, port, user=None, password=None):
        '''
        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.

        host: Address of the proxy. Ex: '192.168.0.100'
        port: Port of the proxy. Ex: 6000
        user: User for proxy authorization.
        password: Password for proxy authorization.
        '''
        self._httpclient.set_proxy(host, port, user, password)

    @property
    def timeout(self):
        # HTTP timeout (seconds), delegated to the underlying http client.
        return self._httpclient.timeout

    @timeout.setter
    def timeout(self, value):
        self._httpclient.timeout = value

    def _get_host(self):
        # Emulator traffic goes to the dev host; live traffic to
        # <account><host_base>.
        if self.use_local_storage:
            return self.dev_host
        else:
            return self.account_name + self.host_base

    def _perform_request_worker(self, request):
        return self._httpclient.perform_request(request)

    def _perform_request(self, request, text_encoding='utf-8'):
        '''
        Sends the request and return response. Catches HTTPError and hand it
        to error handler
        '''
        try:
            if self._batchclient is not None:
                # Batch mode: queue the request instead of sending it now.
                return self._batchclient.insert_request_to_batch(request)
            else:
                resp = self._filter(request)

            if sys.version_info >= (3,) and isinstance(resp, bytes) and \
                text_encoding:
                resp = resp.decode(text_encoding)

        except HTTPError as ex:
            # NOTE(review): if _storage_error_handler does not re-raise,
            # 'resp' may be unbound at the return below — confirm the handler
            # always raises.
            _storage_error_handler(ex)

        return resp
{ "content_hash": "3dc09b2b85a94e5072a6d9472794e870", "timestamp": "", "source": "github", "line_count": 160, "max_line_length": 78, "avg_line_length": 34.98125, "alnum_prop": 0.5913882437019832, "repo_name": "dominoFire/azure-sdk-for-python", "id": "74b1fa02117386d99ffc5e41dcc582434c4d3cae", "size": "6339", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "azure/storage/storageclient.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "7954" }, { "name": "Python", "bytes": "1170771" }, { "name": "Shell", "bytes": "565" } ], "symlink_target": "" }
""" Unit tests for the Assessment object """ from sqlalchemy.orm import attributes from ggrc import db from ggrc.models import Assessment from ggrc.models import mixins from ggrc.models import object_document from ggrc.models import object_person from ggrc.models import relationship from ggrc.models import track_object_state from ggrc.models.mixins import assignable from unit.ggrc.models import test_mixins_base class TestAssessmentMixins(test_mixins_base.TestMixinsBase): """ Tests inclusion of correct mixins and their attributes """ def setUp(self): self.model = Assessment self.included_mixins = [ assignable.Assignable, mixins.BusinessObject, mixins.CustomAttributable, db.Model, object_document.Documentable, track_object_state.HasObjectState, mixins.TestPlanned, mixins.Timeboxed, object_person.Personable, relationship.Relatable, ] self.attributes_introduced = [ ('audit', dict), ('design', attributes.InstrumentedAttribute), ('operationally', attributes.InstrumentedAttribute), ('object', dict), ('status', attributes.InstrumentedAttribute), # Stateful # noqa ('assignees', property), # Assignable # noqa ('contact_id', attributes.InstrumentedAttribute), # WithContact # noqa ('contact', attributes.InstrumentedAttribute), # WithContact # noqa ('secondary_contact', attributes.InstrumentedAttribute), # WithContact # noqa ('custom_attribute_values', attributes.InstrumentedAttribute), # CustomAttrib. 
# noqa ('description', attributes.InstrumentedAttribute), # Described # noqa ('end_date', attributes.InstrumentedAttribute), # Timeboxed # noqa ('notes', attributes.InstrumentedAttribute), # Noted # noqa ('object_documents', attributes.InstrumentedAttribute), # Documentable # noqa ('object_people', attributes.InstrumentedAttribute), # Personable # noqa ('os_state', attributes.InstrumentedAttribute), # HasObjectState # noqa ('reference_url', attributes.InstrumentedAttribute), # HyperLinked # noqa ('related_sources', attributes.InstrumentedAttribute), # Relatable # noqa ('related_destinations', attributes.InstrumentedAttribute), # Relatable # noqa ('slug', attributes.InstrumentedAttribute), # Slugged # noqa ('start_date', attributes.InstrumentedAttribute), # Timeboxed # noqa ('test_plan', attributes.InstrumentedAttribute), # TestPlanned # noqa ('title', attributes.InstrumentedAttribute), # Titled # noqa ('url', attributes.InstrumentedAttribute), # HyperLinked # noqa ]
{ "content_hash": "63b8c26c1dc0e99e12b9856c20a4d12f", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 96, "avg_line_length": 51.55, "alnum_prop": 0.6110572259941804, "repo_name": "josthkko/ggrc-core", "id": "f1abd02c5022560d29234b22f230c0105cd5de8f", "size": "3206", "binary": false, "copies": "6", "ref": "refs/heads/develop", "path": "test/unit/ggrc/models/test_assessement.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "163629" }, { "name": "Cucumber", "bytes": "136321" }, { "name": "HTML", "bytes": "1057288" }, { "name": "JavaScript", "bytes": "1492054" }, { "name": "Makefile", "bytes": "6161" }, { "name": "Mako", "bytes": "2178" }, { "name": "Python", "bytes": "2148568" }, { "name": "Shell", "bytes": "29929" } ], "symlink_target": "" }
from __future__ import unicode_literals, absolute_import

import unittest

from whispy_lispy import interpreter2, scopes2
from ..constructors import *


class InterpreterTestCase(unittest.TestCase):
    """Behavioral tests for interpreter2.interpret_ast over hand-built ASTs."""

    def test_return_native_int(self):
        # A root node holding a single literal evaluates to that literal.
        tree = ast.RootAbstractSyntaxNode((ast.Value((types.Int((3, )),)),))
        self.assertEqual(interpreter2.interpret_ast(tree, {}), types.Int((3,)))

    def test_return_the_last_provided_value(self):
        # With several top-level expressions, only the last value is returned.
        tree = ast.RootAbstractSyntaxNode((
            ast.Value((types.Int((3,)),)),
            ast.Value((types.String(('ff',)),))))
        self.assertEqual(
            interpreter2.interpret_ast(tree, {}), types.String(('ff',)))

    def test_simple_literal_assignment_and_returns_nothing(self):
        # Assignment mutates the scope but produces no value itself.
        tree = ast.RootAbstractSyntaxNode((
            ast.Assign((
                ast.Symbol(('x',)),
                ast.Value((types.Int((3,)),)))),))
        scope = {}
        result = interpreter2.interpret_ast(tree, scope)
        self.assertEqual(result, None)
        self.assertEqual(scope[types.Symbol(('x',))], types.Int((3,)))

    def test_sum_internal_function(self):
        # (sum 3 4) -> 7, exercising the built-in 'sum' function.
        tree = ast.RootAbstractSyntaxNode((
            ast.Apply((
                ast.Symbol(('sum',)),
                ast.Value((types.Int((3,)),)),
                ast.Value((types.Int((4,)),))
            )),))
        result = interpreter2.interpret_ast(tree)
        self.assertEqual(result, types.Int((7,)))

    def test_assign_value_from_reference(self):
        # (def x 4)
        # (def y (sum x 1 2)
        tree = ast.RootAbstractSyntaxNode((
            ast.Assign((
                ast.Symbol(('x',)),
                ast.Value((types.Int((4,)),))
            )),
            ast.Assign((
                ast.Symbol(('y',)),
                ast.Apply((
                    ast.Symbol(('sum',)),
                    ast.Symbol(('x',)),
                    ast.Value((types.Int((1,)),)),
                    # NOTE(review): wrapping a bare value in ast.Apply here
                    # looks like it was meant to be ast.Value -- confirm
                    # against the interpreter's handling of Apply nodes.
                    ast.Apply((types.Int((2,)),))))))))
        scope = scopes2.Scope()
        interpreter2.interpret_ast(tree, scope)
        self.assertEqual(
            scope[types.Symbol(('y',))],
            types.Int((7,))
        )

    def test_assign_value_from_reference_simple(self):
        # Had troubles with this one after the last test was passing
        # (def x 9)
        # (def y x)
        tree = ast.RootAbstractSyntaxNode((
            ast.Assign((
                ast.Symbol(('x',)),
                ast.Value((types.Int((9,)),))
            )),
            ast.Assign((
                ast.Symbol(('y',)),
                ast.Symbol(('x',))
            ))
        ))
        scope = scopes2.Scope()
        interpreter2.interpret_ast(tree, scope)
        self.assertEqual(
            scope[types.Symbol(('y',))],
            types.Int((9,))
        )
{ "content_hash": "3c57354eb8dd03a65f3f2a69e0f88be8", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 79, "avg_line_length": 32.632183908045974, "alnum_prop": 0.5036984853821769, "repo_name": "vladiibine/whispy_lispy", "id": "fe968130e920d5432fa8163c57d8d27c3fffa332", "size": "2862", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_interpreter2/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1489" }, { "name": "PowerShell", "bytes": "2986" }, { "name": "Python", "bytes": "99352" } ], "symlink_target": "" }
import argparse import os import shutil import time import random import tqdm import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.optim import torch.utils.data import torchvision.transforms as transforms import torchvision.datasets as datasets import densenet as dn # used for logging to TensorBoard from tensorboard_logger import configure, log_value parser = argparse.ArgumentParser(description='PyTorch DenseNet Training') parser.add_argument('--epochs', default=300, type=int, help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, help='manual epoch number (useful on restarts)') parser.add_argument('-b', '--batch-size', default=256, type=int, help='mini-batch size (default: 64)') parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, help='initial learning rate') parser.add_argument('--momentum', default=0.9, type=float, help='momentum') parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, help='weight decay (default: 1e-4)') parser.add_argument('--batchnorm-decay', '--bd', default=1e-4, type=float, help='batchnorm decay (default: 1e-4)') parser.add_argument('--print-freq', '-p', default=10, type=int, help='print frequency (default: 10)') parser.add_argument('--layers', default=100, type=int, help='total number of layers (default: 100)') parser.add_argument('--growth', default=12, type=int, help='number of new channels per layer (default: 12)') parser.add_argument('--droprate', default=0, type=float, help='dropout probability (default: 0.0)') parser.add_argument('--no-augment', dest='augment', action='store_false', help='whether to use standard augmentation (default: True)') parser.add_argument('--reduce', default=0.5, type=float, help='compression rate in transition stage (default: 0.5)') parser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false', help='To not use bottleneck block') 
parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)') parser.add_argument('--name', default='DenseNet_BC_100_12', type=str, help='name of experiment') parser.add_argument('--tensorboard', help='Log progress to TensorBoard', action='store_true') parser.add_argument('--reg', default=0, type=float, help='regularization parameter') parser.add_argument('--reg-method', default=0, type=int, help='regularization method') parser.set_defaults(bottleneck=True) parser.set_defaults(augment=True) def main(): random.seed(3423432) global args, best_acc, suffix datasize = [1000, 2000, 4000, 8000, 16000, 32000, 50000] args = parser.parse_args() if args.tensorboard: configure("runs/%s"%(args.name)) normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]], std=[x/255.0 for x in [63.0, 62.1, 66.7]]) if args.augment: transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ]) else: transform_train = transforms.Compose([ transforms.ToTensor(), normalize, ]) transform_test = transforms.Compose([ transforms.ToTensor(), normalize ]) dataset_train = datasets.CIFAR10('/home/gh349/bicheng/data', train=True, download=True, transform=transform_train) dataset_test = datasets.CIFAR10('/home/gh349/bicheng/data/', train=False, transform=transform_test) for size in datasize: suffix = " - " + str(size) tmp_train = random.sample(list(dataset_train), size) tmp_test = dataset_test kwargs = {'num_workers': 12, 'pin_memory': True} train_loader = torch.utils.data.DataLoader( tmp_train, batch_size=args.batch_size, shuffle=True, **kwargs) val_loader = torch.utils.data.DataLoader( tmp_test, batch_size=args.batch_size, shuffle=True, **kwargs) # create model model = dn.DenseNet3(args.layers, 10, args.growth, reduction=args.reduce, bottleneck=args.bottleneck, dropRate=args.droprate) # get the number of model parameters print('Number of model parameters: 
{}'.format( sum([p.data.nelement() for p in model.parameters()]))) # for training on multiple GPUs. # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use model = model.cuda() cudnn.benchmark = True # define loss function (criterion) and pptimizer criterion = nn.CrossEntropyLoss().cuda() optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) best_acc = 0 for epoch in tqdm.trange(args.start_epoch, args.epochs, desc='Train'): adjust_learning_rate(optimizer, epoch) # train for one epoch acc_train = train(train_loader, model, criterion, optimizer, epoch) # evaluate on validation set acc_val = validate(val_loader, model, criterion, epoch) if args.tensorboard: log_value("generalization error" + suffix, acc_train - acc_val, epoch) # remember best precision and save checkpoint is_best = acc_val > best_acc best_acc = max(acc_val, best_acc) print('Best accuracy' + suffix + ': ', best_acc) if args.tensorboard: log_value('dataset accuracy', best_acc, size) def train(train_loader, model, criterion, optimizer, epoch): """Train for one epoch on the training set""" # batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() # switch to train mode model.train() # end = time.time() for i, (input, target) in tqdm.tqdm( enumerate(train_loader), total=len(train_loader), desc='Train Iteration=%d' % epoch, leave=False): target = target.cuda(async=True) input = input.cuda() input_var = torch.autograd.Variable(input) target_var = torch.autograd.Variable(target) # compute output output = model(input_var) loss = criterion(output, target_var) # measure accuracy and record loss prec1 = accuracy(output.data, target, topk=(1,))[0] losses.update(loss.data[0], input.size(0)) # print(top1.avg, top1.count) # print(prec1[0], input.size(0)) top1.update(prec1[0], input.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() add_regularization(model, args.reg_method, args.reg) # 
add_regularization(model, args.batchnorm_decay, 1.0) optimizer.step() # measure elapsed time # batch_time.update(time.time() - end) # end = time.time() # if i % args.print_freq == 0: # print('Epoch: [{0}][{1}/{2}]\t' # 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' # 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' # 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format( # epoch, i, len(train_loader), batch_time=batch_time, # loss=losses, top1=top1)) # log to TensorBoard if args.tensorboard: log_value('train_loss' + suffix, losses.avg, epoch) log_value('train_acc' + suffix, top1.avg, epoch) return top1.avg def validate(val_loader, model, criterion, epoch): """Perform validation on the validation set""" # batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() # switch to evaluate mode model.eval() # end = time.time() for i, (input, target) in tqdm.tqdm( enumerate(val_loader), total=len(val_loader), desc='Valid Iteration=%d' % epoch, leave=False): target = target.cuda(async=True) input = input.cuda() input_var = torch.autograd.Variable(input, volatile=True) target_var = torch.autograd.Variable(target, volatile=True) # compute output output = model(input_var) loss = criterion(output, target_var) # measure accuracy and record loss prec1 = accuracy(output.data, target, topk=(1,))[0] losses.update(loss.data[0], input.size(0)) # print(top1.avg, top1.count) # print(prec1[0], input.size(0)) top1.update(prec1[0], input.size(0)) # measure elapsed time # batch_time.update(time.time() - end) # end = time.time() # if i % args.print_freq == 0: # print('Test: [{0}/{1}]\t' # 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' # 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' # 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format( # i, len(val_loader), batch_time=batch_time, loss=losses, # top1=top1)) # print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1)) # log to TensorBoard if args.tensorboard: log_value('val_loss' + suffix, losses.avg, epoch) log_value('val_acc' + suffix, 
top1.avg, epoch) return top1.avg class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def adjust_learning_rate(optimizer, epoch): """Sets the learning rate to the initial LR decayed by 10 after 150 and 225 epochs""" lr = args.lr * (0.1 ** (epoch // 150)) * (0.1 ** (epoch // 225)) # log to TensorBoard if args.tensorboard: log_value('learning_rate' + suffix, lr, epoch) for param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0) res.append(correct_k.mul_(100.0 / batch_size)) return res def add_regularization(model, reg_method, param): if (reg_method <= 0 or reg_method > 2): return {1: add_combined_reg(model, param), 2: add_separate_reg(model, param)}[reg_method] def add_combined_reg(model, param): # pass init_features = len(model.block1.layer[0].bn1.weight.data) for j in range(1, model.nblayer): weight = model.block1.layer[j].bn1.weight feature = model.block1.layer[j].conv1.weight # take the square of each element in the feature maps feature_sum = feature**2 # sum all 48 feature maps of the convolution layer feature_sum = feature_sum.sum(0) feature_sum = (feature_sum.view_as(weight.data)) # expand the feature sum into a tensor with same dimensions as BN weights for i in range(j - 1): reg = i * param / model.nblayer st = init_features + i * 12 ed = st + 12 bn_grad = weight.grad.data[st:ed] bn_data = weight.data[st:ed] feature_sum_data = feature_sum.data[st:ed] bn_grad += reg * feature_sum_data * 
bn_data for k in range(48): gamma = weight.data[st:ed] gamma = gamma ** 2 conv_grad = feature.grad.data[k][st:ed] conv_data = feature.data[k][st:ed] conv_grad += reg * conv_data * gamma init_features = len(model.block2.layer[0].bn1.weight.data) for j in range(1, model.nblayer): weight = model.block2.layer[j].bn1.weight feature = model.block2.layer[j].conv1.weight # take the square of each element in the feature maps feature_sum = feature**2 # sum all 48 feature maps of the convolution layer feature_sum = feature_sum.sum(0) feature_sum = (feature_sum.view_as(weight.data)) # expand the feature sum into a tensor with same dimensions as BN weights for i in range(j - 1): reg = i * param / model.nblayer st = init_features + i * 12 ed = st + 12 bn_grad = weight.grad.data[st:ed] bn_data = weight.data[st:ed] feature_sum_data = feature_sum.data[st:ed] bn_grad += reg * feature_sum_data * bn_data for k in range(48): gamma = weight.data[st:ed] gamma = gamma ** 2 conv_grad = feature.grad.data[k][st:ed] conv_data = feature.data[k][st:ed] conv_grad += reg * conv_data * gamma init_features = len(model.block3.layer[0].bn1.weight.data) for j in range(1, model.nblayer): weight = model.block3.layer[j].bn1.weight feature = model.block3.layer[j].conv1.weight # take the square of each element in the feature maps feature_sum = feature**2 # sum all 48 feature maps of the convolution layer feature_sum = feature_sum.sum(0) feature_sum = (feature_sum.view_as(weight.data)) # expand the feature sum into a tensor with same dimensions as BN weights for i in range(j - 1): reg = i * param / model.nblayer st = init_features + i * 12 ed = st + 12 bn_grad = weight.grad.data[st:ed] bn_data = weight.data[st:ed] feature_sum_data = feature_sum.data[st:ed] bn_grad += reg * feature_sum_data * bn_data for k in range(48): gamma = weight.data[st:ed] gamma = gamma ** 2 conv_grad = feature.grad.data[k][st:ed] conv_data = feature.data[k][st:ed] conv_grad += reg * conv_data * gamma def add_separate_reg(model, 
param): pass if __name__ == '__main__': main()
{ "content_hash": "9ad379a6afb9f574575f066f361baeb5", "timestamp": "", "source": "github", "line_count": 386, "max_line_length": 130, "avg_line_length": 38.204663212435236, "alnum_prop": 0.5848647182477792, "repo_name": "volzkzg/densenet-pytorch", "id": "edbe3d1be985c0303fe4eaa1a678e01e78f1db27", "size": "14747", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "train_framework.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "81729" } ], "symlink_target": "" }
import attr
import datetime
import logging
import socket
import sakia.i18n_rc
import async_timeout
import aiohttp
from PyQt5.QtCore import QObject, pyqtSignal, QTranslator, QCoreApplication, QLocale, Qt
from . import __version__
from .options import SakiaOptions
from sakia.data.connectors import BmaConnector
from sakia.services import NetworkService, BlockchainService, IdentitiesService, \
    SourcesServices, TransactionsService, DocumentsService
from sakia.data.repositories import SakiaDatabase
from sakia.data.entities import Transaction, Connection, Identity, Dividend
from sakia.data.processors import BlockchainProcessor, NodesProcessor, IdentitiesProcessor, \
    CertificationsProcessor, SourcesProcessor, TransactionsProcessor, ConnectionsProcessor, DividendsProcessor
from sakia.data.files import AppDataFile, UserParametersFile, PluginsDirectory
from sakia.decorators import asyncify
from sakia.money import *
import asyncio


@attr.s()
class Application(QObject):
    """
    Managing core application datas :
    Accounts list and general configuration
    Saving and loading the application state

    :param QCoreApplication qapp: Qt Application
    :param quamash.QEventLoop loop: quamash.QEventLoop instance
    :param sakia.options.SakiaOptions options: the options
    :param sakia.data.entities.AppData app_data: the application data
    :param sakia.data.entities.UserParameters parameters: the application current user parameters
    :param sakia.data.repositories.SakiaDatabase db: The database
    :param sakia.services.NetworkService network_service: All network services for current currency
    :param sakia.services.BlockchainService blockchain_service: All blockchain services for current currency
    :param sakia.services.IdentitiesService identities_service: All identities services for current currency
    :param sakia.services.SourcesService sources_service: All sources services for current currency
    :param sakia.Services.TransactionsService transactions_service: All transactions services for current currency
    :param sakia.services.DocumentsService documents_service: A service to broadcast documents
    """

    # Qt signals emitted when the underlying data changes; the UI connects
    # to these to refresh itself.
    new_dividend = pyqtSignal(Connection, Dividend)
    new_transfer = pyqtSignal(Connection, Transaction)
    transaction_state_changed = pyqtSignal(Transaction)
    identity_changed = pyqtSignal(Identity)
    new_connection = pyqtSignal(Connection)
    connection_removed = pyqtSignal(Connection)
    referential_changed = pyqtSignal()
    sources_refreshed = pyqtSignal()
    new_blocks_handled = pyqtSignal()
    view_in_wot = pyqtSignal(Identity)
    refresh_started = pyqtSignal()
    refresh_finished = pyqtSignal()

    # attrs-managed fields; positional order here defines the __init__
    # signature generated by @attr.s().
    qapp = attr.ib()
    loop = attr.ib()
    options = attr.ib()
    app_data = attr.ib()
    parameters = attr.ib()
    db = attr.ib()
    currency = attr.ib()
    plugins_dir = attr.ib()
    network_service = attr.ib(default=None)
    blockchain_service = attr.ib(default=None)
    identities_service = attr.ib(default=None)
    sources_service = attr.ib(default=None)
    transactions_service = attr.ib(default=None)
    documents_service = attr.ib(default=None)
    current_ref = attr.ib(default=Quantitative)
    _logger = attr.ib(default=attr.Factory(lambda:logging.getLogger('sakia')))
    available_version = attr.ib(init=False)
    _translator = attr.ib(init=False)

    def __attrs_post_init__(self):
        # QObject.__init__ must still run even though attrs generates __init__.
        super().__init__()
        self._translator = QTranslator(self.qapp)
        self.available_version = True, __version__, ""

    @classmethod
    def startup(cls, argv, qapp, loop):
        # Factory: parse CLI options, load app data and build a configured
        # Application with its profile and services initialized.
        qapp.setAttribute(Qt.AA_EnableHighDpiScaling, True)
        options = SakiaOptions.from_arguments(argv)
        app_data = AppDataFile.in_config_path(options.config_path).load_or_init()
        app = cls(qapp, loop, options, app_data, None, None, options.currency, None)
        #app.set_proxy()
        app.load_profile(options.profile)
        app.documents_service = DocumentsService.instanciate(app)
        app.switch_language()
        return app

    def load_profile(self, profile_name):
        """
        Initialize databases depending on profile loaded
        :param profile_name:
        :return:
        """
        self.plugins_dir = PluginsDirectory.in_config_path(self.options.config_path, profile_name).load_or_init(self.options.with_plugin)
        self.parameters = UserParametersFile.in_config_path(self.options.config_path, profile_name).load_or_init(profile_name)
        self.db = SakiaDatabase.load_or_init(self.options, profile_name)
        self.instanciate_services()

    def instanciate_services(self):
        # Wire together the processor/service object graph for the current
        # currency; order matters because later services depend on earlier ones.
        nodes_processor = NodesProcessor(self.db.nodes_repo)
        bma_connector = BmaConnector(nodes_processor, self.parameters)
        connections_processor = ConnectionsProcessor(self.db.connections_repo)
        identities_processor = IdentitiesProcessor(self.db.identities_repo, self.db.certifications_repo,
                                                  self.db.blockchains_repo, bma_connector)
        certs_processor = CertificationsProcessor(self.db.certifications_repo, self.db.identities_repo, bma_connector)
        blockchain_processor = BlockchainProcessor.instanciate(self)
        sources_processor = SourcesProcessor.instanciate(self)
        transactions_processor = TransactionsProcessor.instanciate(self)
        dividends_processor = DividendsProcessor.instanciate(self)
        nodes_processor.initialize_root_nodes(self.currency)
        self.db.commit()
        self.documents_service = DocumentsService.instanciate(self)
        self.identities_service = IdentitiesService(self.currency, connections_processor, identities_processor,
                                                   certs_processor, blockchain_processor,
                                                   bma_connector)
        self.transactions_service = TransactionsService(self.currency, transactions_processor,
                                                        dividends_processor,
                                                        identities_processor, connections_processor,
                                                        bma_connector)
        self.sources_service = SourcesServices(self.currency, sources_processor,
                                               connections_processor, transactions_processor,
                                               blockchain_processor, bma_connector)
        self.blockchain_service = BlockchainService(self, self.currency, blockchain_processor, connections_processor,
                                                    bma_connector,
                                                    self.identities_service,
                                                    self.transactions_service,
                                                    self.sources_service)
        self.network_service = NetworkService.load(self, self.currency, nodes_processor,
                                                   self.blockchain_service,
                                                   self.identities_service)

    async def remove_connection(self, connection):
        # Purge every trace of this connection (certs, identities, sources,
        # dividends, transactions) before committing and notifying the UI.
        connections_processor = ConnectionsProcessor.instanciate(self)
        connections_processor.remove_connections(connection)
        CertificationsProcessor.instanciate(self).cleanup_connection(connection, connections_processor.pubkeys())
        IdentitiesProcessor.instanciate(self).cleanup_connection(connection)
        SourcesProcessor.instanciate(self).drop_all_of(currency=connection.currency, pubkey=connection.pubkey)
        DividendsProcessor.instanciate(self).cleanup_connection(connection)
        TransactionsProcessor.instanciate(self).cleanup_connection(connection, connections_processor.pubkeys())
        self.db.commit()
        self.connection_removed.emit(connection)

    async def initialize_blockchain(self):
        await asyncio.sleep(2)  # Give time for the network to connect to nodes
        await BlockchainProcessor.instanciate(self).initialize_blockchain(self.currency)

    def switch_language(self):
        # Replace the installed Qt translator with the one matching the
        # user-configured locale; "en" installs an empty translator.
        logging.debug("Loading translations")
        locale = self.parameters.lang
        QLocale.setDefault(QLocale(locale))
        QCoreApplication.removeTranslator(self._translator)
        self._translator = QTranslator(self.qapp)
        if locale == "en":
            QCoreApplication.installTranslator(self._translator)
        elif self._translator.load(":/i18n/{0}".format(locale)):
            if QCoreApplication.installTranslator(self._translator):
                self._logger.debug("Loaded i18n/{0}".format(locale))
            else:
                self._logger.debug("Couldn't load translation")
        else:
            self._logger.debug("Couldn't load i18n/{0}".format(locale))

    def start_coroutines(self):
        self.network_service.start_coroutines()

    async def stop_current_profile(self, closing=False):
        """
        Save the account to the cache and stop the coroutines
        """
        await self.network_service.stop_coroutines(closing)
        self.db.commit()

    @asyncify
    async def get_last_version(self):
        # Query the GitHub releases API and record whether a newer release
        # than the running __version__ exists; failures are logged, not raised.
        try:
            async with aiohttp.ClientSession() as session:
                async with async_timeout.timeout(10):
                    response = await session.get("https://api.github.com/repos/duniter/sakia/releases",
                                                 proxy=self.parameters.proxy())
                    if response.status == 200:
                        releases = await response.json()
                        latest = None
                        for r in releases:
                            if not latest:
                                latest = r
                            else:
                                latest_date = datetime.datetime.strptime(latest['published_at'], "%Y-%m-%dT%H:%M:%SZ")
                                date = datetime.datetime.strptime(r['published_at'], "%Y-%m-%dT%H:%M:%SZ")
                                if latest_date < date:
                                    latest = r
                        latest_version = latest["tag_name"]
                        version = (__version__ == latest_version,
                                   latest_version,
                                   latest["html_url"])
                        logging.debug("Found version : {0}".format(latest_version))
                        logging.debug("Current version : {0}".format(__version__))
                        self.available_version = version
        except (aiohttp.ClientError, aiohttp.ServerDisconnectedError, asyncio.TimeoutError, socket.gaierror) as e:
            self._logger.debug("Could not connect to github : {0}".format(str(e)))

    def save_parameters(self, parameters):
        # Persist user parameters to disk and keep the in-memory copy in sync.
        self.parameters = UserParametersFile\
            .in_config_path(self.options.config_path, parameters.profile_name)\
            .save(parameters)

    def change_referential(self, index):
        self.current_ref = Referentials[index]
        self.referential_changed.emit()

    def connection_exists(self):
        return len(ConnectionsProcessor.instanciate(self).connections()) > 0
{ "content_hash": "ca5bdbbea3776e99f2a11b2fa0321391", "timestamp": "", "source": "github", "line_count": 231, "max_line_length": 145, "avg_line_length": 48.74458874458875, "alnum_prop": 0.6417406749555951, "repo_name": "ucoin-io/cutecoin", "id": "ecfe2e41f2d99d3d6e7fa45a43013f8e23ba4e06", "size": "11260", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "src/sakia/app.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "2475" }, { "name": "JavaScript", "bytes": "1594" }, { "name": "PowerShell", "bytes": "3111" }, { "name": "Python", "bytes": "718811" }, { "name": "Shell", "bytes": "3983" } ], "symlink_target": "" }
""" GENERATE DOC: [MAJORS] csv/tsv for R processing """ import os, sys import pymongo from pymongo import MongoClient import json import ipdb BASE_DIR = os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ) counts_dir = os.path.join(BASE_DIR,'dataR/counts') ne_counts = os.path.join(counts_dir,'NE_ene_ing') full_counts = os.path.join(counts_dir,'full_ene_ing') ######################################################################### client = MongoClient() db = client.JobDB db_prueba = db.prueba ######################################################################### temp = json.loads(open('hierarchy/ident_names.json','r').read()) hierarchy = json.loads(open('hierarchy/carreras.json','r').read()) fileByName = {} for k,v in temp.items(): fileByName[v]=k ######################################################################### if __name__ == '__main__': major_eng = [] for level1 in hierarchy["children"]: if level1["name"]=='Ingeniería': for career in level1["children"]: name = career["name"] major_eng.append(fileByName[name]) ipdb.set_trace() #curr_count = ne_counts curr_count = full_counts title_map = [line.strip('\n') for line in open(os.path.join(curr_count,'title_map.dat')) if line.strip('\n')!=''] majors_by_doc = open(os.path.join(curr_count,'majors_by_doc.dat'),'w') for doc_name in title_map: p = db.prueba.find_one({'name':doc_name}) majors = [a for a in p['carreras'] if a in major_eng] majors_by_doc.write(','.join(majors)+'\n')
{ "content_hash": "9ccd4bd8dcf4115175d265ef904bde69", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 115, "avg_line_length": 31.6875, "alnum_prop": 0.5746219592373438, "repo_name": "ronaldahmed/labor-market-demand-analysis", "id": "950d3998d47ac9f080e5b9bb77a9e5ab06cbd4e0", "size": "1522", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "preprocessing/doc_majors_map.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "125767" }, { "name": "CSS", "bytes": "32608" }, { "name": "HTML", "bytes": "235912337" }, { "name": "JavaScript", "bytes": "23952" }, { "name": "Makefile", "bytes": "1369" }, { "name": "Python", "bytes": "1028412" }, { "name": "R", "bytes": "38334" } ], "symlink_target": "" }
'''
Created on 2014-7-10
Author: Gavin_Han
Email: muyaohan@gmail.com
'''
import time
import threading


class UserCrawler(threading.Thread):
    """Worker thread that simulates a crawl, then fires an optional callback."""

    def __init__(self, callbacks=None):
        """
        :param callbacks: optional zero-argument callable invoked once the
            simulated crawl finishes.
        """
        super(UserCrawler, self).__init__()
        self.callbacks = callbacks

    def crawl(self):
        """Simulate a 5-second crawl, then notify the callback (if any)."""
        time.sleep(5)
        print('hello')
        # Bug fix: the original called self.callbacks() unconditionally,
        # raising TypeError when constructed with the default callbacks=None.
        if self.callbacks is not None:
            self.callbacks()

    def run(self):
        # Thread entry point: delegates to crawl().
        self.crawl()
{ "content_hash": "66a61ba032fa18010266f05ed0d061ed", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 43, "avg_line_length": 20.181818181818183, "alnum_prop": 0.581081081081081, "repo_name": "GavinHan/sina_weibo_crawler", "id": "34e0528b463f092fb311331283d58ca2d063fac4", "size": "480", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/output/callback.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Java", "bytes": "9350" }, { "name": "Python", "bytes": "34386" } ], "symlink_target": "" }
import hashlib

import pytest

from pex.hashing import (
    HashlibHasher,
    MultiDigest,
    Sha1Fingerprint,
    Sha256Fingerprint,
    new_fingerprint,
)


def test_fingerprint_equality():
    # type: () -> None
    # Fingerprints compare by value within the same algorithm type...
    assert Sha1Fingerprint("foo") == Sha1Fingerprint("foo")
    assert Sha1Fingerprint("foo") != Sha1Fingerprint("bar")
    assert "foo" == Sha1Fingerprint("foo"), (
        "Expected a digest str object to see itself as equal to a Sha256Fingerprint "
        "object with the same digest value"
    )
    # ...but never across algorithm types, even for identical hex digests.
    assert Sha1Fingerprint("foo") != Sha256Fingerprint(
        "foo"
    ), "Expected fingerprint objects to require types (algorithms) match exactly"


def test_fingerprint_new_hasher():
    # type: () -> None
    # A fresh hasher from each fingerprint type matches the hashlib equivalent.
    assert hashlib.sha1().hexdigest() == Sha1Fingerprint.new_hasher().hexdigest()
    assert hashlib.sha256().hexdigest() == Sha256Fingerprint.new_hasher().hexdigest()


def test_new_fingerprint():
    # type: () -> None
    # new_fingerprint dispatches on the algorithm name...
    assert Sha1Fingerprint("foo") == new_fingerprint(algorithm="sha1", hexdigest="foo")
    assert Sha256Fingerprint("foo") == new_fingerprint(algorithm="sha256", hexdigest="foo")

    # ...and rejects algorithms with no registered fingerprint type.
    with pytest.raises(
        ValueError,
        match=(
            r"There is no fingerprint type registered for hash algorithm md5. The supported "
            r"algorithms are: "
        ),
    ):
        new_fingerprint(algorithm="md5", hexdigest="foo")


def test_hasher():
    # type: () -> None
    hasher = Sha1Fingerprint.new_hasher()
    assert isinstance(hasher, HashlibHasher)

    # HashlibHasher mirrors the underlying hashlib object's metadata.
    sha1 = hashlib.sha1()
    assert sha1.name == hasher.name
    assert sha1.block_size == hasher.block_size

    # Feeding the same bytes through MultiDigest updates both in lockstep.
    multi_digest = MultiDigest((sha1, hasher))
    multi_digest.update(b"foo")
    assert sha1.digest() == hasher.digest()

    # hexdigest() returns a typed fingerprint, not a bare str.
    fingerprint = hasher.hexdigest()
    assert isinstance(fingerprint, Sha1Fingerprint)
    assert fingerprint == Sha1Fingerprint(sha1.hexdigest())
{ "content_hash": "b616072ac0cd41bd5460cf9bdad0abb8", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 93, "avg_line_length": 28.191176470588236, "alnum_prop": 0.6645800730307773, "repo_name": "jsirois/pex", "id": "26604e26a7c5879be91ea4c339f3313667abf786", "size": "2049", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "tests/test_hashing.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1379" }, { "name": "Python", "bytes": "2182256" }, { "name": "Shell", "bytes": "1472" } ], "symlink_target": "" }
"""Set up the MyAppConfig function to be called""" default_app_config = 'profiles.apps.MyAppConfig'
{ "content_hash": "e4947d845a2aee16ff2b5498de3c7ad6", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 50, "avg_line_length": 50, "alnum_prop": 0.76, "repo_name": "nbeck90/game-chat", "id": "66910bb9caf5ee876efea202ce7ed42a8196eb09", "size": "100", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "gamechat/profiles/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "28229" }, { "name": "HTML", "bytes": "17027" }, { "name": "JavaScript", "bytes": "4994" }, { "name": "Python", "bytes": "108273" } ], "symlink_target": "" }
global squidclient_options squidclient_options = os.getenv('DSTAT_SQUID_OPTS') # -p 8080 class dstat_plugin(dstat): ''' Provides various Squid statistics. ''' def __init__(self): self.name = 'squid status' self.type = 's' self.width = 5 self.scale = 1000 self.vars = ('Number of file desc currently in use', 'CPU Usage, 5 minute avg', 'Total accounted', 'Number of clients accessing cache', 'Mean Object Size') self.nick = ('fdesc', 'cpu5', 'mem', 'clnts', 'objsz') def check(self): if not os.access('/usr/sbin/squidclient', os.X_OK): raise Exception, 'Needs squidclient binary' cmd_test('/usr/sbin/squidclient %s mgr:info' % squidclient_options) return True def extract(self): try: for l in cmd_splitlines('/usr/sbin/squidclient %s mgr:info' % squidclient_options, ':'): if l[0].strip() in self.vars: self.val[l[0].strip()] = l[1].strip() break except IOError, e: if op.debug > 1: print '%s: lost pipe to squidclient, %s' % (self.filename, e) for name in self.vars: self.val[name] = -1 except Exception, e: if op.debug > 1: print '%s: exception' (self.filename, e) for name in self.vars: self.val[name] = -1 # vim:ts=4:sw=4:et
{ "content_hash": "f467a15751d9271d74461159d340329d", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 100, "avg_line_length": 33.63636363636363, "alnum_prop": 0.529054054054054, "repo_name": "dongyoungy/dbseer_middleware", "id": "738517e97bd704c8c9f56243b91233a6b2f46ff4", "size": "1660", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "rs-sysmon2/plugins/dstat_squid.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "325736" }, { "name": "Java", "bytes": "86016" }, { "name": "Makefile", "bytes": "2355" }, { "name": "Python", "bytes": "224950" }, { "name": "Roff", "bytes": "22808" }, { "name": "Shell", "bytes": "1891" } ], "symlink_target": "" }
import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "issueTracking.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
{ "content_hash": "50eed12001a912c972ea031b68ee022f", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 77, "avg_line_length": 26, "alnum_prop": 0.717948717948718, "repo_name": "asifmohd/issueTracking", "id": "b0b8b0b9f2156540b619ee0b426809ebe0afcb6d", "size": "256", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "manage.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "3120" }, { "name": "HTML", "bytes": "17191" }, { "name": "Procfile", "bytes": "33" }, { "name": "Python", "bytes": "18211" } ], "symlink_target": "" }
''' The FBX contect class initializes FBX, and loads and prepares a scene for parsing. ''' from ...core import * from fbx import * from ..fbxutil.geometry import removeBadPolygons #------------------------------------------------------------------------------- class Context : ''' Initialize the FBX manager, load a scene and prepare the scene for parsing ''' def __init__(self) : self.fbxManager = None self.fbxScene = None self.fbxGeometryConverter = None def Setup(self, fbxFilePath) : ''' Setup the FBX SDK, and load and preprocess a scene object ''' self.fbxManager = FbxManager.Create() # load the scene fbxImporter = FbxImporter.Create(self.fbxManager, 'fbxImporter') status = fbxImporter.Initialize(fbxFilePath) if not status : raise Exception('FbxImporter: failed to load scene!') self.fbxScene = FbxScene.Create(self.fbxManager, 'fbxScene') fbxImporter.Import(self.fbxScene) fbxImporter.Destroy() # preprocess the scene self.fbxGeometryConverter = FbxGeometryConverter(self.fbxManager) if not self.fbxGeometryConverter.Triangulate(self.fbxScene, True) : raise Exception('Failed to triangulate FBX scene!') if not self.fbxGeometryConverter.SplitMeshesPerMaterial(self.fbxScene, True) : raise Exception('Failed to split meshes by material!') removeBadPolygons(self.fbxScene) return self.fbxScene def Discard(self) : ''' Discard the FBX SDK ''' if self.fbxGeometryConverter != None : self.fbxGeometryConverter = None if self.fbxScene != None : self.fbxScene.Destroy() self.fbxScene = None if self.fbxManager != None : self.fbxManager.Destroy() self.fbxManager = None #--- eof
{ "content_hash": "d69d7aabc9ea6f730de3924c6f106fab", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 86, "avg_line_length": 32.644067796610166, "alnum_prop": 0.6048805815160956, "repo_name": "floooh/drahtgitter", "id": "8e8bd83f6702c86bf3de172c4a1f1df0260b1feb", "size": "1926", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "drahtgitter/readers/fbxutil/context.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "1713" }, { "name": "Python", "bytes": "97022" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('shop', '0004_productimage_file_field'), ] operations = [ migrations.AlterField( model_name='product', name='publish_date', field=models.DateTimeField(help_text="With Published chosen, won't be shown until this time", null=True, verbose_name='Published from', db_index=True, blank=True), ), ]
{ "content_hash": "5728390cffad967e02b1c06827a9f078", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 175, "avg_line_length": 28.444444444444443, "alnum_prop": 0.638671875, "repo_name": "ryneeverett/cartridge", "id": "ebdf1096c96a06ea139472b526b2b7a487973627", "size": "536", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "cartridge/shop/migrations/0005_auto_20150527_1127.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "5988" }, { "name": "HTML", "bytes": "32789" }, { "name": "JavaScript", "bytes": "3467" }, { "name": "Python", "bytes": "228644" } ], "symlink_target": "" }
""" Transformation Based Learning A general purpose package for Transformation Based Learning, currently used by nltk.tag.BrillTagger. """ from nltk.tbl.template import Template #API: Template(...), Template.expand(...) from nltk.tbl.feature import Feature #API: Feature(...), Feature.expand(...) from nltk.tbl.rule import Rule #API: Rule.format(...), Rule.templatetid from nltk.tbl.erroranalysis import error_list
{ "content_hash": "2b6b96e5d078835745e6bc1cd8890738", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 60, "avg_line_length": 23.210526315789473, "alnum_prop": 0.7165532879818595, "repo_name": "MyRookie/SentimentAnalyse", "id": "4d6a30b748b211e7c9c5dd5a2b65218250585139", "size": "784", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "venv/lib/python2.7/site-packages/nltk/tbl/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "316238" }, { "name": "C++", "bytes": "5171" }, { "name": "CSS", "bytes": "6267" }, { "name": "FORTRAN", "bytes": "3200" }, { "name": "HTML", "bytes": "449" }, { "name": "JavaScript", "bytes": "6187" }, { "name": "Prolog", "bytes": "60188" }, { "name": "Python", "bytes": "13690978" }, { "name": "Shell", "bytes": "8340" }, { "name": "TeX", "bytes": "212" } ], "symlink_target": "" }
class SpotifyException(Exception): def __init__(self, http_status, code, msg, reason=None, headers=None): self.http_status = http_status self.code = code self.msg = msg self.reason = reason # `headers` is used to support `Retry-After` in the event of a # 429 status code. if headers is None: headers = {} self.headers = headers def __str__(self): return 'http status: {0}, code:{1} - {2}, reason: {3}'.format( self.http_status, self.code, self.msg, self.reason)
{ "content_hash": "74eef97cafad818891f6e9c587196c5f", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 74, "avg_line_length": 35.5, "alnum_prop": 0.5704225352112676, "repo_name": "plamere/spotipy", "id": "df503f10bf330346cbd9d8e0aa34a37e17412f01", "size": "568", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "spotipy/exceptions.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "192394" } ], "symlink_target": "" }
__author__ = "Kosuke Akizuki <kosuke19952000@gmail.com>" __status__ = "OK" __version__ = "1.0" __date__ = "11 Mar 2015" from .processing import execute def merge(dir_src, dir_dst, pattern, symlinks=False, ignore=list()): if _isNotNone(dir_src, dir_dst, pattern) and \ _isNotEmpty(dir_src, dir_dst, pattern): status_code = execute(dir_src, dir_dst, pattern, symlinks, ignore) # マージが実行されなかった時に、falseが返ってくるので、 # その時は警告を発する if status_code == False: raise SyntaxWarning('''Not able to merge it with this pass or date pattern. Please confirm it!!!''') else: raise ValueError('''The value of the argument is "None" or empty. The value except "None" or empty, please.''') def _isNotNone(*args): for arg in args: if arg is None: return False return True def _isNotEmpty(*args): for arg in args: if not str(arg): return False return True if __name__ == '__main__': merge('../test/src/', '../test/out/', "a")
{ "content_hash": "0314d79b35f03d6953fb98d1e18617b0", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 87, "avg_line_length": 31.176470588235293, "alnum_prop": 0.5877358490566038, "repo_name": "k4zzk/mercre", "id": "7a04065edb8c3d1d71b9503367c678cfedfd2e6b", "size": "1510", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "mercre/api.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "23813" }, { "name": "Shell", "bytes": "564" } ], "symlink_target": "" }
"""Initial database commit Revision ID: 35e7157127b Revises: None Create Date: 2014-06-01 21:15:45.151351 """ # revision identifiers, used by Alembic. revision = '35e7157127b' down_revision = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### pass ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### pass ### end Alembic commands ###
{ "content_hash": "f553ac888c22e9a1f421c2f64d549c06", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 63, "avg_line_length": 19.076923076923077, "alnum_prop": 0.6834677419354839, "repo_name": "Kerbas-ad-astra/KerbalStuff", "id": "e819d71f73fc4df96eca4a72c948e9ff8aa5285e", "size": "496", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "alembic/versions/35e7157127b_initial_database_commit.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "117498" }, { "name": "CoffeeScript", "bytes": "30971" }, { "name": "HTML", "bytes": "137143" }, { "name": "JavaScript", "bytes": "210471" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "138384" } ], "symlink_target": "" }
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import tensorflow.contrib.layers as layers from ray.rllib.models import ModelCatalog def _build_q_network(inputs, num_actions, config): dueling = config["dueling"] hiddens = config["hiddens"] frontend = ModelCatalog.get_model(inputs, 1, config["model"]) frontend_out = frontend.last_layer with tf.variable_scope("action_value"): action_out = frontend_out for hidden in hiddens: action_out = layers.fully_connected( action_out, num_outputs=hidden, activation_fn=tf.nn.relu) action_scores = layers.fully_connected( action_out, num_outputs=num_actions, activation_fn=None) if dueling: with tf.variable_scope("state_value"): state_out = frontend_out for hidden in hiddens: state_out = layers.fully_connected( state_out, num_outputs=hidden, activation_fn=tf.nn.relu) state_score = layers.fully_connected( state_out, num_outputs=1, activation_fn=None) action_scores_mean = tf.reduce_mean(action_scores, 1) action_scores_centered = action_scores - tf.expand_dims( action_scores_mean, 1) return state_score + action_scores_centered else: return action_scores def _build_action_network( q_values, observations, num_actions, stochastic, eps): deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations)[0] random_actions = tf.random_uniform( tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform( tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where( chose_random, random_actions, deterministic_actions) return tf.cond( stochastic, lambda: stochastic_actions, lambda: deterministic_actions) def _huber_loss(x, delta=1.0): """Reference: https://en.wikipedia.org/wiki/Huber_loss""" return tf.where( tf.abs(x) < delta, tf.square(x) * 0.5, delta * (tf.abs(x) - 0.5 * delta)) def _minimize_and_clip(optimizer, objective, var_list, clip_val=10): """Minimized 
`objective` using `optimizer` w.r.t. variables in `var_list` while ensure the norm of the gradients for each variable is clipped to `clip_val` """ gradients = optimizer.compute_gradients(objective, var_list=var_list) for i, (grad, var) in enumerate(gradients): if grad is not None: gradients[i] = (tf.clip_by_norm(grad, clip_val), var) return optimizer.apply_gradients(gradients) def _scope_vars(scope, trainable_only=False): """ Get variables inside a scope The scope can be specified as a string Parameters ---------- scope: str or VariableScope scope in which the variables reside. trainable_only: bool whether or not to return only the variables that were marked as trainable. Returns ------- vars: [tf.Variable] list of variables in `scope`. """ return tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.VARIABLES, scope=scope if isinstance(scope, str) else scope.name) class DQNGraph(object): def __init__(self, env, config): self.env = env num_actions = env.action_space.n optimizer = tf.train.AdamOptimizer(learning_rate=config["lr"]) # Action inputs self.stochastic = tf.placeholder(tf.bool, (), name="stochastic") self.eps = tf.placeholder(tf.float32, (), name="eps") self.cur_observations = tf.placeholder( tf.float32, shape=(None,) + env.observation_space.shape) # Action Q network with tf.variable_scope("q_func") as scope: q_values = _build_q_network( self.cur_observations, num_actions, config) q_func_vars = _scope_vars(scope.name) # Action outputs self.output_actions = _build_action_network( q_values, self.cur_observations, num_actions, self.stochastic, self.eps) # Replay inputs self.obs_t = tf.placeholder( tf.float32, shape=(None,) + env.observation_space.shape) self.act_t = tf.placeholder(tf.int32, [None], name="action") self.rew_t = tf.placeholder(tf.float32, [None], name="reward") self.obs_tp1 = tf.placeholder( tf.float32, shape=(None,) + env.observation_space.shape) self.done_mask = tf.placeholder(tf.float32, [None], name="done") 
self.importance_weights = tf.placeholder( tf.float32, [None], name="weight") # q network evaluation with tf.variable_scope("q_func", reuse=True): self.q_t = _build_q_network(self.obs_t, num_actions, config) # target q network evalution with tf.variable_scope("target_q_func") as scope: self.q_tp1 = _build_q_network(self.obs_tp1, num_actions, config) target_q_func_vars = _scope_vars(scope.name) # q scores for actions which we know were selected in the given state. q_t_selected = tf.reduce_sum( self.q_t * tf.one_hot(self.act_t, num_actions), 1) # compute estimate of best possible value starting from state at t + 1 if config["double_q"]: with tf.variable_scope("q_func", reuse=True): q_tp1_using_online_net = _build_q_network( self.obs_tp1, num_actions, config) q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1) q_tp1_best = tf.reduce_sum( self.q_tp1 * tf.one_hot( q_tp1_best_using_online_net, num_actions), 1) else: q_tp1_best = tf.reduce_max(self.q_tp1, 1) q_tp1_best_masked = (1.0 - self.done_mask) * q_tp1_best # compute RHS of bellman equation q_t_selected_target = self.rew_t + config["gamma"] * q_tp1_best_masked # compute the error (potentially clipped) self.td_error = q_t_selected - tf.stop_gradient(q_t_selected_target) errors = _huber_loss(self.td_error) weighted_error = tf.reduce_mean(self.importance_weights * errors) # compute optimization op (potentially with gradient clipping) if config["grad_norm_clipping"] is not None: self.optimize_expr = _minimize_and_clip( optimizer, weighted_error, var_list=q_func_vars, clip_val=config["grad_norm_clipping"]) else: self.optimize_expr = optimizer.minimize( weighted_error, var_list=q_func_vars) # update_target_fn will be called periodically to copy Q network to # target Q network update_target_expr = [] for var, var_target in zip( sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) self.update_target_expr = 
tf.group(*update_target_expr) def update_target(self, sess): return sess.run(self.update_target_expr) def act(self, sess, obs, eps, stochastic=True): return sess.run( self.output_actions, feed_dict={ self.cur_observations: obs, self.stochastic: stochastic, self.eps: eps, }) def train( self, sess, obs_t, act_t, rew_t, obs_tp1, done_mask, importance_weights): td_err, _ = sess.run( [self.td_error, self.optimize_expr], feed_dict={ self.obs_t: obs_t, self.act_t: act_t, self.rew_t: rew_t, self.obs_tp1: obs_tp1, self.done_mask: done_mask, self.importance_weights: importance_weights }) return td_err
{ "content_hash": "b972027e6cfefbffd11380239ac4e7d9", "timestamp": "", "source": "github", "line_count": 213, "max_line_length": 79, "avg_line_length": 38.244131455399064, "alnum_prop": 0.6016449791308618, "repo_name": "alanamarzoev/ray", "id": "a4e28381f4c818d40429c05ed16f64ca6fba11ac", "size": "8146", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/ray/rllib/dqn/models.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "115351" }, { "name": "C++", "bytes": "678730" }, { "name": "CMake", "bytes": "17015" }, { "name": "CSS", "bytes": "70" }, { "name": "HTML", "bytes": "396" }, { "name": "Jupyter Notebook", "bytes": "2507" }, { "name": "Python", "bytes": "884337" }, { "name": "Ruby", "bytes": "953" }, { "name": "Shell", "bytes": "28965" } ], "symlink_target": "" }
import sqlalchemy as sa from sqlalchemy import orm from neutron.db import l3_db from neutron.db import model_base from neutron.db import models_v2 from networking_cisco.plugins.cisco.db.device_manager import hd_models class RouterType(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents Neutron router types. A router type is associated with a with hosting device template. The template is used when hosting device for the router type is created. Only 'id', 'name', 'description' are visible in non-admin context. """ __tablename__ = 'cisco_router_types' # name of router type, should preferably be unique name = sa.Column(sa.String(255), nullable=False) # description of this router type description = sa.Column(sa.String(255)) # template to use to create hosting devices for this router type template_id = sa.Column(sa.String(36), sa.ForeignKey('cisco_hosting_device_templates.id', ondelete='CASCADE')) template = orm.relationship(hd_models.HostingDeviceTemplate) # 'ha_enabled_by_default' is True if routers of this type should have HA on ha_enabled_by_default = sa.Column(sa.Boolean, nullable=False, server_default=sa.sql.false()) # 'shared' is True if routertype is available to all tenants shared = sa.Column(sa.Boolean, nullable=False, server_default=sa.sql.true()) #TODO(bobmel): add HA attribute: One of None, 'GPLB', 'VRRP', or 'HSRP' # The number of slots this router type consume in hosting device slot_need = sa.Column(sa.Integer, autoincrement=False) # module to be used as scheduler for router of this type scheduler = sa.Column(sa.String(255), nullable=False) # module to be used by router plugin as router type driver driver = sa.Column(sa.String(255), nullable=False) # module to be used by configuration agent as service helper driver cfg_agent_service_helper = sa.Column(sa.String(255), nullable=False) # module to be used by configuration agent for in-device configurations cfg_agent_driver = sa.Column(sa.String(255), nullable=False) class 
RouterHostingDeviceBinding(model_base.BASEV2): """Represents binding between Neutron routers and their hosting devices.""" __tablename__ = 'cisco_router_mappings' router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete='CASCADE'), primary_key=True) router = orm.relationship( l3_db.Router, backref=orm.backref('hosting_info', cascade='all', uselist=False)) # 'router_role' specifies the type of role the router serves in role = sa.Column(sa.String(255), default=None) # 'router_type_id' is id of router type for this router router_type_id = sa.Column( sa.String(36), sa.ForeignKey('cisco_router_types.id'), primary_key=True, nullable=False) router_type = orm.relationship(RouterType) # 'inflated_slot_need' is the slot need of the router plus the # number slots needed by other resources to be associated with the # router. It's only considered if > 0. inflated_slot_need = sa.Column(sa.Integer, autoincrement=False, server_default='0') # If 'auto_schedule' is True then router is automatically scheduled # if it lacks a hosting device or its hosting device fails. auto_schedule = sa.Column(sa.Boolean, default=True, nullable=False) share_hosting_device = sa.Column(sa.Boolean, nullable=False, server_default=sa.sql.true()) # id of hosting device hosting this router, None/NULL if unscheduled. hosting_device_id = sa.Column(sa.String(36), sa.ForeignKey('cisco_hosting_devices.id', ondelete='SET NULL')) hosting_device = orm.relationship(hd_models.HostingDevice)
{ "content_hash": "a560de6bcf555512d1aee07fae345c3d", "timestamp": "", "source": "github", "line_count": 82, "max_line_length": 79, "avg_line_length": 49.3780487804878, "alnum_prop": 0.6591751049641887, "repo_name": "CiscoSystems/networking-cisco", "id": "18eaa04aa5e7a6787eea50d5201e12b8698b94d7", "size": "4683", "binary": false, "copies": "1", "ref": "refs/heads/asr1k_liberty_master_wip", "path": "networking_cisco/plugins/cisco/db/l3/l3_models.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "1043" }, { "name": "Python", "bytes": "2082062" }, { "name": "Shell", "bytes": "44368" } ], "symlink_target": "" }
import h5py import numpy as np from boadata.core import DataObject from boadata.core.data_conversion import ChainConversion, DataConversion from boadata.data.mixins import GetItemMixin @DataObject.register_type(default=True) @ChainConversion.enable_to( "pandas_data_frame", through="numpy_array", condition=lambda x: x.ndim == 2 ) @ChainConversion.enable_to( "pandas_series", through="numpy_array", condition=lambda x: x.ndim == 1 ) @ChainConversion.enable_to( "csv", through="numpy_array", condition=lambda x: x.ndim <= 2 ) class Hdf5Dataset(DataObject): real_type = h5py.Dataset type_name = "hdf5_dataset" @property def shape(self): return self.inner_data.shape @property def ndim(self): return len(self.shape) def __to_numpy_array__(self): data = np.array(self.inner_data) numpy_type = DataObject.registered_types["numpy_array"] return numpy_type(data, source=self) @classmethod def __from_numpy_array__(cls, data_object, uri, **kwargs): file, dataset = uri.split("::") h5file = h5py.File(file) ds = h5file.create_dataset(dataset, data=data_object.inner_data) return Hdf5Dataset(ds, source=data_object, uri=uri) @classmethod def accepts_uri(cls, uri): if not (".h5::" in uri or ".hdf5::" in uri): return False return True # TODO: Fix the following try: candidate = odo.odo(uri, cls.real_type) if candidate.attrs.get(b"CLASS") != b"TABLE": return True except: pass return False @DataObject.register_type() @ChainConversion.enable_to("csv", through="pandas_data_frame") class Hdf5Table(DataObject, GetItemMixin): real_type = h5py.Dataset type_name = "hdf5_table" @classmethod def accepts_uri(cls, uri): return False # TODO: Fix not supported if not (".h5::" in uri or ".hdf5::" in uri): return False try: candidate = odo.odo(uri, cls.real_type) if candidate.attrs.get(b"CLASS") == b"TABLE": return True except: pass return False @property def columns(self): import re attrs = dict(self.inner_data.attrs) ncols = len([1 for key in attrs.keys() if re.match("FIELD_\\d+_NAME", key)]) return 
[attrs["FIELD_{0}_NAME".format(i)].decode() for i in range(ncols)] @property def ndim(self): return 2 @property def shape(self): return len(self.inner_data), len(self.columns) def __to_xy_dataseries__(self, x, y, **kwargs): return self.convert("pandas_data_frame", **kwargs).convert( "xy_dataseries", x=x, y=y ) def __to_pandas_data_frame__(self): import pandas as pd df = pd.DataFrame( dict({key: pd.Series(self.inner_data[key]) for key in self.columns}) ) df = df[self.columns] pd_type = DataObject.registered_types["pandas_data_frame"] return pd_type( df, source=self ) # , name=self.inner_data.attrs["TITLE"].decode())
{ "content_hash": "2dae317376795a9b4c2fbc9ca146aa7b", "timestamp": "", "source": "github", "line_count": 115, "max_line_length": 84, "avg_line_length": 27.765217391304347, "alnum_prop": 0.5991230817413091, "repo_name": "janpipek/boadata", "id": "a658fd0f315f4cb2ec8eba8791a26b904808a2eb", "size": "3193", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "boadata/data/hdf5_types.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "131765" } ], "symlink_target": "" }
"""Default variable filters.""" from __future__ import unicode_literals import re import random as random_module import unicodedata from decimal import Decimal, InvalidOperation, Context, ROUND_HALF_UP from functools import wraps from pprint import pformat from django.template.base import Variable, Library, VariableDoesNotExist from django.conf import settings from django.utils import formats from django.utils.dateformat import format, time_format from django.utils.encoding import force_unicode, iri_to_uri from django.utils.html import (conditional_escape, escapejs, fix_ampersands, escape, urlize as urlize_impl, linebreaks, strip_tags) from django.utils.http import urlquote from django.utils.text import Truncator, wrap, phone2numeric from django.utils.safestring import mark_safe, SafeData, mark_for_escaping from django.utils.timesince import timesince, timeuntil from django.utils.translation import ugettext, ungettext from django.utils.text import normalize_newlines register = Library() ####################### # STRING DECORATOR # ####################### def stringfilter(func): """ Decorator for filters which should only receive unicode objects. The object passed as the first positional argument will be converted to a unicode object. """ def _dec(*args, **kwargs): if args: args = list(args) args[0] = force_unicode(args[0]) if (isinstance(args[0], SafeData) and getattr(_dec._decorated_function, 'is_safe', False)): return mark_safe(func(*args, **kwargs)) return func(*args, **kwargs) # Include a reference to the real function (used to check original # arguments by the template parser, and to bear the 'is_safe' attribute # when multiple decorators are applied). 
_dec._decorated_function = getattr(func, '_decorated_function', func) for attr in ('is_safe', 'needs_autoescape'): if hasattr(func, attr): import warnings warnings.warn("Setting the %s attribute of a template filter " "function is deprecated; use @register.filter(%s=%s) " "instead" % (attr, attr, getattr(func, attr)), DeprecationWarning) setattr(_dec, attr, getattr(func, attr)) return wraps(func)(_dec) ################### # STRINGS # ################### @register.filter(is_safe=True) @stringfilter def addslashes(value): """ Adds slashes before quotes. Useful for escaping strings in CSV, for example. Less useful for escaping JavaScript; use the ``escapejs`` filter instead. """ return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'") @register.filter(is_safe=True) @stringfilter def capfirst(value): """Capitalizes the first character of the value.""" return value and value[0].upper() + value[1:] @register.filter("escapejs") @stringfilter def escapejs_filter(value): """Hex encodes characters for use in JavaScript strings.""" return escapejs(value) @register.filter("fix_ampersands", is_safe=True) @stringfilter def fix_ampersands_filter(value): """Replaces ampersands with ``&amp;`` entities.""" return fix_ampersands(value) # Values for testing floatformat input against infinity and NaN representations, # which differ across platforms and Python versions. Some (i.e. old Windows # ones) are not recognized by Decimal but we want to return them unchanged vs. # returning an empty string as we do for completley invalid input. Note these # need to be built up from values that are not inf/nan, since inf/nan values do # not reload properly from .pyc files on Windows prior to some level of Python 2.5 # (see Python Issue757815 and Issue1080440). 
pos_inf = 1e200 * 1e200 neg_inf = -1e200 * 1e200 nan = (1e200 * 1e200) // (1e200 * 1e200) special_floats = [str(pos_inf), str(neg_inf), str(nan)] @register.filter(is_safe=True) def floatformat(text, arg=-1): """ Displays a float to a specified number of decimal places. If called without an argument, it displays the floating point number with one decimal place -- but only if there's a decimal place to be displayed: * num1 = 34.23234 * num2 = 34.00000 * num3 = 34.26000 * {{ num1|floatformat }} displays "34.2" * {{ num2|floatformat }} displays "34" * {{ num3|floatformat }} displays "34.3" If arg is positive, it will always display exactly arg number of decimal places: * {{ num1|floatformat:3 }} displays "34.232" * {{ num2|floatformat:3 }} displays "34.000" * {{ num3|floatformat:3 }} displays "34.260" If arg is negative, it will display arg number of decimal places -- but only if there are places to be displayed: * {{ num1|floatformat:"-3" }} displays "34.232" * {{ num2|floatformat:"-3" }} displays "34" * {{ num3|floatformat:"-3" }} displays "34.260" If the input float is infinity or NaN, the (platform-dependent) string representation of that value will be displayed. """ try: input_val = force_unicode(text) d = Decimal(input_val) except UnicodeEncodeError: return '' except InvalidOperation: if input_val in special_floats: return input_val try: d = Decimal(force_unicode(float(text))) except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError): return '' try: p = int(arg) except ValueError: return input_val try: m = int(d) - d except (ValueError, OverflowError, InvalidOperation): return input_val if not m and p < 0: return mark_safe(formats.number_format('%d' % (int(d)), 0)) if p == 0: exp = Decimal(1) else: exp = Decimal('1.0') / (Decimal(10) ** abs(p)) try: # Set the precision high enough to avoid an exception, see #15789. 
tupl = d.as_tuple() units = len(tupl[1]) - tupl[2] prec = abs(p) + units + 1 # Avoid conversion to scientific notation by accessing `sign`, `digits` # and `exponent` from `Decimal.as_tuple()` directly. sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP, Context(prec=prec)).as_tuple() digits = [unicode(digit) for digit in reversed(digits)] while len(digits) <= abs(exponent): digits.append('0') digits.insert(-exponent, '.') if sign: digits.append('-') number = ''.join(reversed(digits)) return mark_safe(formats.number_format(number, abs(p))) except InvalidOperation: return input_val @register.filter(is_safe=True) @stringfilter def iriencode(value): """Escapes an IRI value for use in a URL.""" return force_unicode(iri_to_uri(value)) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def linenumbers(value, autoescape=None): """Displays text with line numbers.""" lines = value.split('\n') # Find the maximum width of the line count, for use with zero padding # string format command width = unicode(len(unicode(len(lines)))) if not autoescape or isinstance(value, SafeData): for i, line in enumerate(lines): lines[i] = ("%0" + width + "d. %s") % (i + 1, line) else: for i, line in enumerate(lines): lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line)) return mark_safe('\n'.join(lines)) @register.filter(is_safe=True) @stringfilter def lower(value): """Converts a string into all lowercase.""" return value.lower() @register.filter(is_safe=False) @stringfilter def make_list(value): """ Returns the value turned into a list. For an integer, it's a list of digits. For a string, it's a list of characters. """ return list(value) @register.filter(is_safe=True) @stringfilter def slugify(value): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. 
""" value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') value = unicode(re.sub('[^\w\s-]', '', value).strip().lower()) return mark_safe(re.sub('[-\s]+', '-', value)) @register.filter(is_safe=True) def stringformat(value, arg): """ Formats the variable according to the arg, a string formatting specifier. This specifier uses Python string formating syntax, with the exception that the leading "%" is dropped. See http://docs.python.org/lib/typesseq-strings.html for documentation of Python string formatting """ try: return ("%" + unicode(arg)) % value except (ValueError, TypeError): return "" @register.filter(is_safe=True) @stringfilter def title(value): """Converts a string into titlecase.""" t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title()) return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t) @register.filter(is_safe=True) @stringfilter def truncatechars(value, arg): """ Truncates a string after a certain number of characters. Argument: Number of characters to truncate after. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. return Truncator(value).chars(length) @register.filter(is_safe=True) @stringfilter def truncatewords(value, arg): """ Truncates a string after a certain number of words. Argument: Number of words to truncate after. Newlines within the string are removed. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. return Truncator(value).words(length, truncate=' ...') @register.filter(is_safe=True) @stringfilter def truncatewords_html(value, arg): """ Truncates HTML after a certain number of words. Argument: Number of words to truncate after. Newlines in the HTML are preserved. """ try: length = int(arg) except ValueError: # invalid literal for int() return value # Fail silently. 
return Truncator(value).words(length, html=True, truncate=' ...') @register.filter(is_safe=False) @stringfilter def upper(value): """Converts a string into all uppercase.""" return value.upper() @register.filter(is_safe=False) @stringfilter def urlencode(value, safe=None): """ Escapes a value for use in a URL. Takes an optional ``safe`` parameter used to determine the characters which should not be escaped by Django's ``urlquote`` method. If not provided, the default safe characters will be used (but an empty string can be provided when *all* characters should be escaped). """ kwargs = {} if safe is not None: kwargs['safe'] = safe return urlquote(value, **kwargs) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def urlize(value, autoescape=None): """Converts URLs in plain text into clickable links.""" return mark_safe(urlize_impl(value, nofollow=True, autoescape=autoescape)) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def urlizetrunc(value, limit, autoescape=None): """ Converts URLs into clickable links, truncating URLs to the given character limit, and adding 'rel=nofollow' attribute to discourage spamming. Argument: Length to truncate URLs to. """ return mark_safe(urlize_impl(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape)) @register.filter(is_safe=False) @stringfilter def wordcount(value): """Returns the number of words.""" return len(value.split()) @register.filter(is_safe=True) @stringfilter def wordwrap(value, arg): """ Wraps words at specified line length. Argument: number of characters to wrap the text at. """ return wrap(value, int(arg)) @register.filter(is_safe=True) @stringfilter def ljust(value, arg): """ Left-aligns the value in a field of a given width. Argument: field size. """ return value.ljust(int(arg)) @register.filter(is_safe=True) @stringfilter def rjust(value, arg): """ Right-aligns the value in a field of a given width. Argument: field size. 
""" return value.rjust(int(arg)) @register.filter(is_safe=True) @stringfilter def center(value, arg): """Centers the value in a field of a given width.""" return value.center(int(arg)) @register.filter @stringfilter def cut(value, arg): """ Removes all values of arg from the given string. """ safe = isinstance(value, SafeData) value = value.replace(arg, '') if safe and arg != ';': return mark_safe(value) return value ################### # HTML STRINGS # ################### @register.filter("escape", is_safe=True) @stringfilter def escape_filter(value): """ Marks the value as a string that should not be auto-escaped. """ return mark_for_escaping(value) @register.filter(is_safe=True) @stringfilter def force_escape(value): """ Escapes a string's HTML. This returns a new string containing the escaped characters (as opposed to "escape", which marks the content for later possible escaping). """ return escape(value) @register.filter("linebreaks", is_safe=True, needs_autoescape=True) @stringfilter def linebreaks_filter(value, autoescape=None): """ Replaces line breaks in plain text with appropriate HTML; a single newline becomes an HTML line break (``<br />``) and a new line followed by a blank line becomes a paragraph break (``</p>``). """ autoescape = autoescape and not isinstance(value, SafeData) return mark_safe(linebreaks(value, autoescape)) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def linebreaksbr(value, autoescape=None): """ Converts all newlines in a piece of plain text to HTML line breaks (``<br />``). """ autoescape = autoescape and not isinstance(value, SafeData) value = normalize_newlines(value) if autoescape: value = escape(value) return mark_safe(value.replace('\n', '<br />')) @register.filter(is_safe=True) @stringfilter def safe(value): """ Marks the value as a string that should not be auto-escaped. """ return mark_safe(value) @register.filter(is_safe=True) def safeseq(value): """ A "safe" filter for sequences. 
Marks each element in the sequence, individually, as safe, after converting them to unicode. Returns a list with the results. """ return [mark_safe(force_unicode(obj)) for obj in value] @register.filter(is_safe=True) @stringfilter def removetags(value, tags): """Removes a space separated list of [X]HTML tags from the output.""" tags = [re.escape(tag) for tag in tags.split()] tags_re = '(%s)' % '|'.join(tags) starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U) endtag_re = re.compile('</%s>' % tags_re) value = starttag_re.sub('', value) value = endtag_re.sub('', value) return value @register.filter(is_safe=True) @stringfilter def striptags(value): """Strips all [X]HTML tags.""" return strip_tags(value) ################### # LISTS # ################### @register.filter(is_safe=False) def dictsort(value, arg): """ Takes a list of dicts, returns that list sorted by the property given in the argument. """ try: return sorted(value, key=Variable(arg).resolve) except (TypeError, VariableDoesNotExist): return '' @register.filter(is_safe=False) def dictsortreversed(value, arg): """ Takes a list of dicts, returns that list sorted in reverse order by the property given in the argument. """ try: return sorted(value, key=Variable(arg).resolve, reverse=True) except (TypeError, VariableDoesNotExist): return '' @register.filter(is_safe=False) def first(value): """Returns the first item in a list.""" try: return value[0] except IndexError: return '' @register.filter(is_safe=True, needs_autoescape=True) def join(value, arg, autoescape=None): """ Joins a list with a string, like Python's ``str.join(list)``. 
""" value = map(force_unicode, value) if autoescape: value = [conditional_escape(v) for v in value] try: data = conditional_escape(arg).join(value) except AttributeError: # fail silently but nicely return value return mark_safe(data) @register.filter(is_safe=True) def last(value): "Returns the last item in a list" try: return value[-1] except IndexError: return '' @register.filter(is_safe=True) def length(value): """Returns the length of the value - useful for lists.""" try: return len(value) except (ValueError, TypeError): return '' @register.filter(is_safe=False) def length_is(value, arg): """Returns a boolean of whether the value's length is the argument.""" try: return len(value) == int(arg) except (ValueError, TypeError): return '' @register.filter(is_safe=True) def random(value): """Returns a random item from the list.""" return random_module.choice(value) @register.filter("slice", is_safe=True) def slice_filter(value, arg): """ Returns a slice of the list. Uses the same syntax as Python's list slicing; see http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice for an introduction. """ try: bits = [] for x in arg.split(':'): if len(x) == 0: bits.append(None) else: bits.append(int(x)) return value[slice(*bits)] except (ValueError, TypeError): return value # Fail silently. @register.filter(is_safe=True, needs_autoescape=True) def unordered_list(value, autoescape=None): """ Recursively takes a self-nested list and returns an HTML unordered list -- WITHOUT opening and closing <ul> tags. The list is assumed to be in the proper format. 
For example, if ``var`` contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``, then ``{{ var|unordered_list }}`` would return:: <li>States <ul> <li>Kansas <ul> <li>Lawrence</li> <li>Topeka</li> </ul> </li> <li>Illinois</li> </ul> </li> """ if autoescape: escaper = conditional_escape else: escaper = lambda x: x def convert_old_style_list(list_): """ Converts old style lists to the new easier to understand format. The old list format looked like: ['Item 1', [['Item 1.1', []], ['Item 1.2', []]] And it is converted to: ['Item 1', ['Item 1.1', 'Item 1.2]] """ if not isinstance(list_, (tuple, list)) or len(list_) != 2: return list_, False first_item, second_item = list_ if second_item == []: return [first_item], True try: # see if second item is iterable iter(second_item) except TypeError: return list_, False old_style_list = True new_second_item = [] for sublist in second_item: item, old_style_list = convert_old_style_list(sublist) if not old_style_list: break new_second_item.extend(item) if old_style_list: second_item = new_second_item return [first_item, second_item], old_style_list def _helper(list_, tabs=1): indent = '\t' * tabs output = [] list_length = len(list_) i = 0 while i < list_length: title = list_[i] sublist = '' sublist_item = None if isinstance(title, (list, tuple)): sublist_item = title title = '' elif i < list_length - 1: next_item = list_[i+1] if next_item and isinstance(next_item, (list, tuple)): # The next item is a sub-list. sublist_item = next_item # We've processed the next item now too. 
i += 1 if sublist_item: sublist = _helper(sublist_item, tabs+1) sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist, indent, indent) output.append('%s<li>%s%s</li>' % (indent, escaper(force_unicode(title)), sublist)) i += 1 return '\n'.join(output) value, converted = convert_old_style_list(value) return mark_safe(_helper(value)) ################### # INTEGERS # ################### @register.filter(is_safe=False) def add(value, arg): """Adds the arg to the value.""" try: return int(value) + int(arg) except (ValueError, TypeError): try: return value + arg except Exception: return '' @register.filter(is_safe=False) def get_digit(value, arg): """ Given a whole number, returns the requested digit of it, where 1 is the right-most digit, 2 is the second-right-most digit, etc. Returns the original value for invalid input (if input or argument is not an integer, or if argument is less than 1). Otherwise, output is always an integer. """ try: arg = int(arg) value = int(value) except ValueError: return value # Fail silently for an invalid argument if arg < 1: return value try: return int(str(value)[-arg]) except IndexError: return 0 ################### # DATES # ################### @register.filter(expects_localtime=True, is_safe=False) def date(value, arg=None): """Formats a date according to the given format.""" if not value: return '' if arg is None: arg = settings.DATE_FORMAT try: return formats.date_format(value, arg) except AttributeError: try: return format(value, arg) except AttributeError: return '' @register.filter(expects_localtime=True, is_safe=False) def time(value, arg=None): """Formats a time according to the given format.""" if value in (None, ''): return '' if arg is None: arg = settings.TIME_FORMAT try: return formats.time_format(value, arg) except AttributeError: try: return time_format(value, arg) except AttributeError: return '' @register.filter("timesince", is_safe=False) def timesince_filter(value, arg=None): """Formats a date as the time since 
that date (i.e. "4 days, 6 hours").""" if not value: return '' try: if arg: return timesince(value, arg) return timesince(value) except (ValueError, TypeError): return '' @register.filter("timeuntil", is_safe=False) def timeuntil_filter(value, arg=None): """Formats a date as the time until that date (i.e. "4 days, 6 hours").""" if not value: return '' try: return timeuntil(value, arg) except (ValueError, TypeError): return '' ################### # LOGIC # ################### @register.filter(is_safe=False) def default(value, arg): """If value is unavailable, use given default.""" return value or arg @register.filter(is_safe=False) def default_if_none(value, arg): """If value is None, use given default.""" if value is None: return arg return value @register.filter(is_safe=False) def divisibleby(value, arg): """Returns True if the value is devisible by the argument.""" return int(value) % int(arg) == 0 @register.filter(is_safe=False) def yesno(value, arg=None): """ Given a string mapping values for true, false and (optionally) None, returns one of those strings according to the value: ========== ====================== ================================== Value Argument Outputs ========== ====================== ================================== ``True`` ``"yeah,no,maybe"`` ``yeah`` ``False`` ``"yeah,no,maybe"`` ``no`` ``None`` ``"yeah,no,maybe"`` ``maybe`` ``None`` ``"yeah,no"`` ``"no"`` (converts None to False if no mapping for None is given. ========== ====================== ================================== """ if arg is None: arg = ugettext('yes,no,maybe') bits = arg.split(',') if len(bits) < 2: return value # Invalid arg. try: yes, no, maybe = bits except ValueError: # Unpack list of wrong size (no "maybe" value provided). 
yes, no, maybe = bits[0], bits[1], bits[1] if value is None: return maybe if value: return yes return no ################### # MISC # ################### @register.filter(is_safe=True) def filesizeformat(bytes): """ Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102 bytes, etc). """ try: bytes = float(bytes) except (TypeError,ValueError,UnicodeDecodeError): return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0} filesize_number_format = lambda value: formats.number_format(round(value, 1), 1) KB = 1<<10 MB = 1<<20 GB = 1<<30 TB = 1<<40 PB = 1<<50 if bytes < KB: return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes} if bytes < MB: return ugettext("%s KB") % filesize_number_format(bytes / KB) if bytes < GB: return ugettext("%s MB") % filesize_number_format(bytes / MB) if bytes < TB: return ugettext("%s GB") % filesize_number_format(bytes / GB) if bytes < PB: return ugettext("%s TB") % filesize_number_format(bytes / TB) return ugettext("%s PB") % filesize_number_format(bytes / PB) @register.filter(is_safe=False) def pluralize(value, arg='s'): """ Returns a plural suffix if the value is not 1. By default, 's' is used as the suffix: * If value is 0, vote{{ value|pluralize }} displays "0 votes". * If value is 1, vote{{ value|pluralize }} displays "1 vote". * If value is 2, vote{{ value|pluralize }} displays "2 votes". If an argument is provided, that string is used instead: * If value is 0, class{{ value|pluralize:"es" }} displays "0 classes". * If value is 1, class{{ value|pluralize:"es" }} displays "1 class". * If value is 2, class{{ value|pluralize:"es" }} displays "2 classes". If the provided argument contains a comma, the text before the comma is used for the singular case and the text after the comma is used for the plural case: * If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies". * If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy". 
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies". """ if not ',' in arg: arg = ',' + arg bits = arg.split(',') if len(bits) > 2: return '' singular_suffix, plural_suffix = bits[:2] try: if int(value) != 1: return plural_suffix except ValueError: # Invalid string that's not a number. pass except TypeError: # Value isn't a string or a number; maybe it's a list? try: if len(value) != 1: return plural_suffix except TypeError: # len() of unsized object. pass return singular_suffix @register.filter("phone2numeric", is_safe=True) def phone2numeric_filter(value): """Takes a phone number and converts it in to its numerical equivalent.""" return phone2numeric(value) @register.filter(is_safe=True) def pprint(value): """A wrapper around pprint.pprint -- for debugging, really.""" try: return pformat(value) except Exception as e: return "Error in formatting: %s" % force_unicode(e, errors="replace")
{ "content_hash": "8627e05601093bdc09f5f88fef372ad1", "timestamp": "", "source": "github", "line_count": 903, "max_line_length": 84, "avg_line_length": 31.427464008859356, "alnum_prop": 0.6102399661721696, "repo_name": "aleida/django", "id": "b9cdd942966208a50419d20f84897e15d4cc78f5", "size": "28379", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django/template/defaultfilters.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "50207" }, { "name": "JavaScript", "bytes": "89078" }, { "name": "Python", "bytes": "8135526" }, { "name": "Shell", "bytes": "11901" } ], "symlink_target": "" }
""" Automatic File """
{ "content_hash": "61e19a74416adff20a94a44660161ef7", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 22, "avg_line_length": 23, "alnum_prop": 0.5652173913043478, "repo_name": "imvu/bluesteel", "id": "b0eec1eba0f5274d00b89aee5ae682d296ff993d", "size": "23", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "app/presenter/views/json/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "16828" }, { "name": "HTML", "bytes": "119014" }, { "name": "JavaScript", "bytes": "36015" }, { "name": "Python", "bytes": "1220104" } ], "symlink_target": "" }
import sys import json import struct def check_answer(fn, name = None): def wrapped(*args, **kwargs): ans = fn(*args, **kwargs) assert ans.get("answer") == "ok", \ "Error during " + (name if name else fn.__name__) + \ " " + ans.get("msg", "") return ans return wrapped class AgentIO: def __init__(self): self.cmdid = 0 self.waitid = None self.myid = None self.isloop = False self.msgs = [] # all received messages self.msg_listeners = {} # named listeners self.msg_def_listener = None # default listener self.reqid = 0 self.wait_req = {} # awaited requests self.wait_ans = {} # awaited answers self.rpc_listeners = {} # function listeners self.rpc_def_listener = None # default function listeners def send_req(self, req): "Send request" self.waitid = self.cmdid self.cmdid += 1 req["id"] = self.waitid data = json.dumps(req, ensure_ascii = False) data = data.encode("utf-8") sys.stdout.buffer.write(struct.pack("!I", len(data))) sys.stdout.buffer.write(data) sys.stdout.buffer.flush() def recv_ans(self): "Receive answer" plen = sys.stdin.buffer.read(4) plen = struct.unpack("!I", plen)[0] data = sys.stdin.buffer.read(plen).decode("utf-8") ans = json.loads(data) #print("ANS", ans, file = sys.stderr) cid = ans.get("id") if cid is None: raise Exception("Protocol error: cmdid not found, expected %d" %\ (self.waitid)) if cid != self.waitid: raise Exception("Protocol error: cmdid mismatch %d != %d" %\ (self.waitid, cid)) return ans def getid(self, cfg = False): "Get agent ID and configuration" self.send_req({"cmd": "getid", "cfg": cfg}) return self.recv_ans() getid_check = check_answer(getid, "getting ID") def push_msg(self, name, msg = None, opts = None): "Push message to hib" self.send_req({"cmd": "push", "name": name, "msg": msg, "opts": opts}) return self.recv_ans() push_msg_check = check_answer(push_msg, "push message") def subscribe(self, mask): "Subscribe to message(s)" self.send_req({"cmd": "sub", "mask": mask}) return self.recv_ans() subscribe_check = check_answer(subscribe, 
"subscribe to messages") def wait_msg(self, block = True): "Wait for new message(s)" self.send_req({"cmd": "wait_msg", "block": block}) return self.recv_ans() wait_msg_check = check_answer(wait_msg, "getting/waiting messages") def start_agent(self, cfg): "Start remote agent" self.send_req({"cmd": "start_agent", "cfg": cfg}) return self.recv_ans() start_agent_check = check_answer(start_agent, "start agent") def exit(self): "Send normal exit message" self.send_req({"cmd": "exit"}) return self.recv_ans() exit_check = check_answer(exit, "exit") def reg_callback(self, name, cb): "Register message callbacks" if name: self.msg_listeners[name] = cb else: self.msg_def_listener = cb def reg_rpc(self, fn, cb): "Register RPC callbacks" if fn: self.rpc_listeners[fn] = cb else: self.rpc_def_listener = cb def init_loop(self, cfg = False): "Loop initialization" # negotiate protocol protocols = sys.stdin.buffer.readline().strip().split(b"|") if not b"4bj" in protocols: raise Exception("No acceptable protocol (%s)" % \ ", ".join([x.decode("utf-8") for x in protocols])) sys.stdout.buffer.write(b"4bj\n") sys.stdout.buffer.flush() # initial exchange self.reg_callback(None, self.default_message) ans = self.getid_check(cfg = cfg) self.myid = ans.get("agentid") self.cfg = ans.get("cfg") def process_msgs(self): "Process messages until exhaustion" if not self.msgs: ans = self.wait_msg_check(block = True) self.msgs += ans.get("msgs", []) while self.msgs: nsname, msg, opts = self.msgs.pop(0) self.process_msg(nsname, msg, opts) def process_msg(self, name, msg, opts): "Process one message" # get namespace ns, name = name.split("/", 1) if ns == self.myid and name == "ret": # rpc for answers reqid = msg.get("reqid") if reqid in self.wait_req: self.wait_ans[reqid] = msg return if ns in self.msg_listeners: return self.msg_listeners[ns](ns, name, msg, opts) if self.msg_def_listener: return self.msg_def_listener(ns, name, msg, opts) def start_loop(self): "Main processing loop" self.isloop = True 
self.init_loop() while self.isloop: self.process_msgs() def end_loop(self): self.isloop = False def call(self, rpc, fn, args = None): "RPC from client" assert rpc, "RPC address can't be empty" reqid = self.reqid self.reqid += 1 self.push_msg_check(name = rpc + "/call", msg = {"fn": fn, "args": args}, opts = {"reqid": reqid, "from": self.myid, "check": "call"}) self.wait_req[reqid] = (reqid, rpc, fn, args) while reqid not in self.wait_ans: self.process_msgs() self.wait_req.pop(reqid) return self.wait_ans.pop(reqid) call_check = check_answer(call, "call rpc") def default_message(self, ns, name, msg, opts): "Default message handler" if ns == "*" and name == "pulse": return if ns == self.myid and name == "call": return self.default_rpc(ns, name, msg, opts) print("Unprocessed", ns, name, msg, opts, file = sys.stderr) def default_rpc(self, ns, name, msg, opts): "Default RPC server handler" # process rpc if name != "call": return msg = msg or {} opts = opts or {} fn = msg.get("fn") args = msg.get("args") reqid = opts.get("reqid") sender = opts.get("from") try: if fn in self.rpc_listeners: ret = self.rpc_listeners[fn](fn, args, reqid, sender) msg = {"answer": "ok", "ret": ret} elif self.rpc_def_listener: ret = self.rpc_def_listener(fn, args, reqid, sender) msg = {"answer": "ok", "ret": ret} else: msg = {"answer": "error", "msg": "No function \"%s\"" % fn} except Exception as e: traceback.print_exc(file = sys.stderr) msg = {"answer": "error", "msg": repr(e)} opts = {"reqid": reqid, "from": sender} self.push_msg_check(name = sender + "/ret", msg = msg, opts = opts) def send_signal(self): "Send signal to hub" sys.stdout.buffer.write(b"\"-\"") sys.stdout.buffer.flush()
{ "content_hash": "115a05b92e81262ba3af74b2866df07e", "timestamp": "", "source": "github", "line_count": 237, "max_line_length": 78, "avg_line_length": 31.135021097046412, "alnum_prop": 0.5297465781271175, "repo_name": "RomanKharin/lrmq", "id": "370efff5025e2d0980a7c9f3486151363bdd753e", "size": "8581", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lrmq/client/sync.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "70073" } ], "symlink_target": "" }
from math import log import operator # Chapter 3.1.1 - Information gain (Program List 3-1) def calculate_shannon_entropy(dataset): entry_count = len(dataset) label_counts = {} for feat_vec in dataset: current_label = feat_vec[-1] if current_label not in label_counts.keys(): label_counts[current_label] = 0 label_counts[current_label] += 1 shannon_entropy = 0.0 for key in label_counts: probably = float(label_counts[key]) / entry_count shannon_entropy -= probably * log(probably, 2) return shannon_entropy def create_dataset(): dataset = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']] labels = ['no surfacing', 'flippers'] return dataset, labels # Chapter 3.1.2 - Split Dataset (Program List 3-2) def split_dataset(dataset, axis, value): # create a new dataset to avoid modifying the original dataset ret_dataset = [] for feat_vec in dataset: if feat_vec[axis] == value: # get values in range 0 to (axis - 1) reduced_feat_vec = feat_vec[:axis] # a = [1, 2, 3] b = [4, 5, 6] # a.append(b) ==> a = [1, 2, 3, [4, 5, 6]] # a.extend(b) ==> a = [1, 2, 3, 4, 5, 6] # extend values in range (axis + 1) to end reduced_feat_vec.extend(feat_vec[axis + 1:]) ret_dataset.append(reduced_feat_vec) return ret_dataset # Chapter 3.1.2 - Split Dataset (Program List 3-3) def choose_best_feature_to_split(dataset): # one is result and others are features feature_count = len(dataset[0]) - 1 # original entropy base_entropy = calculate_shannon_entropy(dataset) best_info_gain = 0.0 best_feature = -1 for i in range(feature_count): # make a new list with the i th column data feature_list = [item[i] for item in dataset] # convert to set to remove duplicated item unique_values = set(feature_list) new_entropy = 0.0 # calculate entropy under this split for value in unique_values: # split dataset based on selected feature and certain value sub_dataset = split_dataset(dataset, i, value) probably = len(sub_dataset) / float(len(dataset)) new_entropy += probably * 
calculate_shannon_entropy(sub_dataset) info_gain = base_entropy - new_entropy if info_gain > best_info_gain: best_info_gain = info_gain best_feature = i return best_feature # Chapter 3.1.3 - Build tree recursively def majority_count(class_list): class_count = {} for vote in class_list: if vote not in class_count.keys(): class_count[vote] = 0 class_count[vote] += 1 sorted_class_count = sorted(class_count.iteritems(), key=operator.itemgetter(1), reverse=True) return sorted_class_count[0][0] # Chapter 3.1.3 - Build tree recursively (Program List 3-4) def create_tree(dataset, labels): # get the result of each data in dataset (with duplicated items) class_list = [data[-1] for data in dataset] # Recursive Exit 1: Finish split because all results are in same type # list.count(item) returns the count of certain item in the list # this means the result contains only one type, then return the result type (terminating block) if class_list.count(class_list[0]) == len(class_list): return class_list[0] # Recursive Exit 2: # used all features but still had more than one result type # then get the type who has the most votes if len(dataset[0]) == 1: return majority_count(class_list) # get the best feature with most information gain best_feature = choose_best_feature_to_split(dataset) best_feature_label = labels[best_feature] # make a new tree dictionary my_tree = {best_feature_label: {}} # remove the used feature from labels del(labels[best_feature]) # get all value of certain feature feature_values = [data[best_feature] for data in dataset] # remove duplicated items unique_values = set(feature_values) for value in unique_values: # copy all items in labels to sub_labels sub_labels = labels[:] # create sub tree with new dataset and new labels and then append it to certain value of best_feature # best_feature has been removed in new dataset and new labels my_tree[best_feature_label][value] = create_tree(split_dataset(dataset, best_feature, value), sub_labels) return my_tree
{ "content_hash": "84489e61c31d0429837007ee0ffa7df4", "timestamp": "", "source": "github", "line_count": 117, "max_line_length": 113, "avg_line_length": 39.41880341880342, "alnum_prop": 0.6266261925411969, "repo_name": "metesa/MachineLearningInAction-Code", "id": "67b7f17de8682a2bcaeb982cb77ccde117fda05d", "size": "4696", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Ch3/trees.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "22511" } ], "symlink_target": "" }
import ode

# NOTE(review): this module appears to be an unfinished *template*.  Every
# kinetic constant and initial concentration below is assigned no value, and
# several statements (e.g. `y = range[26]`, `y[0] = #[...]`) do not parse as
# written.  The numbers must be filled in before the simulation can run --
# confirm against the project's documentation how it was meant to be used.

#enzyme initial condition setup
# Per enzyme: vmax (max production), gamma (decay), rate (catalytic rate),
# km (Michaelis constant).  Each name encodes substrate->product, e.g.
# GlucoseGA3P is the enzyme converting Glucose to GA3P.
GlucoseGA3P_vmax =
GlucoseGA3P_gamma =
GlucoseGA3P_rate =
GlucoseGA3P_km =
GA3PDHAP_vmax =
GA3PDHAP_gamma =
GA3PDHAP_rate =
GA3PDHAP_km =
DHAPG3P_vmax =
DHAPG3P_gamma =
DHAPG3P_rate =
DHAPG3P_km =
G3PGlycerol_vmax =
G3PGlycerol_gamma =
G3PGlycerol_rate =
G3PGlycerol_km =
GA3PPyruvate_vmax =
GA3PPyruvate_gamma =
GA3PPyruvate_rate =
GA3PPyruvate_km =
PyruvateAlphaAcetolactate_vmax =
PyruvateAlphaAcetolactate_gamma =
PyruvateAlphaAcetolactate_rate =
PyruvateAlphaAcetolactate_km =
AlphaAcetolactateDiacetyl_vmax =
AlphaAcetolactateDiacetyl_gamma =
AlphaAcetolactateDiacetyl_rate =
AlphaAcetolactateDiacetyl_km =
DiacetylAcetoin_vmax =
DiacetylAcetoin_gamma =
DiacetylAcetoin_rate =
DiacetylAcetoin_km =
AcetoinBDO_vmax =
AcetoinBDO_gamma =
AcetoinBDO_rate =
AcetoinBDO_km =
PyruvateAcetylaldehyde_vmax =
PyruvateAcetylaldehyde_gamma =
PyruvateAcetylaldehyde_rate =
PyruvateAcetylaldehyde_km =
AcetylaldehydeAcetoin_vmax =
AcetylaldehydeAcetoin_gamma =
AcetylaldehydeAcetoin_rate =
AcetylaldehydeAcetoin_km =
AcetylaldehydeAcetate_vmax =
AcetylaldehydeAcetate_gamma =
AcetylaldehydeAcetate_rate =
AcetylaldehydeAcetate_km =
AcetateEthanol_vmax =
AcetateEthanol_gamma =
AcetateEthanol_rate =
AcetateEthanol_km =

#reactant initial condition setup
Glucose_vmax =
Glucose_gamma =
GA3P_gamma =
DHAP_gamma =
G3P_gamma =
Glycerol_gamma =
Pyruvate_gamma =
AlphaAcetolactate_gamma =
Diacetyl_gamma =
Acetoin_gamma =
BDO_gamma =
Acetylaldehyde_gamma =
Acetate_gamma =
Ethanol_gamma =

#ode system setup, reactant and product concentrations input respectively
# NOTE(review): square brackets here are a syntax error -- presumably
# `range(26)` (or `[0.0] * 26`) was intended.  TODO confirm.
y = range[26]
# State vector layout: indices 0-12 are enzyme concentrations, 13-25 are
# metabolite concentrations; the trailing comments name each slot.
y[0] = #[GlucoseGA3P]
y[1] = #[GA3PDHAP]
y[2] = #[DHAPG3P]
y[3] = #[G3PGlycerol]
y[4] = #[GA3PPyruvate]
y[5] = #[PyruvateAlphaAcetolactate]
y[6] = #[AlphaAcetolactateDiacetyl]
y[7] = #[DiacetylAcetoin]
y[8] = #[AcetoinBDO]
y[9] = #[PyruvateAcetylaldehyde]
y[10] = #[AcetylaldehydeAcetoin]
y[11] = #[AcetylaldehydeAcetate]
y[12] = #[AcetateEthanol]
#-------
y[13] = #[Glucose]
y[14] = #[GA3P]
y[15] = #[DHAP]
y[16] = #[G3P]
y[17] = #[Glycerol]
y[18] = #[Pyruvate]
y[19] = #[AlphaAcetolactate]
y[20] = #[Diacetyl]
y[21] = #[Acetoin]
y[22] = #[BDO]
y[23] = #[Acetylaldehyde]
y[24] = #[Acetoin]
#individual ordinary differential equations' definition
# Each function computes production, degradation and usage terms in
# Michaelis-Menten form.  NOTE(review): none of them returns d[X]/dt
# (presumably `return production - degradation - usage` was intended, to
# match what `ode.multirk4` expects) and `[Name]` is a one-element list, not
# a concentration lookup -- template notation to be replaced.  TODO confirm.
def Glucose(t, y):
    production = Glucose_vmax
    degradation = Glucose_gamma * [Glucose]
    usage = ([Glucose] * [GlucoseGA3P] * GlucoseGA3P_rate) / ([Glucose] + GlucoseGA3P_km)

def GA3P(t, y):
    production = ([Glucose] * [GlucoseGA3P] * GlucoseGA3P_rate) / ([Glucose] + GlucoseGA3P_km)
    degradation = GA3P_gamma * [GA3P]
    # NOTE(review): the second denominator uses `*` where every comparable
    # term uses `+` (Michaelis-Menten is S + Km) -- looks like a typo.
    usage = (([GA3P] * [GA3PDHAP] * GA3PDHAP_rate) / ([GA3P] + GA3PDHAP_km)) + (([GA3P] * [GA3PPyruvate] * GA3PPyruvate_rate) / ([GA3P] * GA3PPyruvate_km))

def DHAP(t, y):
    production = ([GA3P] * [GA3PDHAP] * GA3PDHAP_rate) / ([GA3P] + GA3PDHAP_km)
    degradation = [DHAP] * DHAP_gamma
    usage = ([DHAP] * [DHAPG3P] * DHAPG3P_rate) / ([DHAP] + DHAPG3P_km)

def G3P(t, y):
    production = ([DHAP] * [DHAPG3P] * DHAPG3P_rate) / ([DHAP] + DHAPG3P_km)
    degradation = [G3P] * G3P_gamma
    usage = ([G3P] * [G3PGlycerol] * G3PGlycerol_rate) / ([G3P] + G3PGlycerol_km)

def Glycerol(t, y):
    production = ([G3P] * [G3PGlycerol] * G3PGlycerol_rate) / ([G3P] + G3PGlycerol_km)
    degradation = [Glycerol] * Glycerol_gamma
    usage = 0

#-----
def Pyruvate(t, y):
    # NOTE(review): `* GA3PPyruvate_km` denominator -- same suspected `+`
    # typo as in GA3P above.
    production = ([GA3P] * [GA3PPyruvate] * GA3PPyruvate_rate) / ([GA3P] * GA3PPyruvate_km)
    degradation = [Pyruvate] * Pyruvate_gamma
    usage = (([Pyruvate] * [PyruvateAlphaAcetolactate] * PyruvateAlphaAcetolactate_rate)/([Pyruvate] + PyruvateAlphaAcetolactate_km)) + (([Pyruvate] * [PyruvateAcetylaldehyde] * PyruvateAcetylaldehyde_rate)/([Pyruvate] + PyruvateAcetylaldehyde_km))

def AlphaAcetolactate(t, y):
    production = ([Pyruvate] * [PyruvateAlphaAcetolactate] * PyruvateAlphaAcetolactate_rate)/([Pyruvate] + PyruvateAlphaAcetolactate_km)
    degradation = [AlphaAcetolactate] * AlphaAcetolactate_gamma
    usage = ([AlphaAcetolactate] * [AlphaAcetolactateDiacetyl] * AlphaAcetolactateDiacetyl_rate)/([AlphaAcetolactate] + AlphaAcetolactateDiacetyl_km)

def Diacetyl(t, y):
    production = ([AlphaAcetolactate] * [AlphaAcetolactateDiacetyl] * AlphaAcetolactateDiacetyl_rate)/([AlphaAcetolactate] + AlphaAcetolactateDiacetyl_km)
    degradation = [Diacetyl] * Diacetyl_gamma
    usage = ([Diacetyl] * [DiacetylAcetoin] * DiacetylAcetoin_rate)/([Diacetyl] + DiacetylAcetoin_km)

def Acetoin(t, y):
    # Acetoin has two sources: from Diacetyl and from Acetylaldehyde.
    production = (([Diacetyl] * [DiacetylAcetoin] * DiacetylAcetoin_rate)/([Diacetyl] + DiacetylAcetoin_km)) + (([Acetylaldehyde] * [AcetylaldehydeAcetoin] * AcetylaldehydeAcetoin_rate)/([Acetylaldehyde] + AcetylaldehydeAcetoin_km))
    degradation = [Acetoin] * Acetoin_gamma
    usage = ([Acetoin] * [AcetoinBDO] * AcetoinBDO_rate)/([Acetoin] + AcetoinBDO_km)

def BDO(t, y):
    # BDO (2,3-butanediol) is a terminal product: nothing consumes it.
    production = ([Acetoin] * [AcetoinBDO] * AcetoinBDO_rate)/([Acetoin] + AcetoinBDO_km)
    degradation = [BDO] * BDO_gamma
    usage = 0

def Acetylaldehyde(t, y):
    production = ([Pyruvate] * [PyruvateAcetylaldehyde] * PyruvateAcetylaldehyde_rate)/([Pyruvate] + PyruvateAcetylaldehyde_km)
    degradation = [Acetylaldehyde] * Acetylaldehyde_gamma
    usage = (([Acetylaldehyde] * [AcetylaldehydeAcetoin] * AcetylaldehydeAcetoin_rate)/([Acetylaldehyde] + AcetylaldehydeAcetoin_km)) + (([Acetylaldehyde] * [AcetylaldehydeAcetate] * AcetylaldehydeAcetate_rate)/([Acetylaldehyde] + AcetylaldehydeAcetate_km))

def Acetate(t, y):
    production = ([Acetylaldehyde] * [AcetylaldehydeAcetate] * AcetylaldehydeAcetate_rate)/([Acetylaldehyde] + AcetylaldehydeAcetate_km)
    degradation = [Acetate] * Acetate_gamma
    usage = ([Acetate] * [AcetateEthanol] * AcetateEthanol_rate)/([Acetate] + AcetateEthanol_km)

def Ethanol(t, y):
    # Ethanol is a terminal product: nothing consumes it.
    production = ([Acetate] * [AcetateEthanol] * AcetateEthanol_rate)/([Acetate] + AcetateEthanol_km)
    degradation = [Ethanol] * Ethanol_gamma
    usage = 0

#-----enzymes------
# Enzymes follow a simple constitutive-expression model: constant
# production (vmax), first-order decay (gamma), never consumed.
def GlucoseGA3P(t, y):
    production = GlucoseGA3P_vmax
    degradation = GlucoseGA3P_gamma * [GlucoseGA3P]
    usage = 0

def GA3PDHAP(t, y):
    production = GA3PDHAP_vmax
    degradation = GA3PDHAP_gamma * [GA3PDHAP]
    usage = 0

def DHAPG3P(t, y):
    production = DHAPG3P_vmax
    degradation = DHAPG3P_gamma * [DHAPG3P]
    usage = 0

def G3PGlycerol(t, y):
    production = G3PGlycerol_vmax
    degradation = G3PGlycerol_gamma * [G3PGlycerol]
    usage = 0

def GA3PPyruvate(t, y):
    production = GA3PPyruvate_vmax
    degradation = GA3PPyruvate_gamma * [GA3PPyruvate]
    usage = 0

def PyruvateAlphaAcetolactate(t, y):
    production = PyruvateAlphaAcetolactate_vmax
    degradation = PyruvateAlphaAcetolactate_gamma * [PyruvateAlphaAcetolactate]
    usage = 0

def AlphaAcetolactateDiacetyl(t, y):
    production = AlphaAcetolactateDiacetyl_vmax
    degradation = AlphaAcetolactateDiacetyl_gamma * [AlphaAcetolactateDiacetyl]
    usage = 0

def DiacetylAcetoin(t, y):
    production = DiacetylAcetoin_vmax
    degradation = DiacetylAcetoin_gamma * [DiacetylAcetoin]
    usage = 0

def AcetoinBDO(t, y):
    production = AcetoinBDO_vmax
    degradation = AcetoinBDO_gamma * [AcetoinBDO]
    usage = 0

def PyruvateAcetylaldehyde(t, y):
    production = PyruvateAcetylaldehyde_vmax
    degradation = PyruvateAcetylaldehyde_gamma * [PyruvateAcetylaldehyde]
    usage = 0

def AcetylaldehydeAcetoin(t, y):
    production = AcetylaldehydeAcetoin_vmax
    degradation =AcetylaldehydeAcetoin_gamma * [AcetylaldehydeAcetoin]
    usage =0

def AcetylaldehydeAcetate(t, y):
    production = AcetylaldehydeAcetate_vmax
    degradation = AcetylaldehydeAcetate_gamma * [AcetylaldehydeAcetate]
    usage = 0

def AcetateEthanol(t, y):
    production = AcetateEthanol_vmax
    degradation = AcetateEthanol_gamma * [AcetateEthanol]
    usage = 0

#circuit ODE definitions
# The ODE callables are ordered to match the y-vector layout above.
circuitODE = range(26)
circuitODE[0] = GlucoseGA3P
circuitODE[1] = GA3PDHAP
circuitODE[2] = DHAPG3P
circuitODE[3] = G3PGlycerol
circuitODE[4] = GA3PPyruvate
circuitODE[5] = PyruvateAlphaAcetolactate
circuitODE[6] = AlphaAcetolactateDiacetyl
circuitODE[7] = DiacetylAcetoin
circuitODE[8] = AcetoinBDO
circuitODE[9] = PyruvateAcetylaldehyde
circuitODE[10] = AcetylaldehydeAcetoin
circuitODE[11] = AcetylaldehydeAcetate
circuitODE[12] = AcetateEthanol
circuitODE[13] = Glucose
circuitODE[14] = GA3P
circuitODE[15] = DHAP
circuitODE[16] = G3P
circuitODE[17] = Glycerol
circuitODE[18] = Pyruvate
circuitODE[19] = AlphaAcetolactate
circuitODE[20] = Diacetyl
circuitODE[21] = Acetoin
circuitODE[22] = BDO
circuitODE[23] = Acetylaldehyde
circuitODE[24] = Acetate
circuitODE[25] = Ethanol

#iteration setup
# Integrate from t0 to tmax with fixed step dt and stream each state row
# to a CSV file (one column per header entry, first column is time).
t0 = 0.0
tmax = 10000.0
dt = 0.1

outfile = 'IntegratedSimulationResult.csv'
f = open(outfile, 'w')
header = ['time', 'GlucoseGA3P', 'GA3PDHAP', 'DHAPG3P', 'G3PGlycerol', 'GA3PPyruvate', 'PyruvateAlphaAcetolactate', 'AlphaAcetolactateDiacetyl', 'DiacetylAcetoin', 'AcetoinBDO', 'PyruvateAcetylaldehyde', 'AcetylaldehydeAcetoin', 'AcetylaldehydeAcetate', 'AcetateEthanol', 'Glucose', 'GA3P', 'DHAP', 'G3P', 'Glycerol', 'Pyruvate', 'AlphaAcetolactate', 'Diacetyl', 'Acetoin', 'BDO', 'Acetylaldehyde', 'Acetate', 'Ethanol']
f.write(','.join(header) + '\n')

# ode.multirk4 presumably yields [t, y0..y25] rows -- verify against the
# project's ode module.
for x in ode.multirk4(circuitODE, t0, y, dt, tmax):
    f.write(','.join([str(item) for item in x]) + '\n')
f.close()
{ "content_hash": "36f7498ae28bafaa98252c519f1935dd", "timestamp": "", "source": "github", "line_count": 295, "max_line_length": 420, "avg_line_length": 31.050847457627118, "alnum_prop": 0.7372270742358079, "repo_name": "imwiththou/PathwayNet", "id": "95b120859e2dc19ed447bd4f60fab396b3fcbf88", "size": "9160", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "IntegratedSimulation.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "160929" } ], "symlink_target": "" }
from django.core.serializers import serialize
from django.db.models.query import QuerySet
from django.utils import simplejson
from django.template import Library

register = Library()


def jsonify(obj):
    """Template filter: render *obj* as a JSON string.

    QuerySets are handed to Django's serializer framework (which knows how
    to encode model instances); every other value goes through simplejson.
    """
    if not isinstance(obj, QuerySet):
        return simplejson.dumps(obj)
    return serialize('json', obj)


register.filter('jsonify', jsonify)
{ "content_hash": "2c7358f2095f487ed80519364c387edf", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 45, "avg_line_length": 26.46153846153846, "alnum_prop": 0.7674418604651163, "repo_name": "hamicornfury/storyscape.django-", "id": "46057a0bbc43fbbe5bf1277c22ea7a18c6b597ec", "size": "344", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/storyscape/templatetags/json_filters.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "45544" }, { "name": "HTML", "bytes": "24734" }, { "name": "JavaScript", "bytes": "305273" }, { "name": "Python", "bytes": "179365" } ], "symlink_target": "" }
'''
motion.py: part of mriqc package

'''
import numpy as np
import pylab as plt
import seaborn as sns
from mriqc.misc import plot_vline
from matplotlib.figure import Figure
from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas
from matplotlib.gridspec import GridSpec


def calc_frame_dispalcement(realignment_parameters_file):
    """Compute the framewise-displacement (FD) time series from a
    realignment-parameter file.

    The file is whitespace-delimited, one row per volume, with (at least)
    six columns read as: three translations followed by three rotations.
    Rotations are converted to a displacement on a 50 mm sphere.

    NOTE(review): the function name keeps its historical misspelling
    ('dispalcement') so existing callers keep working.

    Parameters
    ----------
    realignment_parameters_file : str
        Path to the realignment-parameters text file.

    Returns
    -------
    numpy.ndarray
        FD per volume; the first time point is defined as 0.
    """
    # 'with' guarantees the handle is closed -- the previous version used
    # open(...).readlines() and leaked the file descriptor.
    with open(realignment_parameters_file, 'r') as params_file:
        rows = [[float(x) for x in line.split()] for line in params_file]
    cols = np.array([list(col) for col in zip(*rows)])

    translations = np.transpose(np.abs(np.diff(cols[0:3, :])))
    rotations = np.transpose(np.abs(np.diff(cols[3:6, :])))

    # 50 mm sphere radius; 3.141 is kept (not np.pi) so existing results
    # reproduce exactly.
    FD_power = np.sum(translations, axis=1) + (50 * 3.141 / 180) * np.sum(rotations, axis=1)

    # FD is zero for the first time point
    FD_power = np.insert(FD_power, 0, 0)

    return FD_power


def get_mean_frame_displacement_disttribution(realignment_parameters_files):
    """Return (mean_FDs, max_FDs): per-file mean and max FD.

    NOTE(review): the name keeps its historical misspelling
    ('disttribution') for backward compatibility.
    """
    mean_FDs = []
    max_FDs = []
    for realignment_parameters_file in realignment_parameters_files:
        FD_power = calc_frame_dispalcement(realignment_parameters_file)
        mean_FDs.append(FD_power.mean())
        max_FDs.append(FD_power.max())
    return mean_FDs, max_FDs


def plot_frame_displacement(realignment_parameters_file, mean_FD_distribution=None, figsize=(11.7, 8.3)):
    """Plot the FD trace of one scan (plus its distribution); optionally add
    a bottom panel comparing this scan's mean FD against a cross-subject
    distribution.

    Parameters
    ----------
    realignment_parameters_file : str
        Path to the realignment-parameters text file.
    mean_FD_distribution : sequence of float, optional
        Mean FD of every subject; when given, a second row is added with
        this scan's mean FD marked on the distribution.
    figsize : tuple of float
        Figure size in inches (defaults to A4 landscape).

    Returns
    -------
    matplotlib.figure.Figure
    """
    FD_power = calc_frame_dispalcement(realignment_parameters_file)

    fig = Figure(figsize=figsize)
    # Attach a (PDF) canvas so the figure can be rendered off-screen.
    FigureCanvas(fig)

    if mean_FD_distribution:
        grid = GridSpec(2, 4)
    else:
        grid = GridSpec(1, 4)

    # Top-left: FD over time.
    ax = fig.add_subplot(grid[0, :-1])
    ax.plot(FD_power)
    ax.set_xlim((0, len(FD_power)))
    ax.set_ylabel("Frame Displacement [mm]")
    ax.set_xlabel("Frame number")
    ylim = ax.get_ylim()

    # Top-right: marginal distribution sharing the y-axis.
    ax = fig.add_subplot(grid[0, -1])
    sns.distplot(FD_power, vertical=True, ax=ax)
    ax.set_ylim(ylim)

    if mean_FD_distribution:
        # Bottom row: where this scan sits among all subjects.
        ax = fig.add_subplot(grid[1, :])
        sns.distplot(mean_FD_distribution, ax=ax)
        # NOTE(review): 'Dispalcement' typo kept verbatim in the label to
        # avoid changing rendered output without sign-off.
        ax.set_xlabel("Mean Frame Dispalcement (over all subjects) [mm]")
        MeanFD = FD_power.mean()
        label = "MeanFD = %g" % MeanFD
        plot_vline(MeanFD, label, ax=ax)

    return fig
{ "content_hash": "ce2847c1133a41c035c8e3db47e5a40f", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 104, "avg_line_length": 32.08695652173913, "alnum_prop": 0.6585365853658537, "repo_name": "vsoch/mriqc", "id": "cec7702218cd60ee4ea276614a5fe8d10120630b", "size": "2214", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mriqc/motion.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "21766" } ], "symlink_target": "" }
import numpy as np

from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import NewCN


class TestNewCN(VariableUnitTest):
    """Regression test: NewCN_f must numerically agree with NewCN."""

    def test_NewCN(self):
        z = self.z
        reference = NewCN.NewCN_f(z.NRur, z.NUrb, z.CN)
        candidate = NewCN.NewCN(z.NRur, z.NUrb, z.CN)
        np.testing.assert_array_almost_equal(reference, candidate, decimal=7)
{ "content_hash": "e7a8cc94315b3b7caf67fb5bb4b09418", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 57, "avg_line_length": 28.916666666666668, "alnum_prop": 0.6714697406340058, "repo_name": "WikiWatershed/gwlf-e", "id": "d0e17d6280f6e7171373c9b9ce320d919fda4a19", "size": "347", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "test/unittests/test_NewCN.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "GAMS", "bytes": "5930291" }, { "name": "Python", "bytes": "775719" } ], "symlink_target": "" }
import json
import os
import sys
import shutil

# NOTE(review): this module is Python 2 (`print >>` statements, the `file`
# builtin in cleanup()); it will not run under Python 3 as-is.

# TODO: PUT THIS SOMEWHERE ELSE, Extend it for more primitives
def map_types(params):
    """ Takes a dict of params in the form i:{'Value':'val','Type':type}
    and maps them to i:value, coercing each value according to its type.
    An empty-string value maps to None. """
    # Mapping from the JSON type tag to the Python constructor used to
    # coerce the string value.
    type_map = {
        'int':int,
        'real':float,
        'string':str,
        'bool':bool
    }
    newparams = dict()
    for p in params:
        if params[p]['Value'] == "":
            newparams[p] = None
            continue
        # assume that this works because the validator should've checked it
        newparams[p] = type_map[params[p]['Type']](params[p]['Value'])
    return newparams

class process_transform_base:
    """ Base class for a process transform: reads a JSON parameter file,
    opens the declared input/output data files, and runs a
    read/process/write/cleanup pipeline.  Subclasses override the
    read_data/process_data/write_data hooks. """
    def __init__(self, params_file_s):
        # Any failure to open/parse the parameter or data files is fatal:
        # report on stderr and exit(-1).
        try:
            params_file = open(params_file_s, 'r')
        except IOError as ioe:
            print >> sys.stderr, "Could not open system parameters", ioe
            sys.exit(-1)
        try:
            self.params = json.load(params_file)
        except ValueError as vae:
            print >> sys.stderr, "Could not decode parameters:", vae
            sys.exit(-1)
        params_file.close()
        try:
            self.idata_file = open(self.params['Inputs']['data'], 'r')
        except IOError as ioe:
            print >> sys.stderr, "Could not open input file", ioe
            sys.exit(-1)
        try:
            self.odata_file = open(self.params['Outputs']['data'], 'w')
        except IOError as ioe:
            print >> sys.stderr, "Could not open output file", ioe
            sys.exit(-1)
        self.hyperparameters = map_types(self.params['HyperParameters'])

    def read_data(self):
        """ Reads input. Base version just stores file"""
        self.idata = self.idata_file

    def process_data(self):
        """ Main meat of it, does something with self.idata and puts
        the result in self.odata. Base version just copies """
        self.odata = self.idata

    def process_data(self):
        """ Main meat of it, does something with self.idata and puts
        the result in self.odata. Base version just copies """
        self.odata = self.idata

    def write_data(self):
        """ Writes output.
        Base version just copies the input file """
        shutil.copyfileobj(self.odata,self.odata_file)

    def cleanup(self):
        """ Primarily just close up whatever files were used """
        # Closes every attribute that is an open file object; relies on the
        # Python 2 `file` builtin.
        for i in self.__dict__:
            if isinstance(self.__dict__[i], file):
                self.__dict__[i].close()

    def run(self):
        """ Execute the full pipeline: read, process, write, cleanup. """
        self.read_data()
        self.process_data()
        self.write_data()
        self.cleanup()

if __name__ == '__main__':
    # CLI: first argument is the path to the JSON parameter file.
    params_file = sys.argv[1]
    transform = process_transform_base(params_file)
    transform.run()
{ "content_hash": "add4c4257bccde2a2de410857cbe975c", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 132, "avg_line_length": 27.209876543209877, "alnum_prop": 0.6633393829401089, "repo_name": "ProtoML/ProtoML-transforms", "id": "35ebabf5dd6d659f704a7af266e0466619c20404", "size": "2565", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "transforms/process_transforms/base_transform.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "11391" } ], "symlink_target": "" }
import argparse
import json
import os
import re
import urllib.request

_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'com/google/dagger'
_MODULE_NAME = 'dagger'
_FILE_EXT = 'jar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'


def do_latest():
    """Print the newest upstream version, suffixed with our patch tag."""
    if _OVERRIDE_LATEST is not None:
        print(f'{_OVERRIDE_LATEST}.{_PATCH_VERSION}')
        return
    metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    xml_text = urllib.request.urlopen(metadata_url).read().decode('utf-8')
    # Deliberately regex-based: the stdlib XML parsers are susceptible to
    # maliciously crafted documents, and two tags are all we need here.
    found = re.search('<latest>([^<]+)</latest>', xml_text)
    if found is None:
        # No <latest> tag: hope the <version> entries are sorted and take
        # the last one, as is commonly the case.
        newest = re.findall('<version>([^<]+)</version>', xml_text)[-1]
    else:
        newest = found.group(1)
    print(f'{newest}.{_PATCH_VERSION}')


def get_download_url(version):
    """Emit (on stdout) the partial 3pp manifest JSON for *version*.

    The trailing '.crN' component is our local patch tag, not part of the
    upstream Maven version, so it is stripped before building the URL.
    """
    stem, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = stem
    file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME, version, _FILE_EXT)
    file_name = file_url.rsplit('/', 1)[-1]
    print(json.dumps({
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }))


def main():
    parser = argparse.ArgumentParser()
    subcommands = parser.add_subparsers()

    latest_cmd = subcommands.add_parser('latest')
    latest_cmd.set_defaults(func=lambda _opts: do_latest())

    get_url_cmd = subcommands.add_parser('get_url')
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    opts = parser.parse_args()
    opts.func(opts)


if __name__ == '__main__':
    main()
{ "content_hash": "f7954309bb7cc57151c72f76fd4e4238", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 79, "avg_line_length": 31.385714285714286, "alnum_prop": 0.5930814747382794, "repo_name": "chromium/chromium", "id": "951ba8d9ffe3129aa98806813601db32f3346fcf", "size": "2464", "binary": false, "copies": "6", "ref": "refs/heads/main", "path": "third_party/android_deps/libs/com_google_dagger_dagger/3pp/fetch.py", "mode": "33261", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
""" Generalized Linear Models. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Vincent Michel <vincent.michel@inria.fr> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck # Maryan Morel <maryan.morel@polytechnique.edu> # Giorgio Patrini <giorgio.patrini@anu.edu.au> # License: BSD 3 clause from abc import ABCMeta, abstractmethod import numbers import warnings import numpy as np import scipy.sparse as sp from scipy import linalg from scipy import optimize from scipy import sparse from scipy.special import expit from joblib import Parallel from ..base import (BaseEstimator, ClassifierMixin, RegressorMixin, MultiOutputMixin) from ..utils import check_array from ..utils.validation import FLOAT_DTYPES from ..utils.validation import _deprecate_positional_args from ..utils import check_random_state from ..utils.extmath import safe_sparse_dot from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale from ..utils.fixes import sparse_lsqr from ..utils._seq_dataset import ArrayDataset32, CSRDataset32 from ..utils._seq_dataset import ArrayDataset64, CSRDataset64 from ..utils.validation import check_is_fitted, _check_sample_weight from ..utils.fixes import delayed from ..preprocessing import normalize as f_normalize # TODO: bayesian_ridge_regression and bayesian_regression_ard # should be squashed into its respective objects. SPARSE_INTERCEPT_DECAY = 0.01 # For sparse data intercept updates are scaled by this decay factor to avoid # intercept oscillation. def make_dataset(X, y, sample_weight, random_state=None): """Create ``Dataset`` abstraction for sparse and dense inputs. This also returns the ``intercept_decay`` which is different for sparse datasets. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data y : array-like, shape (n_samples, ) Target values. 
sample_weight : numpy array of shape (n_samples,) The weight of each sample random_state : int, RandomState instance or None (default) Determines random number generation for dataset shuffling and noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- dataset The ``Dataset`` abstraction intercept_decay The intercept decay """ rng = check_random_state(random_state) # seed should never be 0 in SequentialDataset64 seed = rng.randint(1, np.iinfo(np.int32).max) if X.dtype == np.float32: CSRData = CSRDataset32 ArrayData = ArrayDataset32 else: CSRData = CSRDataset64 ArrayData = ArrayDataset64 if sp.issparse(X): dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed) intercept_decay = SPARSE_INTERCEPT_DECAY else: X = np.ascontiguousarray(X) dataset = ArrayData(X, y, sample_weight, seed=seed) intercept_decay = 1.0 return dataset, intercept_decay def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True, sample_weight=None, return_mean=False, check_input=True): """Center and scale data. Centers data to have mean zero along axis 0. If fit_intercept=False or if the X is a sparse matrix, no centering is done, but normalization can still be applied. The function returns the statistics necessary to reconstruct the input data, which are X_offset, y_offset, X_scale, such that the output X = (X - X_offset) / X_scale X_scale is the L2 norm of X - X_offset. If sample_weight is not None, then the weighted mean of X and y is zero, and not the mean itself. If return_mean=True, the mean, eventually weighted, is returned, independently of whether X was centered (option used for optimization with sparse data in coordinate_descend). This is here because nearly all linear models will want their data to be centered. 
This function also systematically makes y consistent with X.dtype """ if isinstance(sample_weight, numbers.Number): sample_weight = None if sample_weight is not None: sample_weight = np.asarray(sample_weight) if check_input: X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'], dtype=FLOAT_DTYPES) elif copy: if sp.issparse(X): X = X.copy() else: X = X.copy(order='K') y = np.asarray(y, dtype=X.dtype) if fit_intercept: if sp.issparse(X): X_offset, X_var = mean_variance_axis(X, axis=0) if not return_mean: X_offset[:] = X.dtype.type(0) if normalize: # TODO: f_normalize could be used here as well but the function # inplace_csr_row_normalize_l2 must be changed such that it # can return also the norms computed internally # transform variance to norm in-place X_var *= X.shape[0] X_scale = np.sqrt(X_var, X_var) del X_var X_scale[X_scale == 0] = 1 inplace_column_scale(X, 1. / X_scale) else: X_scale = np.ones(X.shape[1], dtype=X.dtype) else: X_offset = np.average(X, axis=0, weights=sample_weight) X -= X_offset if normalize: X, X_scale = f_normalize(X, axis=0, copy=False, return_norm=True) else: X_scale = np.ones(X.shape[1], dtype=X.dtype) y_offset = np.average(y, axis=0, weights=sample_weight) y = y - y_offset else: X_offset = np.zeros(X.shape[1], dtype=X.dtype) X_scale = np.ones(X.shape[1], dtype=X.dtype) if y.ndim == 1: y_offset = X.dtype.type(0) else: y_offset = np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_scale # TODO: _rescale_data should be factored into _preprocess_data. # Currently, the fact that sag implements its own way to deal with # sample_weight makes the refactoring tricky. def _rescale_data(X, y, sample_weight): """Rescale data sample-wise by square root of sample_weight. For many linear models, this enables easy support for sample_weight. 
Returns ------- X_rescaled : {array-like, sparse matrix} y_rescaled : {array-like, sparse matrix} """ n_samples = X.shape[0] sample_weight = np.asarray(sample_weight) if sample_weight.ndim == 0: sample_weight = np.full(n_samples, sample_weight, dtype=sample_weight.dtype) sample_weight = np.sqrt(sample_weight) sw_matrix = sparse.dia_matrix((sample_weight, 0), shape=(n_samples, n_samples)) X = safe_sparse_dot(sw_matrix, X) y = safe_sparse_dot(sw_matrix, y) return X, y class LinearModel(BaseEstimator, metaclass=ABCMeta): """Base class for Linear Models""" @abstractmethod def fit(self, X, y): """Fit model.""" def _decision_function(self, X): check_is_fitted(self) X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ def predict(self, X): """ Predict using the linear model. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Samples. Returns ------- C : array, shape (n_samples,) Returns predicted values. """ return self._decision_function(X) _preprocess_data = staticmethod(_preprocess_data) def _set_intercept(self, X_offset, y_offset, X_scale): """Set the intercept_ """ if self.fit_intercept: self.coef_ = self.coef_ / X_scale self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T) else: self.intercept_ = 0. def _more_tags(self): return {'requires_y': True} # XXX Should this derive from LinearModel? It should be a mixin, not an ABC. # Maybe the n_features checking can be moved to LinearModel. class LinearClassifierMixin(ClassifierMixin): """Mixin for linear classifiers. Handles prediction for sparse and dense X. """ def decision_function(self, X): """ Predict confidence scores for samples. The confidence score for a sample is the signed distance of that sample to the hyperplane. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Samples. 
Returns ------- array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes) Confidence scores per (sample, class) combination. In the binary case, confidence score for self.classes_[1] where >0 means this class would be predicted. """ check_is_fitted(self) X = check_array(X, accept_sparse='csr') n_features = self.coef_.shape[1] if X.shape[1] != n_features: raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features)) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() if scores.shape[1] == 1 else scores def predict(self, X): """ Predict class labels for samples in X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Samples. Returns ------- C : array, shape [n_samples] Predicted class label per sample. """ scores = self.decision_function(X) if len(scores.shape) == 1: indices = (scores > 0).astype(int) else: indices = scores.argmax(axis=1) return self.classes_[indices] def _predict_proba_lr(self, X): """Probability estimation for OvR logistic regression. Positive class probabilities are computed as 1. / (1. + np.exp(-self.decision_function(X))); multiclass is handled by normalizing that over all classes. """ prob = self.decision_function(X) expit(prob, out=prob) if prob.ndim == 1: return np.vstack([1 - prob, prob]).T else: # OvR normalization, like LibLinear's predict_probability prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) return prob class SparseCoefMixin: """Mixin for converting coef_ to and from CSR format. L1-regularizing estimators should inherit this. """ def densify(self): """ Convert coefficient matrix to dense array format. Converts the ``coef_`` member (back) to a numpy.ndarray. This is the default format of ``coef_`` and is required for fitting, so calling this method is only required on models that have previously been sparsified; otherwise, it is a no-op. Returns ------- self Fitted estimator. 
""" msg = "Estimator, %(name)s, must be fitted before densifying." check_is_fitted(self, msg=msg) if sp.issparse(self.coef_): self.coef_ = self.coef_.toarray() return self def sparsify(self): """ Convert coefficient matrix to sparse format. Converts the ``coef_`` member to a scipy.sparse matrix, which for L1-regularized models can be much more memory- and storage-efficient than the usual numpy.ndarray representation. The ``intercept_`` member is not converted. Returns ------- self Fitted estimator. Notes ----- For non-sparse models, i.e. when there are not many zeros in ``coef_``, this may actually *increase* memory usage, so use this method with care. A rule of thumb is that the number of zero elements, which can be computed with ``(coef_ == 0).sum()``, must be more than 50% for this to provide significant benefits. After calling this method, further fitting with the partial_fit method (if any) will not work until you call densify. """ msg = "Estimator, %(name)s, must be fitted before sparsifying." check_is_fitted(self, msg=msg) self.coef_ = sp.csr_matrix(self.coef_) return self class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel): """ Ordinary least squares Linear Regression. LinearRegression fits a linear model with coefficients w = (w1, ..., wp) to minimize the residual sum of squares between the observed targets in the dataset, and the targets predicted by the linear approximation. Parameters ---------- fit_intercept : bool, default=True Whether to calculate the intercept for this model. If set to False, no intercept will be used in calculations (i.e. data is expected to be centered). normalize : bool, default=False This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. 
copy_X : bool, default=True If True, X will be copied; else, it may be overwritten. n_jobs : int, default=None The number of jobs to use for the computation. This will only provide speedup for n_targets > 1 and sufficient large problems. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. positive : bool, default=False When set to ``True``, forces the coefficients to be positive. This option is only supported for dense arrays. .. versionadded:: 0.24 Attributes ---------- coef_ : array of shape (n_features, ) or (n_targets, n_features) Estimated coefficients for the linear regression problem. If multiple targets are passed during the fit (y 2D), this is a 2D array of shape (n_targets, n_features), while if only one target is passed, this is a 1D array of length n_features. rank_ : int Rank of matrix `X`. Only available when `X` is dense. singular_ : array of shape (min(X, y),) Singular values of `X`. Only available when `X` is dense. intercept_ : float or array of shape (n_targets,) Independent term in the linear model. Set to 0.0 if `fit_intercept = False`. See Also -------- Ridge : Ridge regression addresses some of the problems of Ordinary Least Squares by imposing a penalty on the size of the coefficients with l2 regularization. Lasso : The Lasso is a linear model that estimates sparse coefficients with l1 regularization. ElasticNet : Elastic-Net is a linear regression model trained with both l1 and l2 -norm regularization of the coefficients. Notes ----- From the implementation point of view, this is just plain Ordinary Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares (scipy.optimize.nnls) wrapped as a predictor object. 
Examples -------- >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) >>> # y = 1 * x_0 + 2 * x_1 + 3 >>> y = np.dot(X, np.array([1, 2])) + 3 >>> reg = LinearRegression().fit(X, y) >>> reg.score(X, y) 1.0 >>> reg.coef_ array([1., 2.]) >>> reg.intercept_ 3.0000... >>> reg.predict(np.array([[3, 5]])) array([16.]) """ @_deprecate_positional_args def __init__(self, *, fit_intercept=True, normalize=False, copy_X=True, n_jobs=None, positive=False): self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.n_jobs = n_jobs self.positive = positive def fit(self, X, y, sample_weight=None): """ Fit linear model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample .. versionadded:: 0.17 parameter *sample_weight* support to LinearRegression. Returns ------- self : returns an instance of self. """ n_jobs_ = self.n_jobs accept_sparse = False if self.positive else ['csr', 'csc', 'coo'] X, y = self._validate_data(X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) X, y, X_offset, y_offset, X_scale = self._preprocess_data( X, y, fit_intercept=self.fit_intercept, normalize=self.normalize, copy=self.copy_X, sample_weight=sample_weight, return_mean=True) if sample_weight is not None: # Sample weight can be implemented via a simple rescaling. 
X, y = _rescale_data(X, y, sample_weight) if self.positive: if y.ndim < 2: self.coef_, self._residues = optimize.nnls(X, y) else: # scipy.optimize.nnls cannot handle y with shape (M, K) outs = Parallel(n_jobs=n_jobs_)( delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])) self.coef_, self._residues = map(np.vstack, zip(*outs)) elif sp.issparse(X): X_offset_scale = X_offset / X_scale def matvec(b): return X.dot(b) - b.dot(X_offset_scale) def rmatvec(b): return X.T.dot(b) - X_offset_scale * np.sum(b) X_centered = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec) if y.ndim < 2: out = sparse_lsqr(X_centered, y) self.coef_ = out[0] self._residues = out[3] else: # sparse_lstsq cannot handle y with shape (M, K) outs = Parallel(n_jobs=n_jobs_)( delayed(sparse_lsqr)(X_centered, y[:, j].ravel()) for j in range(y.shape[1])) self.coef_ = np.vstack([out[0] for out in outs]) self._residues = np.vstack([out[3] for out in outs]) else: self.coef_, self._residues, self.rank_, self.singular_ = \ linalg.lstsq(X, y) self.coef_ = self.coef_.T if y.ndim == 1: self.coef_ = np.ravel(self.coef_) self._set_intercept(X_offset, y_offset, X_scale) return self def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy, check_input=True, sample_weight=None): """Aux function used at beginning of fit in linear models Parameters ---------- order : 'F', 'C' or None, default=None Whether X and y will be forced to be fortran or c-style. Only relevant if sample_weight is not None. 
""" n_samples, n_features = X.shape if sparse.isspmatrix(X): # copy is not needed here as X is not modified inplace when X is sparse precompute = False X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, fit_intercept=fit_intercept, normalize=normalize, copy=False, return_mean=True, check_input=check_input) else: # copy was done in fit if necessary X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy, check_input=check_input, sample_weight=sample_weight) if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight=sample_weight) if hasattr(precompute, '__array__') and ( fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or normalize and not np.allclose(X_scale, np.ones(n_features))): warnings.warn("Gram matrix was provided but X was centered" " to fit intercept, " "or X was normalized : recomputing Gram matrix.", UserWarning) # recompute Gram precompute = 'auto' Xy = None # precompute if n_samples > n_features if isinstance(precompute, str) and precompute == 'auto': precompute = (n_samples > n_features) if precompute is True: # make sure that the 'precompute' array is contiguous. precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order='C') np.dot(X.T, X, out=precompute) if not hasattr(precompute, '__array__'): Xy = None # cannot use Xy if precompute is not Gram if hasattr(precompute, '__array__') and Xy is None: common_dtype = np.find_common_type([X.dtype, y.dtype], []) if y.ndim == 1: # Xy is 1d, make sure it is contiguous. Xy = np.empty(shape=n_features, dtype=common_dtype, order='C') np.dot(X.T, y, out=Xy) else: # Make sure that Xy is always F contiguous even if X or y are not # contiguous: the goal is to make it fast to extract the data for a # specific target. n_targets = y.shape[1] Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order='F') np.dot(y.T, X, out=Xy.T) return X, y, X_offset, y_offset, X_scale, precompute, Xy
{ "content_hash": "f7c7db03f2fca56f2352ed140300517d", "timestamp": "", "source": "github", "line_count": 642, "max_line_length": 79, "avg_line_length": 35.6183800623053, "alnum_prop": 0.5940438186032274, "repo_name": "ndingwall/scikit-learn", "id": "2399e1216238f6360db9f7834c5c4d3e1b740381", "size": "22867", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "sklearn/linear_model/_base.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "3366" }, { "name": "C", "bytes": "416843" }, { "name": "C++", "bytes": "140261" }, { "name": "Makefile", "bytes": "1630" }, { "name": "PowerShell", "bytes": "17042" }, { "name": "Python", "bytes": "6794973" }, { "name": "Shell", "bytes": "13442" } ], "symlink_target": "" }
"""Extensions module. Each extension is initialized in the app factory located in app.py.""" from flask_bcrypt import Bcrypt from flask_caching import Cache from flask_debugtoolbar import DebugToolbarExtension from flask_login import LoginManager from flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy from flask_wtf.csrf import CSRFProtect from flask_cloudy import Storage bcrypt = Bcrypt() csrf_protect = CSRFProtect() login_manager = LoginManager() db = SQLAlchemy() migrate = Migrate() cache = Cache() debug_toolbar = DebugToolbarExtension() storage = Storage()
{ "content_hash": "ae80d8f8f0e5c3d297e99f13c3d52582", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 92, "avg_line_length": 30.894736842105264, "alnum_prop": 0.8023850085178875, "repo_name": "ChristoferHuynh/web", "id": "3cf5f9e0def7d84242f3a843c5627f7f558f6542", "size": "611", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "web/extensions.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "3198" }, { "name": "HTML", "bytes": "11145" }, { "name": "JavaScript", "bytes": "181729" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "103247" }, { "name": "Shell", "bytes": "281" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('api', '0009_account_ownedcard'), ] operations = [ migrations.AlterField( model_name='account', name='center', field=models.ForeignKey(blank=True, to='api.Card', null=True), preserve_default=True, ), migrations.AlterField( model_name='account', name='friend_id', field=models.PositiveIntegerField(null=True, blank=True), preserve_default=True, ), migrations.AlterField( model_name='account', name='language', field=models.CharField(default=b'JP', max_length=10, choices=[(b'JP', b'Japanese'), (b'EN', b'English'), (b'KR', b'Korean'), (b'CH', b'Chinese')]), preserve_default=True, ), migrations.AlterField( model_name='account', name='rank', field=models.PositiveIntegerField(null=True, blank=True), preserve_default=True, ), ]
{ "content_hash": "40d0197e85aacbdde4d14f279589a78d", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 159, "avg_line_length": 31.08108108108108, "alnum_prop": 0.5565217391304348, "repo_name": "SchoolIdolTomodachi/SchoolIdolAPI", "id": "c686a47b715dfe19aba6977ef1c6476289710304", "size": "1174", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "api/migrations/0010_auto_20150123_1809.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "67801" }, { "name": "HTML", "bytes": "474730" }, { "name": "JavaScript", "bytes": "93928" }, { "name": "Python", "bytes": "748300" } ], "symlink_target": "" }
from __future__ import unicode_literals from collections import OrderedDict from datetime import datetime, timedelta from dateutil import rrule from flask import flash, jsonify, request, session from werkzeug.exceptions import BadRequest from indico.modules.events.cloning import EventCloner from indico.modules.events.management.controllers import RHManageEventBase from indico.modules.events.management.forms import (CLONE_REPEAT_CHOICES, CloneCategorySelectForm, CloneContentsForm, CloneRepeatabilityForm, CloneRepeatIntervalForm, CloneRepeatOnceForm, CloneRepeatPatternForm) from indico.modules.events.operations import clone_event from indico.util.i18n import _ from indico.web.flask.util import url_for from indico.web.util import jsonify_data, jsonify_template REPEAT_FORM_MAP = { 'once': CloneRepeatOnceForm, 'interval': CloneRepeatIntervalForm, 'pattern': CloneRepeatPatternForm } RRULE_FREQ_MAP = OrderedDict([ ('years', rrule.YEARLY), ('months', rrule.MONTHLY), ('weeks', rrule.WEEKLY), ('days', rrule.DAILY), ('hours', rrule.HOURLY), ('minutes', rrule.MINUTELY), ('seconds', rrule.SECONDLY) ]) def relativedelta_to_rrule_interval(rdelta): for unit, freq in RRULE_FREQ_MAP.viewitems(): value = getattr(rdelta, unit) if value: return freq, value raise ValueError('Invalid relativedelta(...) 
object') def get_clone_calculator(repeatability, event): if repeatability == 'interval': return IntervalCloneCalculator(event) elif repeatability == 'pattern': return PatternCloneCalculator(event) else: raise BadRequest class CloneCalculator(object): def __init__(self, event): self.event = event def _naivify(self, dt): return dt.astimezone(self.event.tzinfo).replace(tzinfo=None) def _tzify(self, dates): return [self.event.tzinfo.localize(dt) for dt in dates] def _calc_stop_criteria(self, form): args = {} if form.stop_criterion.data == 'day': args['until'] = datetime.combine(form.until_dt.data, self._naivify(form.start_dt.data).time()) else: args['count'] = form.num_times.data return args def calculate(self, formdata): """Calculate dates of cloned events :return: a ``(dates, last_day_of_month)`` tuple """ form = self.form_class(self.event, formdata=formdata) if form.validate(): return self._calculate(form) else: raise ValueError([(unicode(getattr(form, k).label.text), v) for k, v in form.errors.viewitems()]) class PatternCloneCalculator(CloneCalculator): form_class = CloneRepeatPatternForm def _calculate(self, form): args = {'dtstart': self._naivify(form.start_dt.data)} args.update(self._calc_stop_criteria(form)) dates = self._tzify(rrule.rrule(rrule.MONTHLY, interval=form.num_months.data, byweekday=form.week_day.week_day_data, bysetpos=form.week_day.day_number_data, **args)) return dates, False class IntervalCloneCalculator(CloneCalculator): form_class = CloneRepeatIntervalForm def _calculate(self, form): freq, interval = relativedelta_to_rrule_interval(form.recurrence.data) # check if last day of month dtstart = self._naivify(form.start_dt.data) next_day = dtstart + timedelta(days=1) if freq == rrule.MONTHLY and next_day.day == 1: kwargs = dict(self._calc_stop_criteria(form), dtstart=next_day) dates = rrule.rrule(freq, interval=interval, **kwargs) dates = self._tzify([date - timedelta(days=1) for date in dates]) last_day_of_month = True else: kwargs = 
dict(self._calc_stop_criteria(form), dtstart=dtstart) dates = self._tzify(rrule.rrule(freq, interval=interval, **kwargs)) last_day_of_month = False return dates, last_day_of_month class RHClonePreview(RHManageEventBase): ALLOW_LOCKED = True def _process(self): form = CloneRepeatabilityForm() clone_calculator = get_clone_calculator(form.repeatability.data, self.event) try: dates, last_day_of_month = clone_calculator.calculate(request.form) if len(dates) > 100: raise ValueError(_("You can clone maximum of 100 times at once")) except ValueError as e: return jsonify(error={'message': e.message}) return jsonify_data(count=len(dates), dates=dates, last_day_of_month=last_day_of_month, flash=False) class RHCloneEvent(RHManageEventBase): """Create copies of the event.""" ALLOW_LOCKED = True def _form_for_step(self, step, set_defaults=True): if step == 1: return CloneRepeatabilityForm() elif step == 2: return CloneContentsForm(self.event, set_defaults=set_defaults) elif step == 3: default_category = (self.event.category if self.event.category.can_create_events(session.user) else None) return CloneCategorySelectForm(self.event, category=default_category) elif step == 4: return REPEAT_FORM_MAP[request.form['repeatability']](self.event, set_defaults=set_defaults) else: return None def _process(self): step = int(request.form.get('step', 1)) tpl_args = {} form = self._form_for_step(step, set_defaults=True) prev_form = self._form_for_step(step - 1) if prev_form and not prev_form.validate(): form = prev_form step = step - 1 if step == 4: tpl_args.update({ 'step_title': dict(CLONE_REPEAT_CHOICES)[request.form['repeatability']], }) elif step > 4: # last step - perform actual cloning form = REPEAT_FORM_MAP[request.form['repeatability']](self.event) if form.validate_on_submit(): if form.repeatability.data == 'once': dates = [form.start_dt.data] else: clone_calculator = get_clone_calculator(form.repeatability.data, self.event) dates = clone_calculator.calculate(request.form)[0] clones = 
[clone_event(self.event, start_dt, set(form.selected_items.data), form.category.data) for start_dt in dates] if len(clones) == 1: flash(_('Welcome to your cloned event!'), 'success') return jsonify_data(redirect=url_for('event_management.settings', clones[0]), flash=False) else: flash(_('{} new events created.').format(len(dates)), 'success') return jsonify_data(redirect=form.category.data.url, flash=False) else: # back to step 4, since there's been an error step = 4 dependencies = {c.name: {'requires': list(c.requires_deep), 'required_by': list(c.required_by_deep)} for c in EventCloner.get_cloners(self.event)} return jsonify_template('events/management/clone_event.html', event=self.event, step=step, form=form, cloner_dependencies=dependencies, **tpl_args)
{ "content_hash": "231e7a26514f8a17a3578fbb9b6aa73f", "timestamp": "", "source": "github", "line_count": 191, "max_line_length": 117, "avg_line_length": 39.80628272251309, "alnum_prop": 0.6080494541628305, "repo_name": "mvidalgarcia/indico", "id": "0199915d83b8473fd3c77f0f849d1cad142c2c5d", "size": "7817", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "indico/modules/events/management/controllers/cloning.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "538590" }, { "name": "HTML", "bytes": "1345380" }, { "name": "JavaScript", "bytes": "1781971" }, { "name": "Mako", "bytes": "1340" }, { "name": "Python", "bytes": "4381847" }, { "name": "Shell", "bytes": "3568" }, { "name": "TeX", "bytes": "22182" }, { "name": "XSLT", "bytes": "1504" } ], "symlink_target": "" }
""" .. _ex-publication-figure: =================================== Make figures more publication ready =================================== In this example, we show several use cases to take MNE plots and customize them for a more publication-ready look. """ # Authors: Eric Larson <larson.eric.d@gmail.com> # Daniel McCloy <dan.mccloy@gmail.com> # Stefan Appelhoff <stefan.appelhoff@mailbox.org> # # License: BSD-3-Clause # %% # Imports # ------- # We are importing everything we need for this example: import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import (make_axes_locatable, ImageGrid, inset_locator) import mne # %% # Evoked plot with brain activation # --------------------------------- # # Suppose we want a figure with an evoked plot on top, and the brain activation # below, with the brain subplot slightly bigger than the evoked plot. Let's # start by loading some :ref:`example data <sample-dataset>`. data_path = mne.datasets.sample.data_path() subjects_dir = data_path / 'subjects' fname_stc = data_path / 'MEG' / 'sample' / 'sample_audvis-meg-eeg-lh.stc' fname_evoked = data_path / 'MEG' / 'sample' / 'sample_audvis-ave.fif' evoked = mne.read_evokeds(fname_evoked, 'Left Auditory') evoked.pick_types(meg='grad').apply_baseline((None, 0.)) max_t = evoked.get_peak()[1] stc = mne.read_source_estimate(fname_stc) # %% # During interactive plotting, we might see figures like this: evoked.plot() stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample', subjects_dir=subjects_dir, initial_time=max_t, time_viewer=False, show_traces=False) # %% # To make a publication-ready figure, first we'll re-plot the brain on a white # background, take a screenshot of it, and then crop out the white margins. 
# While we're at it, let's change the colormap, set custom colormap limits and # remove the default colorbar (so we can add a smaller, vertical one later): colormap = 'viridis' clim = dict(kind='value', lims=[4, 8, 12]) # Plot the STC, get the brain image, crop it: brain = stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample', subjects_dir=subjects_dir, initial_time=max_t, background='w', colorbar=False, clim=clim, colormap=colormap, time_viewer=False, show_traces=False) screenshot = brain.screenshot() brain.close() # %% # Now let's crop out the white margins and the white gap between hemispheres. # The screenshot has dimensions ``(h, w, 3)``, with the last axis being R, G, B # values for each pixel, encoded as integers between ``0`` and ``255``. ``(255, # 255, 255)`` encodes a white pixel, so we'll detect any pixels that differ # from that: nonwhite_pix = (screenshot != 255).any(-1) nonwhite_row = nonwhite_pix.any(1) nonwhite_col = nonwhite_pix.any(0) cropped_screenshot = screenshot[nonwhite_row][:, nonwhite_col] # before/after results fig = plt.figure(figsize=(4, 4)) axes = ImageGrid(fig, 111, nrows_ncols=(2, 1), axes_pad=0.5) for ax, image, title in zip(axes, [screenshot, cropped_screenshot], ['Before', 'After']): ax.imshow(image) ax.set_title('{} cropping'.format(title)) # %% # A lot of figure settings can be adjusted after the figure is created, but # many can also be adjusted in advance by updating the # :data:`~matplotlib.rcParams` dictionary. This is especially useful when your # script generates several figures that you want to all have the same style: # Tweak the figure style plt.rcParams.update({ 'ytick.labelsize': 'small', 'xtick.labelsize': 'small', 'axes.labelsize': 'small', 'axes.titlesize': 'medium', 'grid.color': '0.75', 'grid.linestyle': ':', }) # %% # Now let's create our custom figure. There are lots of ways to do this step. 
# Here we'll create the figure and the subplot axes in one step, specifying # overall figure size, number and arrangement of subplots, and the ratio of # subplot heights for each row using :mod:`GridSpec keywords # <matplotlib.gridspec>`. Other approaches (using # :func:`~matplotlib.pyplot.subplot2grid`, or adding each axes manually) are # shown commented out, for reference. # sphinx_gallery_thumbnail_number = 4 # figsize unit is inches fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(4.5, 3.), gridspec_kw=dict(height_ratios=[3, 4])) # alternate way #1: using subplot2grid # fig = plt.figure(figsize=(4.5, 3.)) # axes = [plt.subplot2grid((7, 1), (0, 0), rowspan=3), # plt.subplot2grid((7, 1), (3, 0), rowspan=4)] # alternate way #2: using figure-relative coordinates # fig = plt.figure(figsize=(4.5, 3.)) # axes = [fig.add_axes([0.125, 0.58, 0.775, 0.3]), # left, bot., width, height # fig.add_axes([0.125, 0.11, 0.775, 0.4])] # we'll put the evoked plot in the upper axes, and the brain below evoked_idx = 0 brain_idx = 1 # plot the evoked in the desired subplot, and add a line at peak activation evoked.plot(axes=axes[evoked_idx]) peak_line = axes[evoked_idx].axvline(max_t, color='#66CCEE', ls='--') # custom legend axes[evoked_idx].legend( [axes[evoked_idx].lines[0], peak_line], ['MEG data', 'Peak time'], frameon=True, columnspacing=0.1, labelspacing=0.1, fontsize=8, fancybox=True, handlelength=1.8) # remove the "N_ave" annotation for text in list(axes[evoked_idx].texts): text.remove() # Remove spines and add grid axes[evoked_idx].grid(True) axes[evoked_idx].set_axisbelow(True) for key in ('top', 'right'): axes[evoked_idx].spines[key].set(visible=False) # Tweak the ticks and limits axes[evoked_idx].set( yticks=np.arange(-200, 201, 100), xticks=np.arange(-0.2, 0.51, 0.1)) axes[evoked_idx].set( ylim=[-225, 225], xlim=[-0.2, 0.5]) # now add the brain to the lower axes axes[brain_idx].imshow(cropped_screenshot) axes[brain_idx].axis('off') # add a vertical colorbar with the 
same properties as the 3D one divider = make_axes_locatable(axes[brain_idx]) cax = divider.append_axes('right', size='5%', pad=0.2) cbar = mne.viz.plot_brain_colorbar(cax, clim, colormap, label='Activation (F)') # tweak margins and spacing fig.subplots_adjust( left=0.15, right=0.9, bottom=0.01, top=0.9, wspace=0.1, hspace=0.5) # add subplot labels for ax, label in zip(axes, 'AB'): ax.text(0.03, ax.get_position().ymax, label, transform=fig.transFigure, fontsize=12, fontweight='bold', va='top', ha='left') # %% # Custom timecourse with montage inset # ------------------------------------ # # Suppose we want a figure with some mean timecourse extracted from a number of # sensors, and we want a smaller panel within the figure to show a head outline # with the positions of those sensors clearly marked. # If you are familiar with MNE, you know that this is something that # :func:`mne.viz.plot_compare_evokeds` does, see an example output in # :ref:`ex-hf-sef-data` at the bottom. # # In this part of the example, we will show you how to achieve this result on # your own figure, without having to use :func:`mne.viz.plot_compare_evokeds`! # # Let's start by loading some :ref:`example data <sample-dataset>`. data_path = mne.datasets.sample.data_path() fname_raw = data_path / "MEG" / "sample" / "sample_audvis_raw.fif" raw = mne.io.read_raw_fif(fname_raw) # For the sake of the example, we focus on EEG data raw.pick_types(meg=False, eeg=True) # %% # Let's make a plot. 
# channels to plot: to_plot = [f"EEG {i:03}" for i in range(1, 5)] # get the data for plotting in a short time interval from 10 to 20 seconds start = int(raw.info['sfreq'] * 10) stop = int(raw.info['sfreq'] * 20) data, times = raw.get_data(picks=to_plot, start=start, stop=stop, return_times=True) # Scale the data from the MNE internal unit V to µV data *= 1e6 # Take the mean of the channels mean = np.mean(data, axis=0) # make a figure fig, ax = plt.subplots(figsize=(4.5, 3)) # plot some EEG data ax.plot(times, mean) # %% # So far so good. Now let's add the smaller figure within the figure to show # exactly, which sensors we used to make the timecourse. # For that, we use an "inset_axes" that we plot into our existing axes. # The head outline with the sensor positions can be plotted using the # `~mne.io.Raw` object that is the source of our data. # Specifically, that object already contains all the sensor positions, # and we can plot them using the ``plot_sensors`` method. # recreate the figure (only necessary for our documentation server) fig, ax = plt.subplots(figsize=(4.5, 3)) ax.plot(times, mean) axins = inset_locator.inset_axes(ax, width="30%", height="30%", loc=2) # pick_channels() edits the raw object in place, so we'll make a copy here # so that our raw object stays intact for potential later analysis raw.copy().pick_channels(to_plot).plot_sensors(title="", axes=axins) # %% # That looks nice. But the sensor dots are way too big for our taste. Luckily, # all MNE-Python plots use Matplotlib under the hood and we can customize # each and every facet of them. # To make the sensor dots smaller, we need to first get a handle on them to # then apply a ``*.set_*`` method on them. # If we inspect our axes we find the objects contained in our plot: print(axins.get_children()) # %% # That's quite a a lot of objects, but we know that we want to change the # sensor dots, and those are most certainly a "PathCollection" object. 
# So let's have a look at how many "collections" we have in the axes. print(axins.collections) # %% # There is only one! Those must be the sensor dots we were looking for. # We finally found exactly what we needed. Sometimes this can take a bit of # experimentation. sensor_dots = axins.collections[0] # Recreate the figure once more; shrink the sensor dots; add axis labels fig, ax = plt.subplots(figsize=(4.5, 3)) ax.plot(times, mean) axins = inset_locator.inset_axes(ax, width="30%", height="30%", loc=2) raw.copy().pick_channels(to_plot).plot_sensors(title="", axes=axins) sensor_dots = axins.collections[0] sensor_dots.set_sizes([1]) # add axis labels, and adjust bottom figure margin to make room for them ax.set(xlabel="Time (s)", ylabel="Amplitude (µV)") fig.subplots_adjust(bottom=0.2)
{ "content_hash": "b1fa443192b76f9b0a82699e613ff8cb", "timestamp": "", "source": "github", "line_count": 273, "max_line_length": 79, "avg_line_length": 37.43589743589744, "alnum_prop": 0.6858121330724071, "repo_name": "mne-tools/mne-tools.github.io", "id": "fc9385c385f5564a6bbd8101c83d6fe09da25441", "size": "10246", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "dev/_downloads/cae85de1d2b532e063fb12463baa0fca/publication_figure.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "708696" }, { "name": "Dockerfile", "bytes": "1820" }, { "name": "HTML", "bytes": "1526247783" }, { "name": "JavaScript", "bytes": "1323087" }, { "name": "Jupyter Notebook", "bytes": "24820047" }, { "name": "Python", "bytes": "18575494" } ], "symlink_target": "" }
from __future__ import unicode_literals import base64 import json import boto import boto3 import os import sure # noqa import sys from boto.exception import BotoServerError from botocore.exceptions import ClientError from moto import mock_iam, mock_iam_deprecated from moto.iam.models import aws_managed_policies from nose.tools import assert_raises, assert_equals from nose.tools import raises from datetime import datetime from tests.helpers import requires_boto_gte MOCK_CERT = """-----BEGIN CERTIFICATE----- MIIBpzCCARACCQCY5yOdxCTrGjANBgkqhkiG9w0BAQsFADAXMRUwEwYDVQQKDAxt b3RvIHRlc3RpbmcwIBcNMTgxMTA1MTkwNTIwWhgPMjI5MjA4MTkxOTA1MjBaMBcx FTATBgNVBAoMDG1vdG8gdGVzdGluZzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC gYEA1Jn3g2h7LD3FLqdpcYNbFXCS4V4eDpuTCje9vKFcC3pi/01147X3zdfPy8Mt ZhKxcREOwm4NXykh23P9KW7fBovpNwnbYsbPqj8Hf1ZaClrgku1arTVhEnKjx8zO vaR/bVLCss4uE0E0VM1tJn/QGQsfthFsjuHtwx8uIWz35tUCAwEAATANBgkqhkiG 9w0BAQsFAAOBgQBWdOQ7bDc2nWkUhFjZoNIZrqjyNdjlMUndpwREVD7FQ/DuxJMj FyDHrtlrS80dPUQWNYHw++oACDpWO01LGLPPrGmuO/7cOdojPEd852q5gd+7W9xt 8vUH+pBa6IBLbvBp+szli51V3TLSWcoyy4ceJNQU2vCkTLoFdS0RLd/7tQ== -----END CERTIFICATE-----""" MOCK_POLICY = """ { "Version": "2012-10-17", "Statement": { "Effect": "Allow", "Action": "s3:ListBucket", "Resource": "arn:aws:s3:::example_bucket" } } """ MOCK_POLICY_2 = """ { "Version": "2012-10-17", "Id": "2", "Statement": { "Effect": "Allow", "Action": "s3:ListBucket", "Resource": "arn:aws:s3:::example_bucket" } } """ MOCK_POLICY_3 = """ { "Version": "2012-10-17", "Id": "3", "Statement": { "Effect": "Allow", "Action": "s3:ListBucket", "Resource": "arn:aws:s3:::example_bucket" } } """ @mock_iam_deprecated() def test_get_all_server_certs(): conn = boto.connect_iam() conn.upload_server_cert("certname", "certbody", "privatekey") certs = conn.get_all_server_certs()['list_server_certificates_response'][ 'list_server_certificates_result']['server_certificate_metadata_list'] certs.should.have.length_of(1) cert1 = certs[0] 
cert1.server_certificate_name.should.equal("certname") cert1.arn.should.equal( "arn:aws:iam::123456789012:server-certificate/certname") @mock_iam_deprecated() def test_get_server_cert_doesnt_exist(): conn = boto.connect_iam() with assert_raises(BotoServerError): conn.get_server_certificate("NonExistant") @mock_iam_deprecated() def test_get_server_cert(): conn = boto.connect_iam() conn.upload_server_cert("certname", "certbody", "privatekey") cert = conn.get_server_certificate("certname") cert.server_certificate_name.should.equal("certname") cert.arn.should.equal( "arn:aws:iam::123456789012:server-certificate/certname") @mock_iam_deprecated() def test_upload_server_cert(): conn = boto.connect_iam() conn.upload_server_cert("certname", "certbody", "privatekey") cert = conn.get_server_certificate("certname") cert.server_certificate_name.should.equal("certname") cert.arn.should.equal( "arn:aws:iam::123456789012:server-certificate/certname") @mock_iam_deprecated() def test_delete_server_cert(): conn = boto.connect_iam() conn.upload_server_cert("certname", "certbody", "privatekey") conn.get_server_certificate("certname") conn.delete_server_cert("certname") with assert_raises(BotoServerError): conn.get_server_certificate("certname") with assert_raises(BotoServerError): conn.delete_server_cert("certname") @mock_iam_deprecated() @raises(BotoServerError) def test_get_role__should_throw__when_role_does_not_exist(): conn = boto.connect_iam() conn.get_role('unexisting_role') @mock_iam_deprecated() @raises(BotoServerError) def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist(): conn = boto.connect_iam() conn.get_instance_profile('unexisting_instance_profile') @mock_iam_deprecated() def test_create_role_and_instance_profile(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") conn.create_role( "my-role", assume_role_policy_document="some policy", path="my-path") conn.add_role_to_instance_profile("my-profile", 
"my-role") role = conn.get_role("my-role") role.path.should.equal("my-path") role.assume_role_policy_document.should.equal("some policy") profile = conn.get_instance_profile("my-profile") profile.path.should.equal("my-path") role_from_profile = list(profile.roles.values())[0] role_from_profile['role_id'].should.equal(role.role_id) role_from_profile['role_name'].should.equal("my-role") conn.list_roles().roles[0].role_name.should.equal('my-role') # Test with an empty path: profile = conn.create_instance_profile('my-other-profile') profile.path.should.equal('/') @mock_iam_deprecated() def test_remove_role_from_instance_profile(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") conn.create_role( "my-role", assume_role_policy_document="some policy", path="my-path") conn.add_role_to_instance_profile("my-profile", "my-role") profile = conn.get_instance_profile("my-profile") role_from_profile = list(profile.roles.values())[0] role_from_profile['role_name'].should.equal("my-role") conn.remove_role_from_instance_profile("my-profile", "my-role") profile = conn.get_instance_profile("my-profile") dict(profile.roles).should.be.empty @mock_iam() def test_get_login_profile(): conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName='my-user') conn.create_login_profile(UserName='my-user', Password='my-pass') response = conn.get_login_profile(UserName='my-user') response['LoginProfile']['UserName'].should.equal('my-user') @mock_iam() def test_update_login_profile(): conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName='my-user') conn.create_login_profile(UserName='my-user', Password='my-pass') response = conn.get_login_profile(UserName='my-user') response['LoginProfile'].get('PasswordResetRequired').should.equal(None) conn.update_login_profile(UserName='my-user', Password='new-pass', PasswordResetRequired=True) response = conn.get_login_profile(UserName='my-user') 
response['LoginProfile'].get('PasswordResetRequired').should.equal(True) @mock_iam() def test_delete_role(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): conn.delete_role(RoleName="my-role") conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") role = conn.get_role(RoleName="my-role") role.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-path/my-role') conn.delete_role(RoleName="my-role") with assert_raises(ClientError): conn.get_role(RoleName="my-role") @mock_iam_deprecated() def test_list_instance_profiles(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") conn.create_role("my-role", path="my-path") conn.add_role_to_instance_profile("my-profile", "my-role") profiles = conn.list_instance_profiles().instance_profiles len(profiles).should.equal(1) profiles[0].instance_profile_name.should.equal("my-profile") profiles[0].roles.role_name.should.equal("my-role") @mock_iam_deprecated() def test_list_instance_profiles_for_role(): conn = boto.connect_iam() conn.create_role(role_name="my-role", assume_role_policy_document="some policy", path="my-path") conn.create_role(role_name="my-role2", assume_role_policy_document="some policy2", path="my-path2") profile_name_list = ['my-profile', 'my-profile2'] profile_path_list = ['my-path', 'my-path2'] for profile_count in range(0, 2): conn.create_instance_profile( profile_name_list[profile_count], path=profile_path_list[profile_count]) for profile_count in range(0, 2): conn.add_role_to_instance_profile( profile_name_list[profile_count], "my-role") profile_dump = conn.list_instance_profiles_for_role(role_name="my-role") profile_list = profile_dump['list_instance_profiles_for_role_response'][ 'list_instance_profiles_for_role_result']['instance_profiles'] for profile_count in range(0, len(profile_list)): profile_name_list.remove(profile_list[profile_count][ "instance_profile_name"]) 
profile_path_list.remove(profile_list[profile_count]["path"]) profile_list[profile_count]["roles"]["member"][ "role_name"].should.equal("my-role") len(profile_name_list).should.equal(0) len(profile_path_list).should.equal(0) profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2") profile_list = profile_dump2['list_instance_profiles_for_role_response'][ 'list_instance_profiles_for_role_result']['instance_profiles'] len(profile_list).should.equal(0) @mock_iam_deprecated() def test_list_role_policies(): conn = boto.connect_iam() conn.create_role("my-role") conn.put_role_policy("my-role", "test policy", MOCK_POLICY) role = conn.list_role_policies("my-role") role.policy_names.should.have.length_of(1) role.policy_names[0].should.equal("test policy") conn.put_role_policy("my-role", "test policy 2", MOCK_POLICY) role = conn.list_role_policies("my-role") role.policy_names.should.have.length_of(2) conn.delete_role_policy("my-role", "test policy") role = conn.list_role_policies("my-role") role.policy_names.should.have.length_of(1) role.policy_names[0].should.equal("test policy 2") with assert_raises(BotoServerError): conn.delete_role_policy("my-role", "test policy") @mock_iam_deprecated() def test_put_role_policy(): conn = boto.connect_iam() conn.create_role( "my-role", assume_role_policy_document="some policy", path="my-path") conn.put_role_policy("my-role", "test policy", MOCK_POLICY) policy = conn.get_role_policy( "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] policy.should.equal("test policy") @mock_iam def test_get_role_policy(): conn = boto3.client('iam', region_name='us-east-1') conn.create_role( RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="my-path") with assert_raises(conn.exceptions.NoSuchEntityException): conn.get_role_policy(RoleName="my-role", PolicyName="does-not-exist") @mock_iam_deprecated() def test_update_assume_role_policy(): conn = boto.connect_iam() role = 
conn.create_role("my-role") conn.update_assume_role_policy(role.role_name, "my-policy") role = conn.get_role("my-role") role.assume_role_policy_document.should.equal("my-policy") @mock_iam def test_create_policy(): conn = boto3.client('iam', region_name='us-east-1') response = conn.create_policy( PolicyName="TestCreatePolicy", PolicyDocument=MOCK_POLICY) response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestCreatePolicy") @mock_iam def test_create_policy_versions(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", PolicyDocument='{"some":"policy"}') conn.create_policy( PolicyName="TestCreatePolicyVersion", PolicyDocument=MOCK_POLICY) version = conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", PolicyDocument=MOCK_POLICY, SetAsDefault=True) version.get('PolicyVersion').get('Document').should.equal(json.loads(MOCK_POLICY)) version.get('PolicyVersion').get('VersionId').should.equal("v2") version.get('PolicyVersion').get('IsDefaultVersion').should.be.ok conn.delete_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", VersionId="v1") version = conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", PolicyDocument=MOCK_POLICY) version.get('PolicyVersion').get('VersionId').should.equal("v3") version.get('PolicyVersion').get('IsDefaultVersion').shouldnt.be.ok @mock_iam def test_create_many_policy_versions(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestCreateManyPolicyVersions", PolicyDocument=MOCK_POLICY) for _ in range(0, 4): conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestCreateManyPolicyVersions", PolicyDocument=MOCK_POLICY) with assert_raises(ClientError): conn.create_policy_version( 
PolicyArn="arn:aws:iam::123456789012:policy/TestCreateManyPolicyVersions", PolicyDocument=MOCK_POLICY) @mock_iam def test_set_default_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestSetDefaultPolicyVersion", PolicyDocument=MOCK_POLICY) conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestSetDefaultPolicyVersion", PolicyDocument=MOCK_POLICY_2, SetAsDefault=True) conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestSetDefaultPolicyVersion", PolicyDocument=MOCK_POLICY_3, SetAsDefault=True) versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestSetDefaultPolicyVersion") versions.get('Versions')[0].get('Document').should.equal(json.loads(MOCK_POLICY)) versions.get('Versions')[0].get('IsDefaultVersion').shouldnt.be.ok versions.get('Versions')[1].get('Document').should.equal(json.loads(MOCK_POLICY_2)) versions.get('Versions')[1].get('IsDefaultVersion').shouldnt.be.ok versions.get('Versions')[2].get('Document').should.equal(json.loads(MOCK_POLICY_3)) versions.get('Versions')[2].get('IsDefaultVersion').should.be.ok @mock_iam def test_get_policy(): conn = boto3.client('iam', region_name='us-east-1') response = conn.create_policy( PolicyName="TestGetPolicy", PolicyDocument=MOCK_POLICY) policy = conn.get_policy( PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy") policy['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy") @mock_iam def test_get_aws_managed_policy(): conn = boto3.client('iam', region_name='us-east-1') managed_policy_arn = 'arn:aws:iam::aws:policy/IAMUserChangePassword' managed_policy_create_date = datetime.strptime("2016-11-15T00:25:16+00:00", "%Y-%m-%dT%H:%M:%S+00:00") policy = conn.get_policy( PolicyArn=managed_policy_arn) policy['Policy']['Arn'].should.equal(managed_policy_arn) policy['Policy']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_create_date) @mock_iam def 
test_get_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestGetPolicyVersion", PolicyDocument=MOCK_POLICY) version = conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", PolicyDocument=MOCK_POLICY) with assert_raises(ClientError): conn.get_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", VersionId='v2-does-not-exist') retrieved = conn.get_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", VersionId=version.get('PolicyVersion').get('VersionId')) retrieved.get('PolicyVersion').get('Document').should.equal(json.loads(MOCK_POLICY)) retrieved.get('PolicyVersion').get('IsDefaultVersion').shouldnt.be.ok @mock_iam def test_get_aws_managed_policy_version(): conn = boto3.client('iam', region_name='us-east-1') managed_policy_arn = 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole' managed_policy_version_create_date = datetime.strptime("2015-04-09T15:03:43+00:00", "%Y-%m-%dT%H:%M:%S+00:00") with assert_raises(ClientError): conn.get_policy_version( PolicyArn=managed_policy_arn, VersionId='v2-does-not-exist') retrieved = conn.get_policy_version( PolicyArn=managed_policy_arn, VersionId="v1") retrieved['PolicyVersion']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_version_create_date) retrieved['PolicyVersion']['Document'].should.be.an(dict) @mock_iam def test_get_aws_managed_policy_v4_version(): conn = boto3.client('iam', region_name='us-east-1') managed_policy_arn = 'arn:aws:iam::aws:policy/job-function/SystemAdministrator' managed_policy_version_create_date = datetime.strptime("2018-10-08T21:33:45+00:00", "%Y-%m-%dT%H:%M:%S+00:00") with assert_raises(ClientError): conn.get_policy_version( PolicyArn=managed_policy_arn, VersionId='v2-does-not-exist') retrieved = conn.get_policy_version( PolicyArn=managed_policy_arn, VersionId="v4") 
retrieved['PolicyVersion']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_version_create_date) retrieved['PolicyVersion']['Document'].should.be.an(dict) @mock_iam def test_list_policy_versions(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") conn.create_policy( PolicyName="TestListPolicyVersions", PolicyDocument=MOCK_POLICY) versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") versions.get('Versions')[0].get('VersionId').should.equal('v1') versions.get('Versions')[0].get('IsDefaultVersion').should.be.ok conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", PolicyDocument=MOCK_POLICY_2) conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", PolicyDocument=MOCK_POLICY_3) versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") versions.get('Versions')[1].get('Document').should.equal(json.loads(MOCK_POLICY_2)) versions.get('Versions')[1].get('IsDefaultVersion').shouldnt.be.ok versions.get('Versions')[2].get('Document').should.equal(json.loads(MOCK_POLICY_3)) versions.get('Versions')[2].get('IsDefaultVersion').shouldnt.be.ok @mock_iam def test_delete_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestDeletePolicyVersion", PolicyDocument=MOCK_POLICY) conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", PolicyDocument=MOCK_POLICY) with assert_raises(ClientError): conn.delete_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", VersionId='v2-nope-this-does-not-exist') conn.delete_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", VersionId='v2') versions = 
conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion") len(versions.get('Versions')).should.equal(1) @mock_iam def test_delete_default_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestDeletePolicyVersion", PolicyDocument=MOCK_POLICY) conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", PolicyDocument=MOCK_POLICY_2) with assert_raises(ClientError): conn.delete_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", VersionId='v1') @mock_iam_deprecated() def test_create_user(): conn = boto.connect_iam() conn.create_user('my-user') with assert_raises(BotoServerError): conn.create_user('my-user') @mock_iam_deprecated() def test_get_user(): conn = boto.connect_iam() with assert_raises(BotoServerError): conn.get_user('my-user') conn.create_user('my-user') conn.get_user('my-user') @mock_iam() def test_update_user(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(conn.exceptions.NoSuchEntityException): conn.update_user(UserName='my-user') conn.create_user(UserName='my-user') conn.update_user(UserName='my-user', NewPath='/new-path/', NewUserName='new-user') response = conn.get_user(UserName='new-user') response['User'].get('Path').should.equal('/new-path/') with assert_raises(conn.exceptions.NoSuchEntityException): conn.get_user(UserName='my-user') @mock_iam_deprecated() def test_get_current_user(): """If no user is specific, IAM returns the current user""" conn = boto.connect_iam() user = conn.get_user()['get_user_response']['get_user_result']['user'] user['user_name'].should.equal('default_user') @mock_iam() def test_list_users(): path_prefix = '/' max_items = 10 conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName='my-user') response = conn.list_users(PathPrefix=path_prefix, MaxItems=max_items) user = response['Users'][0] 
user['UserName'].should.equal('my-user') user['Path'].should.equal('/') user['Arn'].should.equal('arn:aws:iam::123456789012:user/my-user') @mock_iam() def test_user_policies(): policy_name = 'UserManagedPolicy' user_name = 'my-user' conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName=user_name) conn.put_user_policy( UserName=user_name, PolicyName=policy_name, PolicyDocument=MOCK_POLICY ) policy_doc = conn.get_user_policy( UserName=user_name, PolicyName=policy_name ) policy_doc['PolicyDocument'].should.equal(json.loads(MOCK_POLICY)) policies = conn.list_user_policies(UserName=user_name) len(policies['PolicyNames']).should.equal(1) policies['PolicyNames'][0].should.equal(policy_name) conn.delete_user_policy( UserName=user_name, PolicyName=policy_name ) policies = conn.list_user_policies(UserName=user_name) len(policies['PolicyNames']).should.equal(0) @mock_iam_deprecated() def test_create_login_profile(): conn = boto.connect_iam() with assert_raises(BotoServerError): conn.create_login_profile('my-user', 'my-pass') conn.create_user('my-user') conn.create_login_profile('my-user', 'my-pass') with assert_raises(BotoServerError): conn.create_login_profile('my-user', 'my-pass') @mock_iam_deprecated() def test_delete_login_profile(): conn = boto.connect_iam() conn.create_user('my-user') with assert_raises(BotoServerError): conn.delete_login_profile('my-user') conn.create_login_profile('my-user', 'my-pass') conn.delete_login_profile('my-user') @mock_iam() def test_create_access_key(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): conn.create_access_key(UserName='my-user') conn.create_user(UserName='my-user') access_key = conn.create_access_key(UserName='my-user')["AccessKey"] (datetime.utcnow() - access_key["CreateDate"].replace(tzinfo=None)).seconds.should.be.within(0, 10) access_key["AccessKeyId"].should.have.length_of(20) access_key["SecretAccessKey"].should.have.length_of(40) assert 
access_key["AccessKeyId"].startswith("AKIA") @mock_iam_deprecated() def test_get_all_access_keys(): """If no access keys exist there should be none in the response, if an access key is present it should have the correct fields present""" conn = boto.connect_iam() conn.create_user('my-user') response = conn.get_all_access_keys('my-user') assert_equals( response['list_access_keys_response'][ 'list_access_keys_result']['access_key_metadata'], [] ) conn.create_access_key('my-user') response = conn.get_all_access_keys('my-user') assert_equals( sorted(response['list_access_keys_response'][ 'list_access_keys_result']['access_key_metadata'][0].keys()), sorted(['status', 'create_date', 'user_name', 'access_key_id']) ) @mock_iam_deprecated() def test_delete_access_key(): conn = boto.connect_iam() conn.create_user('my-user') access_key_id = conn.create_access_key('my-user')['create_access_key_response'][ 'create_access_key_result']['access_key']['access_key_id'] conn.delete_access_key(access_key_id, 'my-user') @mock_iam() def test_mfa_devices(): # Test enable device conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName='my-user') conn.enable_mfa_device( UserName='my-user', SerialNumber='123456789', AuthenticationCode1='234567', AuthenticationCode2='987654' ) # Test list mfa devices response = conn.list_mfa_devices(UserName='my-user') device = response['MFADevices'][0] device['SerialNumber'].should.equal('123456789') # Test deactivate mfa device conn.deactivate_mfa_device(UserName='my-user', SerialNumber='123456789') response = conn.list_mfa_devices(UserName='my-user') len(response['MFADevices']).should.equal(0) @mock_iam_deprecated() def test_delete_user(): conn = boto.connect_iam() with assert_raises(BotoServerError): conn.delete_user('my-user') conn.create_user('my-user') conn.delete_user('my-user') @mock_iam_deprecated() def test_generate_credential_report(): conn = boto.connect_iam() result = conn.generate_credential_report() 
result['generate_credential_report_response'][ 'generate_credential_report_result']['state'].should.equal('STARTED') result = conn.generate_credential_report() result['generate_credential_report_response'][ 'generate_credential_report_result']['state'].should.equal('COMPLETE') @mock_iam def test_boto3_generate_credential_report(): conn = boto3.client('iam', region_name='us-east-1') result = conn.generate_credential_report() result['State'].should.equal('STARTED') result = conn.generate_credential_report() result['State'].should.equal('COMPLETE') @mock_iam_deprecated() def test_get_credential_report(): conn = boto.connect_iam() conn.create_user('my-user') with assert_raises(BotoServerError): conn.get_credential_report() result = conn.generate_credential_report() while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE': result = conn.generate_credential_report() result = conn.get_credential_report() report = base64.b64decode(result['get_credential_report_response'][ 'get_credential_report_result']['content'].encode('ascii')).decode('ascii') report.should.match(r'.*my-user.*') @mock_iam def test_boto3_get_credential_report(): conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName='my-user') with assert_raises(ClientError): conn.get_credential_report() result = conn.generate_credential_report() while result['State'] != 'COMPLETE': result = conn.generate_credential_report() result = conn.get_credential_report() report = result['Content'].decode('utf-8') report.should.match(r'.*my-user.*') @requires_boto_gte('2.39') @mock_iam_deprecated() def test_managed_policy(): conn = boto.connect_iam() conn.create_policy(policy_name='UserManagedPolicy', policy_document=MOCK_POLICY, path='/mypolicy/', description='my user managed policy') marker = 0 aws_policies = [] while marker is not None: response = conn.list_policies(scope='AWS', marker=marker)[ 'list_policies_response']['list_policies_result'] for 
policy in response['policies']: aws_policies.append(policy) marker = response.get('marker') set(p.name for p in aws_managed_policies).should.equal( set(p['policy_name'] for p in aws_policies)) user_policies = conn.list_policies(scope='Local')['list_policies_response'][ 'list_policies_result']['policies'] set(['UserManagedPolicy']).should.equal( set(p['policy_name'] for p in user_policies)) marker = 0 all_policies = [] while marker is not None: response = conn.list_policies(marker=marker)[ 'list_policies_response']['list_policies_result'] for policy in response['policies']: all_policies.append(policy) marker = response.get('marker') set(p['policy_name'] for p in aws_policies + user_policies).should.equal(set(p['policy_name'] for p in all_policies)) role_name = 'my-role' conn.create_role(role_name, assume_role_policy_document={ 'policy': 'test'}, path="my-path") for policy_name in ['AmazonElasticMapReduceRole', 'AmazonElasticMapReduceforEC2Role']: policy_arn = 'arn:aws:iam::aws:policy/service-role/' + policy_name conn.attach_role_policy(policy_arn, role_name) rows = conn.list_policies(only_attached=True)['list_policies_response'][ 'list_policies_result']['policies'] rows.should.have.length_of(2) for x in rows: int(x['attachment_count']).should.be.greater_than(0) # boto has not implemented this end point but accessible this way resp = conn.get_response('ListAttachedRolePolicies', {'RoleName': role_name}, list_marker='AttachedPolicies') resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ 'attached_policies'].should.have.length_of(2) conn.detach_role_policy( "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name) rows = conn.list_policies(only_attached=True)['list_policies_response'][ 'list_policies_result']['policies'] rows.should.have.length_of(1) for x in rows: int(x['attachment_count']).should.be.greater_than(0) # boto has not implemented this end point but accessible this way resp = 
conn.get_response('ListAttachedRolePolicies', {'RoleName': role_name}, list_marker='AttachedPolicies') resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ 'attached_policies'].should.have.length_of(1) with assert_raises(BotoServerError): conn.detach_role_policy( "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name) with assert_raises(BotoServerError): conn.detach_role_policy( "arn:aws:iam::aws:policy/Nonexistent", role_name) @mock_iam def test_boto3_create_login_profile(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): conn.create_login_profile(UserName='my-user', Password='Password') conn.create_user(UserName='my-user') conn.create_login_profile(UserName='my-user', Password='Password') with assert_raises(ClientError): conn.create_login_profile(UserName='my-user', Password='Password') @mock_iam() def test_attach_detach_user_policy(): iam = boto3.resource('iam', region_name='us-east-1') client = boto3.client('iam', region_name='us-east-1') user = iam.create_user(UserName='test-user') policy_name = 'UserAttachedPolicy' policy = iam.create_policy(PolicyName=policy_name, PolicyDocument=MOCK_POLICY, Path='/mypolicy/', Description='my user attached policy') client.attach_user_policy(UserName=user.name, PolicyArn=policy.arn) resp = client.list_attached_user_policies(UserName=user.name) resp['AttachedPolicies'].should.have.length_of(1) attached_policy = resp['AttachedPolicies'][0] attached_policy['PolicyArn'].should.equal(policy.arn) attached_policy['PolicyName'].should.equal(policy_name) client.detach_user_policy(UserName=user.name, PolicyArn=policy.arn) resp = client.list_attached_user_policies(UserName=user.name) resp['AttachedPolicies'].should.have.length_of(0) @mock_iam def test_update_access_key(): iam = boto3.resource('iam', region_name='us-east-1') client = iam.meta.client username = 'test-user' iam.create_user(UserName=username) with assert_raises(ClientError): 
client.update_access_key(UserName=username, AccessKeyId='non-existent-key', Status='Inactive') key = client.create_access_key(UserName=username)['AccessKey'] client.update_access_key(UserName=username, AccessKeyId=key['AccessKeyId'], Status='Inactive') resp = client.list_access_keys(UserName=username) resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') @mock_iam def test_get_access_key_last_used(): iam = boto3.resource('iam', region_name='us-east-1') client = iam.meta.client username = 'test-user' iam.create_user(UserName=username) with assert_raises(ClientError): client.get_access_key_last_used(AccessKeyId='non-existent-key-id') create_key_response = client.create_access_key(UserName=username)['AccessKey'] resp = client.get_access_key_last_used(AccessKeyId=create_key_response['AccessKeyId']) datetime.strftime(resp["AccessKeyLastUsed"]["LastUsedDate"], "%Y-%m-%d").should.equal(datetime.strftime( datetime.utcnow(), "%Y-%m-%d" )) resp["UserName"].should.equal(create_key_response["UserName"]) @mock_iam def test_get_account_authorization_details(): test_policy = json.dumps({ "Version": "2012-10-17", "Statement": [ { "Action": "s3:ListBucket", "Resource": "*", "Effect": "Allow", } ] }) conn = boto3.client('iam', region_name='us-east-1') conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") conn.create_user(Path='/', UserName='testUser') conn.create_group(Path='/', GroupName='testGroup') conn.create_policy( PolicyName='testPolicy', Path='/', PolicyDocument=test_policy, Description='Test Policy' ) # Attach things to the user and group: conn.put_user_policy(UserName='testUser', PolicyName='testPolicy', PolicyDocument=test_policy) conn.put_group_policy(GroupName='testGroup', PolicyName='testPolicy', PolicyDocument=test_policy) conn.attach_user_policy(UserName='testUser', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') conn.attach_group_policy(GroupName='testGroup', 
PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') conn.add_user_to_group(UserName='testUser', GroupName='testGroup') # Add things to the role: conn.create_instance_profile(InstanceProfileName='ipn') conn.add_role_to_instance_profile(InstanceProfileName='ipn', RoleName='my-role') conn.tag_role(RoleName='my-role', Tags=[ { 'Key': 'somekey', 'Value': 'somevalue' }, { 'Key': 'someotherkey', 'Value': 'someothervalue' } ]) conn.put_role_policy(RoleName='my-role', PolicyName='test-policy', PolicyDocument=test_policy) conn.attach_role_policy(RoleName='my-role', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') result = conn.get_account_authorization_details(Filter=['Role']) assert len(result['RoleDetailList']) == 1 assert len(result['UserDetailList']) == 0 assert len(result['GroupDetailList']) == 0 assert len(result['Policies']) == 0 assert len(result['RoleDetailList'][0]['InstanceProfileList']) == 1 assert len(result['RoleDetailList'][0]['Tags']) == 2 assert len(result['RoleDetailList'][0]['RolePolicyList']) == 1 assert len(result['RoleDetailList'][0]['AttachedManagedPolicies']) == 1 assert result['RoleDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' assert result['RoleDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ 'arn:aws:iam::123456789012:policy/testPolicy' result = conn.get_account_authorization_details(Filter=['User']) assert len(result['RoleDetailList']) == 0 assert len(result['UserDetailList']) == 1 assert len(result['UserDetailList'][0]['GroupList']) == 1 assert len(result['UserDetailList'][0]['AttachedManagedPolicies']) == 1 assert len(result['GroupDetailList']) == 0 assert len(result['Policies']) == 0 assert result['UserDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' assert result['UserDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ 'arn:aws:iam::123456789012:policy/testPolicy' result = conn.get_account_authorization_details(Filter=['Group']) assert 
len(result['RoleDetailList']) == 0 assert len(result['UserDetailList']) == 0 assert len(result['GroupDetailList']) == 1 assert len(result['GroupDetailList'][0]['GroupPolicyList']) == 1 assert len(result['GroupDetailList'][0]['AttachedManagedPolicies']) == 1 assert len(result['Policies']) == 0 assert result['GroupDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' assert result['GroupDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ 'arn:aws:iam::123456789012:policy/testPolicy' result = conn.get_account_authorization_details(Filter=['LocalManagedPolicy']) assert len(result['RoleDetailList']) == 0 assert len(result['UserDetailList']) == 0 assert len(result['GroupDetailList']) == 0 assert len(result['Policies']) == 1 assert len(result['Policies'][0]['PolicyVersionList']) == 1 # Check for greater than 1 since this should always be greater than one but might change. # See iam/aws_managed_policies.py result = conn.get_account_authorization_details(Filter=['AWSManagedPolicy']) assert len(result['RoleDetailList']) == 0 assert len(result['UserDetailList']) == 0 assert len(result['GroupDetailList']) == 0 assert len(result['Policies']) > 1 result = conn.get_account_authorization_details() assert len(result['RoleDetailList']) == 1 assert len(result['UserDetailList']) == 1 assert len(result['GroupDetailList']) == 1 assert len(result['Policies']) > 1 @mock_iam def test_signing_certs(): client = boto3.client('iam', region_name='us-east-1') # Create the IAM user first: client.create_user(UserName='testing') # Upload the cert: resp = client.upload_signing_certificate(UserName='testing', CertificateBody=MOCK_CERT)['Certificate'] cert_id = resp['CertificateId'] assert resp['UserName'] == 'testing' assert resp['Status'] == 'Active' assert resp['CertificateBody'] == MOCK_CERT assert resp['CertificateId'] # Upload a the cert with an invalid body: with assert_raises(ClientError) as ce: client.upload_signing_certificate(UserName='testing', 
CertificateBody='notacert') assert ce.exception.response['Error']['Code'] == 'MalformedCertificate' # Upload with an invalid user: with assert_raises(ClientError): client.upload_signing_certificate(UserName='notauser', CertificateBody=MOCK_CERT) # Update: client.update_signing_certificate(UserName='testing', CertificateId=cert_id, Status='Inactive') with assert_raises(ClientError): client.update_signing_certificate(UserName='notauser', CertificateId=cert_id, Status='Inactive') with assert_raises(ClientError) as ce: client.update_signing_certificate(UserName='testing', CertificateId='x' * 32, Status='Inactive') assert ce.exception.response['Error']['Message'] == 'The Certificate with id {id} cannot be found.'.format( id='x' * 32) # List the certs: resp = client.list_signing_certificates(UserName='testing')['Certificates'] assert len(resp) == 1 assert resp[0]['CertificateBody'] == MOCK_CERT assert resp[0]['Status'] == 'Inactive' # Changed with the update call above. with assert_raises(ClientError): client.list_signing_certificates(UserName='notauser') # Delete: client.delete_signing_certificate(UserName='testing', CertificateId=cert_id) with assert_raises(ClientError): client.delete_signing_certificate(UserName='notauser', CertificateId=cert_id) @mock_iam() def test_create_saml_provider(): conn = boto3.client('iam', region_name='us-east-1') response = conn.create_saml_provider( Name="TestSAMLProvider", SAMLMetadataDocument='a' * 1024 ) response['SAMLProviderArn'].should.equal("arn:aws:iam::123456789012:saml-provider/TestSAMLProvider") @mock_iam() def test_get_saml_provider(): conn = boto3.client('iam', region_name='us-east-1') saml_provider_create = conn.create_saml_provider( Name="TestSAMLProvider", SAMLMetadataDocument='a' * 1024 ) response = conn.get_saml_provider( SAMLProviderArn=saml_provider_create['SAMLProviderArn'] ) response['SAMLMetadataDocument'].should.equal('a' * 1024) @mock_iam() def test_list_saml_providers(): conn = boto3.client('iam', 
region_name='us-east-1') conn.create_saml_provider( Name="TestSAMLProvider", SAMLMetadataDocument='a' * 1024 ) response = conn.list_saml_providers() response['SAMLProviderList'][0]['Arn'].should.equal("arn:aws:iam::123456789012:saml-provider/TestSAMLProvider") @mock_iam() def test_delete_saml_provider(): conn = boto3.client('iam', region_name='us-east-1') saml_provider_create = conn.create_saml_provider( Name="TestSAMLProvider", SAMLMetadataDocument='a' * 1024 ) response = conn.list_saml_providers() len(response['SAMLProviderList']).should.equal(1) conn.delete_saml_provider( SAMLProviderArn=saml_provider_create['SAMLProviderArn'] ) response = conn.list_saml_providers() len(response['SAMLProviderList']).should.equal(0) conn.create_user(UserName='testing') cert_id = '123456789012345678901234' with assert_raises(ClientError) as ce: conn.delete_signing_certificate(UserName='testing', CertificateId=cert_id) assert ce.exception.response['Error']['Message'] == 'The Certificate with id {id} cannot be found.'.format( id=cert_id) # Verify that it's not in the list: resp = conn.list_signing_certificates(UserName='testing') assert not resp['Certificates'] @mock_iam() def test_tag_role(): """Tests both the tag_role and get_role_tags capability""" conn = boto3.client('iam', region_name='us-east-1') conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}") # Get without tags: role = conn.get_role(RoleName='my-role')['Role'] assert not role.get('Tags') # With proper tag values: conn.tag_role(RoleName='my-role', Tags=[ { 'Key': 'somekey', 'Value': 'somevalue' }, { 'Key': 'someotherkey', 'Value': 'someothervalue' } ]) # Get role: role = conn.get_role(RoleName='my-role')['Role'] assert len(role['Tags']) == 2 assert role['Tags'][0]['Key'] == 'somekey' assert role['Tags'][0]['Value'] == 'somevalue' assert role['Tags'][1]['Key'] == 'someotherkey' assert role['Tags'][1]['Value'] == 'someothervalue' # Same -- but for list_role_tags: tags = conn.list_role_tags(RoleName='my-role') 
assert len(tags['Tags']) == 2 assert role['Tags'][0]['Key'] == 'somekey' assert role['Tags'][0]['Value'] == 'somevalue' assert role['Tags'][1]['Key'] == 'someotherkey' assert role['Tags'][1]['Value'] == 'someothervalue' assert not tags['IsTruncated'] assert not tags.get('Marker') # Test pagination: tags = conn.list_role_tags(RoleName='my-role', MaxItems=1) assert len(tags['Tags']) == 1 assert tags['IsTruncated'] assert tags['Tags'][0]['Key'] == 'somekey' assert tags['Tags'][0]['Value'] == 'somevalue' assert tags['Marker'] == '1' tags = conn.list_role_tags(RoleName='my-role', Marker=tags['Marker']) assert len(tags['Tags']) == 1 assert tags['Tags'][0]['Key'] == 'someotherkey' assert tags['Tags'][0]['Value'] == 'someothervalue' assert not tags['IsTruncated'] assert not tags.get('Marker') # Test updating an existing tag: conn.tag_role(RoleName='my-role', Tags=[ { 'Key': 'somekey', 'Value': 'somenewvalue' } ]) tags = conn.list_role_tags(RoleName='my-role') assert len(tags['Tags']) == 2 assert tags['Tags'][0]['Key'] == 'somekey' assert tags['Tags'][0]['Value'] == 'somenewvalue' # Empty is good: conn.tag_role(RoleName='my-role', Tags=[ { 'Key': 'somekey', 'Value': '' } ]) tags = conn.list_role_tags(RoleName='my-role') assert len(tags['Tags']) == 2 assert tags['Tags'][0]['Key'] == 'somekey' assert tags['Tags'][0]['Value'] == '' # Test creating tags with invalid values: # With more than 50 tags: with assert_raises(ClientError) as ce: too_many_tags = list(map(lambda x: {'Key': str(x), 'Value': str(x)}, range(0, 51))) conn.tag_role(RoleName='my-role', Tags=too_many_tags) assert 'failed to satisfy constraint: Member must have length less than or equal to 50.' \ in ce.exception.response['Error']['Message'] # With a duplicate tag: with assert_raises(ClientError) as ce: conn.tag_role(RoleName='my-role', Tags=[{'Key': '0', 'Value': ''}, {'Key': '0', 'Value': ''}]) assert 'Duplicate tag keys found. Please note that Tag keys are case insensitive.' 
\ in ce.exception.response['Error']['Message'] # Duplicate tag with different casing: with assert_raises(ClientError) as ce: conn.tag_role(RoleName='my-role', Tags=[{'Key': 'a', 'Value': ''}, {'Key': 'A', 'Value': ''}]) assert 'Duplicate tag keys found. Please note that Tag keys are case insensitive.' \ in ce.exception.response['Error']['Message'] # With a really big key: with assert_raises(ClientError) as ce: conn.tag_role(RoleName='my-role', Tags=[{'Key': '0' * 129, 'Value': ''}]) assert 'Member must have length less than or equal to 128.' in ce.exception.response['Error']['Message'] # With a really big value: with assert_raises(ClientError) as ce: conn.tag_role(RoleName='my-role', Tags=[{'Key': '0', 'Value': '0' * 257}]) assert 'Member must have length less than or equal to 256.' in ce.exception.response['Error']['Message'] # With an invalid character: with assert_raises(ClientError) as ce: conn.tag_role(RoleName='my-role', Tags=[{'Key': 'NOWAY!', 'Value': ''}]) assert 'Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+' \ in ce.exception.response['Error']['Message'] # With a role that doesn't exist: with assert_raises(ClientError): conn.tag_role(RoleName='notarole', Tags=[{'Key': 'some', 'Value': 'value'}]) @mock_iam def test_untag_role(): conn = boto3.client('iam', region_name='us-east-1') conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}") # With proper tag values: conn.tag_role(RoleName='my-role', Tags=[ { 'Key': 'somekey', 'Value': 'somevalue' }, { 'Key': 'someotherkey', 'Value': 'someothervalue' } ]) # Remove them: conn.untag_role(RoleName='my-role', TagKeys=['somekey']) tags = conn.list_role_tags(RoleName='my-role') assert len(tags['Tags']) == 1 assert tags['Tags'][0]['Key'] == 'someotherkey' assert tags['Tags'][0]['Value'] == 'someothervalue' # And again: conn.untag_role(RoleName='my-role', TagKeys=['someotherkey']) tags = conn.list_role_tags(RoleName='my-role') assert not tags['Tags'] # Test removing tags 
with invalid values: # With more than 50 tags: with assert_raises(ClientError) as ce: conn.untag_role(RoleName='my-role', TagKeys=[str(x) for x in range(0, 51)]) assert 'failed to satisfy constraint: Member must have length less than or equal to 50.' \ in ce.exception.response['Error']['Message'] assert 'tagKeys' in ce.exception.response['Error']['Message'] # With a really big key: with assert_raises(ClientError) as ce: conn.untag_role(RoleName='my-role', TagKeys=['0' * 129]) assert 'Member must have length less than or equal to 128.' in ce.exception.response['Error']['Message'] assert 'tagKeys' in ce.exception.response['Error']['Message'] # With an invalid character: with assert_raises(ClientError) as ce: conn.untag_role(RoleName='my-role', TagKeys=['NOWAY!']) assert 'Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+' \ in ce.exception.response['Error']['Message'] assert 'tagKeys' in ce.exception.response['Error']['Message'] # With a role that doesn't exist: with assert_raises(ClientError): conn.untag_role(RoleName='notarole', TagKeys=['somevalue']) @mock_iam() def test_update_role_description(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): conn.delete_role(RoleName="my-role") conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") response = conn.update_role_description(RoleName="my-role", Description="test") assert response['Role']['RoleName'] == 'my-role' @mock_iam() def test_update_role(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): conn.delete_role(RoleName="my-role") conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") response = conn.update_role_description(RoleName="my-role", Description="test") assert response['Role']['RoleName'] == 'my-role' @mock_iam() def test_update_role(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): 
conn.delete_role(RoleName="my-role") conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") response = conn.update_role(RoleName="my-role", Description="test") assert len(response.keys()) == 1 @mock_iam() def test_list_entities_for_policy(): test_policy = json.dumps({ "Version": "2012-10-17", "Statement": [ { "Action": "s3:ListBucket", "Resource": "*", "Effect": "Allow", } ] }) conn = boto3.client('iam', region_name='us-east-1') conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") conn.create_user(Path='/', UserName='testUser') conn.create_group(Path='/', GroupName='testGroup') conn.create_policy( PolicyName='testPolicy', Path='/', PolicyDocument=test_policy, Description='Test Policy' ) # Attach things to the user and group: conn.put_user_policy(UserName='testUser', PolicyName='testPolicy', PolicyDocument=test_policy) conn.put_group_policy(GroupName='testGroup', PolicyName='testPolicy', PolicyDocument=test_policy) conn.attach_user_policy(UserName='testUser', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') conn.attach_group_policy(GroupName='testGroup', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') conn.add_user_to_group(UserName='testUser', GroupName='testGroup') # Add things to the role: conn.create_instance_profile(InstanceProfileName='ipn') conn.add_role_to_instance_profile(InstanceProfileName='ipn', RoleName='my-role') conn.tag_role(RoleName='my-role', Tags=[ { 'Key': 'somekey', 'Value': 'somevalue' }, { 'Key': 'someotherkey', 'Value': 'someothervalue' } ]) conn.put_role_policy(RoleName='my-role', PolicyName='test-policy', PolicyDocument=test_policy) conn.attach_role_policy(RoleName='my-role', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') response = conn.list_entities_for_policy( PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', EntityFilter='Role' ) assert response['PolicyRoles'] == [{'RoleName': 'my-role'}] response = 
conn.list_entities_for_policy( PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', EntityFilter='User', ) assert response['PolicyUsers'] == [{'UserName': 'testUser'}] response = conn.list_entities_for_policy( PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', EntityFilter='Group', ) assert response['PolicyGroups'] == [{'GroupName': 'testGroup'}] response = conn.list_entities_for_policy( PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', EntityFilter='LocalManagedPolicy', ) assert response['PolicyGroups'] == [{'GroupName': 'testGroup'}] assert response['PolicyUsers'] == [{'UserName': 'testUser'}] assert response['PolicyRoles'] == [{'RoleName': 'my-role'}] @mock_iam() def test_create_role_no_path(): conn = boto3.client('iam', region_name='us-east-1') resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test') resp.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-role') resp.get('Role').should_not.have.key('PermissionsBoundary') @mock_iam() def test_create_role_with_permissions_boundary(): conn = boto3.client('iam', region_name='us-east-1') boundary = 'arn:aws:iam::123456789012:policy/boundary' resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=boundary) expected = { 'PermissionsBoundaryType': 'PermissionsBoundaryPolicy', 'PermissionsBoundaryArn': boundary } resp.get('Role').get('PermissionsBoundary').should.equal(expected) invalid_boundary_arn = 'arn:aws:iam::123456789:not_a_boundary' with assert_raises(ClientError): conn.create_role(RoleName='bad-boundary', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=invalid_boundary_arn) # Ensure the PermissionsBoundary is included in role listing as well conn.list_roles().get('Roles')[0].get('PermissionsBoundary').should.equal(expected)
{ "content_hash": "94764223cae018464d847f69456939fe", "timestamp": "", "source": "github", "line_count": 1463, "max_line_length": 152, "avg_line_length": 38.05604921394395, "alnum_prop": 0.6651339895107408, "repo_name": "whummer/moto", "id": "e7507e2e52027f381b90b4702b8f13411857f957", "size": "55676", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_iam/test_iam.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "443" }, { "name": "HTML", "bytes": "5848" }, { "name": "Java", "bytes": "1688" }, { "name": "JavaScript", "bytes": "756" }, { "name": "Makefile", "bytes": "1148" }, { "name": "Python", "bytes": "6015085" }, { "name": "Ruby", "bytes": "188" }, { "name": "Scala", "bytes": "782" }, { "name": "Shell", "bytes": "797" } ], "symlink_target": "" }
from setuptools import setup

# Packaging metadata for the armstrong.apps.couchdb namespace package.
setup(
    name='armstrong.apps.couchdb',
    version='0.1.1',
    description='Provides a few generic views for wrapping calls to CouchDB',
    author='Texas Tribune',
    author_email='tech@texastribune.org',
    url='http://github.com/texastribune/armstrong.apps.couchdb/',
    # All three package levels must be listed explicitly so the nested
    # package directory is installed.
    packages=[
        'armstrong',
        'armstrong.apps',
        'armstrong.apps.couchdb',
    ],
    # 'armstrong' is a setuptools namespace package shared with the other
    # armstrong.* distributions.
    namespace_packages=[
        "armstrong",
    ],
    install_requires=[
        'setuptools',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
)
{ "content_hash": "85dc7b97097bbc3f155d424fc9b63afb", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 77, "avg_line_length": 27.096774193548388, "alnum_prop": 0.5976190476190476, "repo_name": "texastribune/armstrong.apps.couchdb", "id": "dacd176b72a57f4681e3ad383501b44ce9e9f0b2", "size": "840", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "10981" } ], "symlink_target": "" }
# Package initializer: picks the ANTLR-generated parser modules that match the
# running interpreter (Python 2 vs Python 3) and re-exports the public
# Template class.
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map,
                      next, oct, open, pow, range, round, str, super, zip)

__version__ = "1.2.1"

import sys

if sys.version_info[0] == 2:
    # Parser classes generated for the Python 2 ANTLR runtime.
    from .py2parser import (FoostacheLexer, FoostacheParser,
                            FoostacheParserListener, FoostacheParserVisitor)
elif sys.version_info[0] == 3:
    # Parser classes generated for the Python 3 ANTLR runtime.
    from .py3parser import (FoostacheLexer, FoostacheParser,
                            FoostacheParserListener, FoostacheParserVisitor)
else:
    raise RuntimeError("Unhandled Python version.")

from .template import Template
{ "content_hash": "0ed13db137a6ae53ccf0beaadf50e33a", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 129, "avg_line_length": 41.4, "alnum_prop": 0.7536231884057971, "repo_name": "ldgabbay/foostache-python", "id": "ec47144085baaafb429b726a7c153b257bbedfbb", "size": "621", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "src/foostache/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "2910" }, { "name": "Python", "bytes": "22202" } ], "symlink_target": "" }
from django import forms #cuz it is a package import datetime from lessons.models import Lesson, Assessment # lolling about the difference between ModelForm and Form class LessonAddForm(forms.Form): name = forms.CharField() url = forms.URLField() # IT BETTER VALIDATE IT choices = ( (1,'Yes'), (0,'Not yet'), ) # I HATE CHOICE FIELDS class PromiseForm(forms.Form): done = forms.TypedChoiceField(choices=choices, widget=forms.RadioSelect, coerce=int, label="I did it!") # I HATE CHOICE FIELDS class PromiseMakeForm(forms.Form): who = forms.CharField() when = forms.DateField() made_by = forms.IntegerField() #lesson = forms.ChoiceField() # http://stackoverflow.com/questions/3419997/creating-a-dynamic-choice-field def __init__(self, *args, **kwargs): super(PromiseMakeForm, self).__init__(*args, **kwargs) self.fields['lesson'] = forms.ChoiceField(choices=[ (o.id, str(o)) for o in Lesson.objects.all()]) self.fields['assessment'] = forms.ChoiceField(choices=[ (o.id, str(o)) for o in Assessment.objects.all()]) class AssessmentForm(forms.Form): post = forms.CharField(widget=forms.widgets.Textarea()) class AssessmentAddForm(forms.Form): question = forms.CharField(max_length=200)
{ "content_hash": "1381e3254f707515124c011e218f2e3a", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 114, "avg_line_length": 35.44444444444444, "alnum_prop": 0.6943573667711599, "repo_name": "selenamarie/teach-today", "id": "b84f3642c6b332227aa0ebc6ba5abd178d4f9bad", "size": "1276", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lessons/forms.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "23705" } ], "symlink_target": "" }
import sys

import nicknamer

# True when running under Python 2; used to pick dict-iteration helpers below.
IS_PY2 = sys.version_info[0] == 2

if IS_PY2:
    def dict_iteritems(d):
        """Return a lazy iterator over (key, value) pairs of d."""
        return d.iteritems()

    def dict_iterkeys(d):
        """Return a lazy iterator over the keys of d."""
        return d.iterkeys()
else:
    def dict_iteritems(d):
        """Return a lazy iterator over (key, value) pairs of d."""
        return iter(d.items())

    def dict_iterkeys(d):
        """Return a lazy iterator over the keys of d."""
        return iter(d.keys())


class Common():
    """
    Shared state accumulated while scraping router log files.

    The attributes are class-level on purpose: a single Common instance is
    used as a bag of cross-referenced global data by the scraper.
    """

    # analysis_level_ludicrous
    # The scraper tries very hard to cross reference data.
    # Use these switches to turn some of the biggest offenders off.
    per_link_detail = True
    message_progress_tables = False

    # returned from argparse.parse_args()
    args = None

    # first letter of the connection names
    log_char_base = 'A'

    # number of logs processed
    n_logs = 0

    # array of file name strings from command line
    # len=n_logs
    log_fns = []

    # discovered router container names (raw long names)
    # len=n_logs
    router_ids = []

    # router display names shortened with popups
    router_display_names = []

    # router modes in plain text
    router_modes = []

    # list of router-instance lists
    # [[A0, A1], [B0], [C0, C1, C2]]
    routers = []

    # ordered list of connection names across all routers
    all_conn_names = []

    # conn_details_map -
    # key=conn_id, val=ConnectionDetail for that connection
    conn_details_map = {}

    # mapping of connected routers by connection id
    # A0_1 is connected to B3_2
    # key = full conn_id 'A0_5'
    # val = full conn_id 'B0_8'
    # note names[key]=val and names[val]=key mutual reference
    conn_peers_connid = {}

    # short display name for peer indexed by connection id
    # A0_1 maps to B's container_name nickname
    conn_peers_display = {}

    # conn_to_frame_map - global map for easier iteration in main
    # key = conn_id full A0_3
    # val = list of plf lines
    conn_to_frame_map = {}

    # shared name shorteners used to build display nicknames
    shorteners = nicknamer.Shorteners()

    # when --no-data is in effect, how many log lines were skipped?
    data_skipped = 0

    # List of router log module names to include verbatim.
    # Defaults to "SCRAPER". Overridden by command line.
    verbatim_include_list = ["SCRAPER"]

    def router_id_index(self, id):
        """
        Given a router full container name, return the index in router_ids
        table.

        :raises ValueError: if the name is not found
        """
        return self.router_ids.index(id)

    def module_key_in_line(self, key, line):
        """
        Sense if the key is a log module name in the log line.

        The name can't be too far into the string or else it finds false
        positives when a user uses qdstat to get a log file. MAX_POSITION
        defines what constitutes 'too far'.

        :param key: module name to look for (must be non-empty)
        :param line: log line text
        :return: True if key occurs within the first MAX_POSITION characters
        """
        MAX_POSITION = 40
        assert len(key) > 0
        st = line.find(key)
        return 0 <= st <= MAX_POSITION


def log_letter_of(idx):
    """
    Return the letter A, B, C, ... from the index 0..n

    :param idx: index 0..25
    :return: A..Z
    """
    if idx >= 26:
        sys.exit('ERROR: too many log files')
    return "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[idx]


def index_of_log_letter(letter):
    """
    Return the index 0..25 of the first letter of the 'letter' string.

    :raises ValueError: if the letter is out of range
    """
    val = "ABCDEFGHIJKLMNOPQRSTUVWXYZ".find(letter[0].upper())
    if val < 0 or val > 25:
        # Bugfix: the message is now actually formatted; previously 'letter'
        # was passed as a second ValueError argument and never interpolated.
        raise ValueError("index_of_log_letter Invalid log letter: %s" % letter)
    return val


class RestartRec():
    """Record of a single router restart event."""

    def __init__(self, _id, _router, _event, _datetime):
        self.id = _id
        self.router = _router
        self.event = _event
        self.datetime = _datetime


def transfer_is_possibly_unsettled(plf):
    """
    Return True when a parsed transfer frame may still be unsettled:
    it is a transfer, is not partial (no 'more' flag), and has neither been
    settled nor received a final disposition.
    """
    return (plf.data.transfer and
            not plf.data.transfer_more and
            not (plf.data.transfer_settled or plf.data.final_disposition is not None))


# Color names keyed by the type of event being highlighted in the HTML output.
global_colors = {
    "errors": "yellow",
    "unsettled": "tomato",
    "presettled": "aqua",
    "accepted": "aquamarine",
    "rejected": "orange",  # hard coded in resolve_settlement
    "released": "orange",
    "modified": "orange",
    "aborted": "crimson",
    "more": "chartreuse",
    "drain": "gold",
    "no_credit": "beige"
}


def color_of(obj_type):
    """Return the display color for obj_type, defaulting to 'pink'."""
    return global_colors.get(obj_type, "pink")


# Minimal HTML escaping map; only the characters that matter for markup.
html_escape_table = {
    "&": "&amp;",
    ">": "&gt;",
    "<": "&lt;",
}


def html_escape(text):
    """Return text with &, <, > replaced by their HTML entities."""
    return "".join(html_escape_table.get(c, c) for c in text)
def strings_of_proton_log(text):
    '''
    Extract the human-readable strings from a proton transfer log dump.

    The input contains 4-character escape sequences such as "\\x00"; each run
    of escapes is collapsed to a single space and every other character is
    kept verbatim. The intended use is to help decode management and router
    frames in the transfer nickname dump.

    :param text: raw transfer log text
    :return: the readable strings embedded in text
    '''
    pieces = []            # collected output characters
    to_consume = 0         # chars of the current \xNN escape still to skip
    in_escape_run = False  # inside a run of consecutive escapes?
    for ch in text:
        if to_consume > 0:
            to_consume -= 1
            continue
        if ch == '\\':
            # First escape of a run contributes one separator space.
            if not in_escape_run:
                pieces.append(' ')
                in_escape_run = True
            to_consume = 3
        else:
            in_escape_run = False
            pieces.append(ch)
    return ''.join(pieces)


def ls_eval(text):
    '''
    Parse a router_ls cost string such as "{u'A': 1, u'C': 51L, u'B': 101L}"
    into a dictionary {A:1, C:51, B:101}.

    This code replaces ast.literal_eval.
    '''
    costs = {}
    body = text.strip(" {}")
    if not body:
        return costs
    for pair in body.split(', '):
        raw_key, raw_val = pair.split(": ")
        raw_key = raw_key.strip()
        # Strip Python-2 unicode prefixes and surrounding quotes.
        if raw_key[:2] in ("u'", 'u"'):
            raw_key = raw_key[2:-1]
        elif raw_key.startswith("'"):
            raw_key = raw_key[1:-1]
        raw_val = raw_val.strip()
        # Drop a Python-2 long-integer suffix.
        if raw_val.endswith("L"):
            raw_val = raw_val[:-1]
        costs[raw_key] = int(raw_val)
    return costs
{ "content_hash": "e72e9dd253c25da5820f287010148545", "timestamp": "", "source": "github", "line_count": 229, "max_line_length": 544, "avg_line_length": 29.283842794759824, "alnum_prop": 0.6039367730390695, "repo_name": "ganeshmurthy/qpid-dispatch", "id": "248e65fdeb64c5cf7ea403b4c4e4aaec9134f120", "size": "7556", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/scraper/common.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "2695814" }, { "name": "C++", "bytes": "359957" }, { "name": "CMake", "bytes": "54018" }, { "name": "CSS", "bytes": "49129" }, { "name": "Dockerfile", "bytes": "3230" }, { "name": "HTML", "bytes": "2320" }, { "name": "JavaScript", "bytes": "737682" }, { "name": "Objective-C", "bytes": "1976" }, { "name": "Python", "bytes": "2547017" }, { "name": "Shell", "bytes": "34107" } ], "symlink_target": "" }
"""Support for streamed reading and writing of multipart MIME content.""" from base64 import b64encode from cgi import parse_header try: from hashlib import md5 md5; # Pyflakes workaround except ImportError: from md5 import new as md5 import sys __all__ = ['read_multipart', 'write_multipart'] __docformat__ = 'restructuredtext en' CRLF = '\r\n' def read_multipart(fileobj, boundary=None): """Simple streaming MIME multipart parser. This function takes a file-like object reading a MIME envelope, and yields a ``(headers, is_multipart, payload)`` tuple for every part found, where ``headers`` is a dictionary containing the MIME headers of that part (with names lower-cased), ``is_multipart`` is a boolean indicating whether the part is itself multipart, and ``payload`` is either a string (if ``is_multipart`` is false), or an iterator over the nested parts. Note that the iterator produced for nested multipart payloads MUST be fully consumed, even if you wish to skip over the content. :param fileobj: a file-like object :param boundary: the part boundary string, will generally be determined automatically from the headers of the outermost multipart envelope :return: an iterator over the parts :since: 0.5 """ headers = {} buf = [] outer = in_headers = boundary is None next_boundary = boundary and '--' + boundary + '\n' or None last_boundary = boundary and '--' + boundary + '--\n' or None def _current_part(): payload = ''.join(buf) if payload.endswith('\r\n'): payload = payload[:-2] elif payload.endswith('\n'): payload = payload[:-1] content_md5 = headers.get('content-md5') if content_md5: h = b64encode(md5(payload).digest()) if content_md5 != h: raise ValueError('data integrity check failed') return headers, False, payload for line in fileobj: if in_headers: line = line.replace(CRLF, '\n') if line != '\n': name, value = line.split(':', 1) headers[name.lower().strip()] = value.strip() else: in_headers = False mimetype, params = parse_header(headers.get('content-type')) if 
mimetype.startswith('multipart/'): sub_boundary = params['boundary'] sub_parts = read_multipart(fileobj, boundary=sub_boundary) if boundary is not None: yield headers, True, sub_parts headers.clear() del buf[:] else: for part in sub_parts: yield part return elif line.replace(CRLF, '\n') == next_boundary: # We've reached the start of a new part, as indicated by the # boundary if headers: if not outer: yield _current_part() else: outer = False headers.clear() del buf[:] in_headers = True elif line.replace(CRLF, '\n') == last_boundary: # We're done with this multipart envelope break else: buf.append(line) if not outer and headers: yield _current_part() class MultipartWriter(object): def __init__(self, fileobj, headers=None, subtype='mixed', boundary=None): self.fileobj = fileobj if boundary is None: boundary = self._make_boundary() self.boundary = boundary if headers is None: headers = {} headers['Content-Type'] = 'multipart/%s; boundary="%s"' % ( subtype, self.boundary ) self._write_headers(headers) def open(self, headers=None, subtype='mixed', boundary=None): self.fileobj.write('--') self.fileobj.write(self.boundary) self.fileobj.write(CRLF) return MultipartWriter(self.fileobj, headers=headers, subtype=subtype, boundary=boundary) def add(self, mimetype, content, headers=None): self.fileobj.write('--') self.fileobj.write(self.boundary) self.fileobj.write(CRLF) if headers is None: headers = {} if isinstance(content, unicode): ctype, params = parse_header(mimetype) if 'charset' in params: content = content.encode(params['charset']) else: content = content.encode('utf-8') mimetype = mimetype + ';charset=utf-8' headers['Content-Type'] = mimetype if content: headers['Content-Length'] = str(len(content)) headers['Content-MD5'] = b64encode(md5(content).digest()) self._write_headers(headers) if content: # XXX: throw an exception if a boundary appears in the content?? 
self.fileobj.write(content) self.fileobj.write(CRLF) def close(self): self.fileobj.write('--') self.fileobj.write(self.boundary) self.fileobj.write('--') self.fileobj.write(CRLF) def _make_boundary(self): try: from uuid import uuid4 return '==' + uuid4().hex + '==' except ImportError: from random import randrange token = randrange(sys.maxint) format = '%%0%dd' % len(repr(sys.maxint - 1)) return '===============' + (format % token) + '==' def _write_headers(self, headers): if headers: for name in sorted(headers.keys()): self.fileobj.write(name) self.fileobj.write(': ') self.fileobj.write(headers[name]) self.fileobj.write(CRLF) self.fileobj.write(CRLF) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() def write_multipart(fileobj, subtype='mixed', boundary=None): r"""Simple streaming MIME multipart writer. This function returns a `MultipartWriter` object that has a few methods to control the nested MIME parts. For example, to write a flat multipart envelope you call the ``add(mimetype, content, [headers])`` method for every part, and finally call the ``close()`` method. >>> from StringIO import StringIO >>> buf = StringIO() >>> envelope = write_multipart(buf, boundary='==123456789==') >>> envelope.add('text/plain', 'Just testing') >>> envelope.close() >>> print buf.getvalue().replace('\r\n', '\n') Content-Type: multipart/mixed; boundary="==123456789==" <BLANKLINE> --==123456789== Content-Length: 12 Content-MD5: nHmX4a6el41B06x2uCpglQ== Content-Type: text/plain <BLANKLINE> Just testing --==123456789==-- <BLANKLINE> Note that an explicit boundary is only specified for testing purposes. If the `boundary` parameter is omitted, the multipart writer will generate a random string for the boundary. 
To write nested structures, call the ``open([headers])`` method on the respective envelope, and finish each envelope using the ``close()`` method: >>> buf = StringIO() >>> envelope = write_multipart(buf, boundary='==123456789==') >>> part = envelope.open(boundary='==abcdefghi==') >>> part.add('text/plain', 'Just testing') >>> part.close() >>> envelope.close() >>> print buf.getvalue().replace('\r\n', '\n') #:doctest +ELLIPSIS Content-Type: multipart/mixed; boundary="==123456789==" <BLANKLINE> --==123456789== Content-Type: multipart/mixed; boundary="==abcdefghi==" <BLANKLINE> --==abcdefghi== Content-Length: 12 Content-MD5: nHmX4a6el41B06x2uCpglQ== Content-Type: text/plain <BLANKLINE> Just testing --==abcdefghi==-- --==123456789==-- <BLANKLINE> :param fileobj: a writable file-like object that the output should get written to :param subtype: the subtype of the multipart MIME type (e.g. "mixed") :param boundary: the boundary to use to separate the different parts :since: 0.6 """ return MultipartWriter(fileobj, subtype=subtype, boundary=boundary)
{ "content_hash": "ec8cf0af9919619f526ae3e9b21f004e", "timestamp": "", "source": "github", "line_count": 240, "max_line_length": 79, "avg_line_length": 35.2125, "alnum_prop": 0.5792213939178795, "repo_name": "couchbaselabs/priority15", "id": "a46f4a2f26a13d2ec774fa83857900bf0f793520", "size": "8671", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "lib/couchdb/multipart.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "103452" }, { "name": "JavaScript", "bytes": "706877" }, { "name": "Python", "bytes": "1333744" } ], "symlink_target": "" }
from oscar.apps.address.abstract_models import AbstractPartnerAddress

from oscar_vat_moss import fields


class PartnerAddress(AbstractPartnerAddress):
    # Extend Oscar's stock partner address with a VAT identification number
    # field; the field definition comes from oscar_vat_moss.fields.vatin().
    vatin = fields.vatin()


# Pull in the remaining, unmodified models from Oscar's partner app.
# NOTE(review): Oscar's model-customisation convention appears to require
# this wildcard import to come *after* the overridden model above — keep
# the statement order as-is.
from oscar.apps.partner.models import *  # noqa
{ "content_hash": "f623ff8a893496c5fcb26e9aba9ebc2f", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 69, "avg_line_length": 21, "alnum_prop": 0.7922077922077922, "repo_name": "fghaas/django-oscar-vat_moss", "id": "d500671f7c9644b18c0169cc1d5da507fdab3a02", "size": "231", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "oscar_vat_moss/partner/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "99190" }, { "name": "Shell", "bytes": "104" } ], "symlink_target": "" }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core, schema, scope, workspace
from caffe2.python.layers.layers import (
    ModelLayer,
)

import caffe2.proto.caffe2_pb2 as caffe2_pb2
import numpy as np
import six

import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class Functional(ModelLayer):
    """Layer that wraps an arbitrary user-supplied net-building callable.

    The wrapped ``function(net, input_record, output_schema, **kwargs)`` is
    invoked twice: once at construction time on a throwaway net for
    shape/type inference (unless explicit ``output_dtypes`` or an explicit
    output ``schema.Field`` makes that unnecessary), and again in
    ``add_ops`` to emit the real operators.
    """

    def __init__(self, model, input_record, output_names_or_num, function,
                 name='functional', output_dtypes=None, **kwargs):
        """
        :param output_names_or_num: one of
            - an int: number of outputs (an anonymous ``RawTuple`` schema
              is created),
            - a ``schema.Field``: cloned and used directly as the output
              schema (all inference below is skipped),
            - a blob name or list of names for the outputs.
        :param output_dtypes: optional dtype (or list of dtypes, one per
            output) assigned to the output schema; when omitted, shapes and
            types are inferred by a fake execution of ``function``.
        """

        # allow coercion
        input_record = schema.as_record(input_record)

        super(Functional, self).__init__(model, name, input_record, **kwargs)
        self._function = function
        self._kwargs = kwargs
        # A Struct is returned for a list of names or an output count != 1;
        # a bare Scalar is returned for the single-output case.
        return_struct = (
            isinstance(output_names_or_num, list) or
            (isinstance(output_names_or_num, six.integer_types) and
             output_names_or_num != 1)
        )

        with scope.NameScope(self.name, reset=True):
            if isinstance(output_names_or_num, int):
                struct_output_schema = schema.NewRecord(
                    model.net, schema.RawTuple(output_names_or_num))
            elif isinstance(output_names_or_num, schema.Field):
                # Caller supplied a complete output schema — use it verbatim
                # and skip all dtype/shape handling below.
                self.output_schema = output_names_or_num.clone(keep_blobs=True)
                return
            else:
                if not isinstance(output_names_or_num, list):
                    output_names_or_num = [output_names_or_num]
                # np.void is a placeholder type; real types are filled in
                # below via output_dtypes or inference.
                out_tuple = [(out, np.void) for out in output_names_or_num]
                struct_output_schema = schema.NewRecord(
                    model.net, schema.Struct(*out_tuple))

        num_outputs = len(struct_output_schema.field_blobs())

        # functional layer returns Struct if more than one outputs or output
        # is a list, otherwise Scalar
        if return_struct:
            self.output_schema = struct_output_schema
        else:
            self.output_schema = struct_output_schema[0]

        # If output_dtypes is provided, use it for output schema. Otherwise
        # the shape and type will be inferred.
        if output_dtypes is not None:
            if not isinstance(output_dtypes, list):
                # A single dtype applies to every output.
                output_dtypes = [output_dtypes] * num_outputs
            assert len(output_dtypes) == num_outputs
            for dtype, scalar in zip(output_dtypes,
                                     self.output_schema.all_scalars()):
                scalar.set_type(dtype)
            return

        # Fake execution of the function to infer shapes and types
        # automatically
        had_issues = False
        try:
            type_net = core.Net('_temp_type_and_shape_inference_net')
            schema.InitEmptyRecord(type_net, input_record, enforce_types=True)

            function(type_net, self.input_record, self.output_schema, **kwargs)

            (shapes, types) = workspace.InferShapesAndTypes([type_net], {})

            for i in range(num_outputs):
                scalar_schema = (self.output_schema[i]
                                 if return_struct else self.output_schema)
                blob = scalar_schema()
                if blob not in types or blob not in shapes:
                    # Inference produced nothing for this blob; leave its
                    # placeholder type and warn once at the end.
                    had_issues = True
                    continue
                if shapes[blob] == []:
                    # Scalar type
                    shape = tuple()
                elif shapes[blob][0] == 0:
                    # Leading 0 marks the batch dimension — drop it from the
                    # per-example shape recorded in the schema.
                    shape = tuple(shapes[blob][1:])
                else:
                    logger.warning("unexpeced shape: {}".format(shapes[blob]))
                    # If batch dimension is not first - give up on shape
                    # inference for that blob
                    had_issues = True
                    continue

                # TODO(amalevich): Move it to some shared library
                dtype = None
                if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
                    dtype = (np.float64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
                    dtype = (np.float32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT32:
                    dtype = (np.int32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT64:
                    dtype = (np.int64, shape)

                if dtype is not None:
                    scalar_schema.set_type(dtype)
        except TypeError as ex:
            # The wrapped function (or inference) rejected the fake inputs;
            # inference is best-effort, so warn rather than fail.
            had_issues = True
            logger.warning(str(ex))

        if had_issues:
            logger.warning(
                "Type inference had problems for layer: {}".format(self.name))

    def add_ops(self, net):
        # Emit the real operators by re-invoking the wrapped callable on the
        # actual net with the original keyword arguments.
        self._function(
            net, self.input_record, self.output_schema, **(self._kwargs))
{ "content_hash": "0f4db279c5d2433df7d870173d47d7f5", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 80, "avg_line_length": 40.358333333333334, "alnum_prop": 0.5572991947140202, "repo_name": "pietern/caffe2", "id": "7a860d25c82fa6b071a0aa5dda9ebd6c3b6bd5ee", "size": "5577", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "caffe2/python/layers/functional.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "5415" }, { "name": "C", "bytes": "316608" }, { "name": "C++", "bytes": "4743501" }, { "name": "CMake", "bytes": "139649" }, { "name": "CSS", "bytes": "2196" }, { "name": "Cuda", "bytes": "671183" }, { "name": "HTML", "bytes": "5203" }, { "name": "Makefile", "bytes": "1225" }, { "name": "Metal", "bytes": "36752" }, { "name": "Objective-C", "bytes": "6505" }, { "name": "Objective-C++", "bytes": "239139" }, { "name": "Python", "bytes": "2902249" }, { "name": "Shell", "bytes": "31734" } ], "symlink_target": "" }
import os
import logging
import subprocess


def spawn(args, **kwargs):
    """Spawn a subprocess and return it back.

    :param args: sequence of program arguments; ``args[0]`` is the program
        being executed
    :param kwargs: extra keyword arguments forwarded to ``subprocess.Popen``
    :returns: the started ``subprocess.Popen`` instance, or ``None`` when the
        process could not be spawned (the failure is logged)
    """
    # Default the working directory to this module's directory so relative
    # resources resolve predictably regardless of the caller's cwd.
    if 'cwd' not in kwargs:
        kwargs['cwd'] = os.path.dirname(os.path.abspath(__file__))

    # Fully buffer the child's stdio pipes.
    kwargs['bufsize'] = -1
    if os.name == 'nt':
        # On Windows, prevent a console window from flashing up for the
        # spawned process.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        kwargs['startupinfo'] = startupinfo

    try:
        return subprocess.Popen(args, **kwargs)
    except (OSError, ValueError):
        # Popen raises OSError for a missing/denied executable and
        # ValueError for invalid arguments.  (Previously a bare ``except:``
        # which also swallowed SystemExit and KeyboardInterrupt.)
        logging.error(
            'Your operating system denied the spawn of %s process', args[0]
        )
        return None
{ "content_hash": "5a00022b11a6161274c5e13c2817af5c", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 74, "avg_line_length": 25.2, "alnum_prop": 0.5984126984126984, "repo_name": "prisis/sublime-text-packages", "id": "667c6801d2c33e59ed4eef1e0d30c56d11e3f9f3", "size": "759", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "Packages/Anaconda/anaconda_server/process.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "318" }, { "name": "Batchfile", "bytes": "786" }, { "name": "C++", "bytes": "56562" }, { "name": "CSS", "bytes": "18339" }, { "name": "HTML", "bytes": "1757" }, { "name": "JavaScript", "bytes": "206342" }, { "name": "PHP", "bytes": "2193174" }, { "name": "Pascal", "bytes": "7460" }, { "name": "PowerShell", "bytes": "397" }, { "name": "Python", "bytes": "19331281" }, { "name": "Shell", "bytes": "1903" }, { "name": "Smarty", "bytes": "4883" }, { "name": "SourcePawn", "bytes": "4479" }, { "name": "Tcl", "bytes": "88877" } ], "symlink_target": "" }
"""Entry-point script: loads the user and coupon CSV data on import.

NOTE(review): all loading happens at module level as a side effect of
importing this file; ``csv`` and ``time`` are imported but not used here —
presumably used by earlier revisions or the commented-out debug code.
"""
import csv
import time

from controller.CouponMaster import CouponMaster
from controller.UserMaster import UserMaster
from model.User import User

__author__ = "Ivon Liu"

# How to print unicode properly
# print(str(row).encode('ascii', 'ignore'))

print("Hello world")

# Read user_list — populates UserMaster.users from CSV.
UserMaster.load_from_csv()
# Debug dump of the loaded users (kept for reference):
# print("\nPrinting from User object now")
# for row in range(0, len(UserMaster.users)):
#     print(str(UserMaster.users[row]))

# Read coupon_list_train — populates CouponMaster.coupons from CSV.
CouponMaster.load_from_csv()
# Debug dump of the loaded coupons (kept for reference):
# print("\n Printing from Coupon object now")
# for row in range(0, len(CouponMaster.coupons)):
#     print(str(CouponMaster.coupons[row]))
{ "content_hash": "015821d75c7e87356f23ddbfabcb17df", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 49, "avg_line_length": 25.56, "alnum_prop": 0.7355242566510172, "repo_name": "AJLiu/SPCSAI-CouponPurchasePrediction", "id": "4de20d08f127527d1d60a48bef06727b18da4b6e", "size": "639", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "main/ReadCSV.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "14742" } ], "symlink_target": "" }
"""RAND Health Insurance Experiment Data""" from statsmodels.datasets import utils as du __docformat__ = 'restructuredtext' COPYRIGHT = """This is in the public domain.""" TITLE = __doc__ SOURCE = """ The data was collected by the RAND corporation as part of the Health Insurance Experiment (HIE). http://www.rand.org/health/projects/hie.html This data was used in:: Cameron, A.C. amd Trivedi, P.K. 2005. `Microeconometrics: Methods and Applications,` Cambridge: New York. And was obtained from: <http://cameron.econ.ucdavis.edu/mmabook/mmadata.html> See randhie/src for the original data and description. The data included here contains only a subset of the original data. The data varies slightly compared to that reported in Cameron and Trivedi. """ DESCRSHORT = """The RAND Co. Health Insurance Experiment Data""" DESCRLONG = """""" NOTE = """:: Number of observations - 20,190 Number of variables - 10 Variable name definitions:: mdvis - Number of outpatient visits to an MD lncoins - ln(coinsurance + 1), 0 <= coninsurance <= 100 idp - 1 if individual deductible plan, 0 otherwise lpi - ln(max(1, annual participation incentive payment)) fmde - 0 if idp = 1; ln(max(1, MDE/(0.01 coinsurance))) otherwise physlm - 1 if the person has a physical limitation disea - number of chronic diseases hlthg - 1 if self-rated health is good hlthf - 1 if self-rated health is fair hlthp - 1 if self-rated health is poor (Omitted category is excellent self-rated health) """ def load(): """ Loads the RAND HIE data and returns a Dataset class. Returns ------- Dataset See DATASET_PROPOSAL.txt for more information. Notes ----- endog - response variable, mdvis exog - design """ return load_pandas() def load_pandas(): """ Loads the RAND HIE data and returns a Dataset class. Returns ------- Dataset See DATASET_PROPOSAL.txt for more information. 
Notes ----- endog - response variable, mdvis exog - design """ return du.process_pandas(_get_data(), endog_idx=0) def _get_data(): return du.load_csv(__file__, 'randhie.csv')
{ "content_hash": "3808ecfbb9cf04ab833df6451dbff329", "timestamp": "", "source": "github", "line_count": 85, "max_line_length": 77, "avg_line_length": 27.094117647058823, "alnum_prop": 0.6435084672166739, "repo_name": "josef-pkt/statsmodels", "id": "a82c4679ac1a5410755c105f5db173f64dcc8c77", "size": "2303", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "statsmodels/datasets/randhie/data.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AGS Script", "bytes": "457842" }, { "name": "Assembly", "bytes": "10035" }, { "name": "Batchfile", "bytes": "625" }, { "name": "C", "bytes": "381" }, { "name": "Cython", "bytes": "225838" }, { "name": "Fortran", "bytes": "16671" }, { "name": "HTML", "bytes": "148470" }, { "name": "MATLAB", "bytes": "100525" }, { "name": "Python", "bytes": "14428857" }, { "name": "R", "bytes": "106569" }, { "name": "Shell", "bytes": "25322" }, { "name": "Stata", "bytes": "50129" } ], "symlink_target": "" }
"""NVP Plugin exceptions""" from neutron.common import exceptions as q_exc class NvpPluginException(q_exc.NeutronException): message = _("An unexpected error occurred in the NVP Plugin:%(err_msg)s") class NvpInvalidVersion(NvpPluginException): message = _("Unable to fulfill request with version %(version)s.") class NvpInvalidConnection(NvpPluginException): message = _("Invalid NVP connection parameters: %(conn_params)s") class NvpInvalidClusterConfiguration(NvpPluginException): message = _("Invalid cluster values: %(invalid_attrs)s. Please ensure " "that these values are specified in the [DEFAULT] " "section of the nvp plugin ini file.") class NvpInvalidNovaZone(NvpPluginException): message = _("Unable to find cluster config entry " "for nova zone: %(nova_zone)s") class NvpNoMorePortsException(NvpPluginException): message = _("Unable to create port on network %(network)s. " "Maximum number of ports reached") class NvpNatRuleMismatch(NvpPluginException): message = _("While retrieving NAT rules, %(actual_rules)s were found " "whereas rules in the (%(min_rules)s,%(max_rules)s) interval " "were expected") class NvpInvalidAttachmentType(NvpPluginException): message = _("Invalid NVP attachment type '%(attachment_type)s'") class MaintenanceInProgress(NvpPluginException): message = _("The networking backend is currently in maintenance mode and " "therefore unable to accept requests which modify its state. " "Please try later.") class NvpServicePluginException(q_exc.NeutronException): """NVP Service Plugin exceptions.""" message = _("An unexpected error happened " "in the NVP Service Plugin: %(err_msg)s") class NvpServiceOverQuota(q_exc.Conflict): message = _("Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s") class NvpVcnsDriverException(NvpServicePluginException): message = _("Error happened in NVP VCNS Driver: %(err_msg)s")
{ "content_hash": "8266a09f32400d8355eaa523fafc6b8a", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 78, "avg_line_length": 34.08196721311475, "alnum_prop": 0.6916786916786917, "repo_name": "netscaler/neutron", "id": "e26a49acabaaab757883a9cdf46042eeba9277f7", "size": "2760", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "neutron/plugins/nicira/common/exceptions.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "37307" }, { "name": "JavaScript", "bytes": "67928" }, { "name": "Python", "bytes": "6924102" }, { "name": "Shell", "bytes": "8983" }, { "name": "XSLT", "bytes": "50907" } ], "symlink_target": "" }
from __future__ import unicode_literals, division, absolute_import, print_function import unittest import sys import os from asn1crypto import pem, algos, keys, core from oscrypto import asymmetric, errors, backend from ._unittest_compat import patch patch() if sys.version_info < (3,): byte_cls = str int_types = (int, long) # noqa else: byte_cls = bytes int_types = (int,) _backend = backend() if _backend == 'openssl': from oscrypto._openssl._libcrypto import libcrypto_version_info openssl_098 = libcrypto_version_info < (1, 0, 0) else: openssl_098 = False tests_root = os.path.dirname(__file__) fixtures_dir = os.path.join(tests_root, 'fixtures') def _win_version_pair(): ver_info = sys.getwindowsversion() return (ver_info[0], ver_info[1]) def _should_support_sha2(): if _backend == 'mac': return False if _backend == 'winlegacy': return False if _backend == 'win' and _win_version_pair() < (6, 2): return False if openssl_098: return False return True class AsymmetricTests(unittest.TestCase): def test_load_incomplete_dsa_cert(self): with self.assertRaises(errors.IncompleteAsymmetricKeyError): asymmetric.load_public_key(os.path.join(fixtures_dir, 'DSAParametersInheritedCACert.crt')) def test_cert_attributes(self): cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'keys/test.crt')) self.assertEqual(2048, cert.bit_size) self.assertEqual(256, cert.byte_size) self.assertEqual('rsa', cert.algorithm) def test_public_key_attributes(self): pub_key = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-public-rsa.key')) self.assertEqual(2048, pub_key.bit_size) self.assertEqual(256, pub_key.byte_size) self.assertEqual('rsa', pub_key.algorithm) def test_private_key_attributes(self): private_key = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key')) self.assertEqual(2048, private_key.bit_size) self.assertEqual(256, private_key.byte_size) self.assertEqual('rsa', private_key.algorithm) def test_cert_ec_attributes(self): cert = 
asymmetric.load_certificate(os.path.join(fixtures_dir, 'keys/test-ec-named.crt')) self.assertEqual(256, cert.bit_size) self.assertEqual(32, cert.byte_size) self.assertEqual('secp256r1', cert.curve) self.assertEqual('ec', cert.algorithm) def test_public_key_ec_attributes(self): pub_key = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-public-ec-named.key')) self.assertEqual(256, pub_key.bit_size) self.assertEqual(32, pub_key.byte_size) self.assertEqual('secp256r1', pub_key.curve) self.assertEqual('ec', pub_key.algorithm) def test_private_key_ec_attributes(self): private_key = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key')) self.assertEqual(256, private_key.bit_size) self.assertEqual(32, private_key.byte_size) self.assertEqual('secp256r1', private_key.curve) self.assertEqual('ec', private_key.algorithm) def test_dump_public(self): public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) pem_serialized = asymmetric.dump_public_key(public) public_reloaded = asymmetric.load_public_key(pem_serialized) self.assertIsInstance(public_reloaded, asymmetric.PublicKey) self.assertEqual('rsa', public_reloaded.algorithm) def test_dump_certificate(self): cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'keys/test.crt')) pem_serialized = asymmetric.dump_certificate(cert) cert_reloaded = asymmetric.load_certificate(pem_serialized) self.assertIsInstance(cert_reloaded, asymmetric.Certificate) self.assertEqual('rsa', cert_reloaded.algorithm) def test_dump_private(self): def do_run(): private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key')) for password in [None, 'password123']: pem_serialized = asymmetric.dump_private_key(private, password, target_ms=20) private_reloaded = asymmetric.load_private_key(pem_serialized, password) self.assertTrue(pem.detect(pem_serialized)) self.assertIsInstance(private_reloaded, asymmetric.PrivateKey) self.assertEqual('rsa', 
private_reloaded.algorithm) # OpenSSL 0.9.8 and Windows CryptoAPI don't have PBKDF2 implemented in # C, thus the dump operation fails since there is no reasonable way to # ensure we are using a good number of iterations of PBKDF2 if openssl_098 or _backend == 'winlegacy': with self.assertRaises(OSError): do_run() else: do_run() def test_dump_private_openssl(self): private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key')) pem_serialized = asymmetric.dump_openssl_private_key(private, 'password123') private_reloaded = asymmetric.load_private_key(pem_serialized, 'password123') self.assertIsInstance(private_reloaded, asymmetric.PrivateKey) self.assertEqual('rsa', private_reloaded.algorithm) def test_load_rsa_pss_cert(self): cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'keys/test-pss.crt')) self.assertEqual('rsassa_pss', cert.algorithm) self.assertEqual(2048, cert.bit_size) def test_dh_generate(self): dh_parameters = asymmetric.generate_dh_parameters(512) self.assertIsInstance(dh_parameters, algos.DHParameters) self.assertIsInstance(dh_parameters['p'].native, int_types) self.assertIsInstance(dh_parameters['g'].native, int_types) self.assertEqual(2, dh_parameters['g'].native) def test_rsa_generate(self): public, private = asymmetric.generate_pair('rsa', bit_size=2048) self.assertEqual('rsa', public.algorithm) self.assertEqual(2048, public.bit_size) original_data = b'This is data to sign' signature = asymmetric.rsa_pkcs1v15_sign(private, original_data, 'sha1') self.assertIsInstance(signature, byte_cls) asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'sha1') raw_public = asymmetric.dump_public_key(public) asymmetric.load_public_key(raw_public) raw_private = asymmetric.dump_private_key(private, None) asymmetric.load_private_key(raw_private, None) self.assertIsInstance(private.fingerprint, byte_cls) self.assertIsInstance(public.fingerprint, byte_cls) self.assertEqual(private.fingerprint, public.fingerprint) def 
test_dsa_generate(self): public, private = asymmetric.generate_pair('dsa', bit_size=1024) self.assertEqual('dsa', public.algorithm) self.assertEqual(1024, public.bit_size) original_data = b'This is data to sign' signature = asymmetric.dsa_sign(private, original_data, 'sha1') self.assertIsInstance(signature, byte_cls) asymmetric.dsa_verify(public, signature, original_data, 'sha1') raw_public = asymmetric.dump_public_key(public) asymmetric.load_public_key(raw_public) raw_private = asymmetric.dump_private_key(private, None) asymmetric.load_private_key(raw_private, None) self.assertIsInstance(private.fingerprint, byte_cls) self.assertIsInstance(public.fingerprint, byte_cls) self.assertEqual(private.fingerprint, public.fingerprint) def test_ec_generate(self): public, private = asymmetric.generate_pair('ec', curve='secp256r1') self.assertEqual('ec', public.algorithm) self.assertEqual('secp256r1', public.asn1.curve[1]) original_data = b'This is data to sign' signature = asymmetric.ecdsa_sign(private, original_data, 'sha1') self.assertIsInstance(signature, byte_cls) asymmetric.ecdsa_verify(public, signature, original_data, 'sha1') raw_public = asymmetric.dump_public_key(public) asymmetric.load_public_key(raw_public) raw_private = asymmetric.dump_private_key(private, None) asymmetric.load_private_key(raw_private, None) self.assertIsInstance(private.fingerprint, byte_cls) self.assertIsInstance(public.fingerprint, byte_cls) self.assertEqual(private.fingerprint, public.fingerprint) def test_rsa_verify(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_signature'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'sha1') def test_rsa_verify_key_size_mismatch(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with 
open(os.path.join(fixtures_dir, 'rsa_signature'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-4096.crt')) with self.assertRaises(errors.SignatureError): asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'sha1') def test_rsa_verify_fail(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_signature'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) with self.assertRaises(errors.SignatureError): asymmetric.rsa_pkcs1v15_verify(public, signature, original_data + b'1', 'sha1') def test_rsa_verify_fail_each_byte(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_signature'), 'rb') as f: original_signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) for i in range(0, len(original_signature)): if i == 0: signature = b'\xab' + original_signature[1:] elif i == len(original_signature) - 1: signature = original_signature[0:-1] + b'\xab' else: signature = original_signature[0:i] + b'\xab' + original_signature[i + 1:] with self.assertRaises(errors.SignatureError): asymmetric.rsa_pkcs1v15_verify(public, signature, original_data + b'1', 'sha1') def test_rsa_pss_verify(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_pss_signature'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) asymmetric.rsa_pss_verify(public, signature, original_data, 'sha1') def test_rsa_pss_verify_pss_cert(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_pss_signature_pss_cert'), 'rb') as f: signature = 
f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-pss.crt')) asymmetric.rsa_pss_verify(public, signature, original_data, 'sha256') def test_rsa_pss_verify_fail(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_pss_signature'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) with self.assertRaises(errors.SignatureError): asymmetric.rsa_pss_verify(public, signature, original_data + b'1', 'sha1') def test_rsa_pss_verify_pss_cert_fail(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_pss_signature_pss_cert'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-pss.crt')) with self.assertRaises(errors.SignatureError): asymmetric.rsa_pss_verify(public, signature, original_data + b'1', 'sha256') def test_rsa_raw_verify(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_signature_raw'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'raw') def test_rsa_raw_verify_fail(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'rsa_signature_raw'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) with self.assertRaises(errors.SignatureError): asymmetric.rsa_pkcs1v15_verify(public, signature, original_data + b'1', 'raw') def test_dsa_verify(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'dsa_signature'), 'rb') 
as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt')) asymmetric.dsa_verify(public, signature, original_data, 'sha1') def test_dsa_verify_key_size_mismatch(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'dsa_signature'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-512.crt')) with self.assertRaises(errors.SignatureError): asymmetric.dsa_verify(public, signature, original_data, 'sha1') def test_dsa_verify_fail(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'dsa_signature'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt')) with self.assertRaises(errors.SignatureError): asymmetric.dsa_verify(public, signature, original_data + b'1', 'sha1') def test_dsa_verify_fail_each_byte(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'dsa_signature'), 'rb') as f: original_signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt')) for i in range(0, len(original_signature)): if i == 0: signature = b'\xab' + original_signature[1:] elif i == len(original_signature) - 1: signature = original_signature[0:-1] + b'\xab' else: signature = original_signature[0:i] + b'\xab' + original_signature[i+1:] with self.assertRaises(errors.SignatureError): asymmetric.dsa_verify(public, signature, original_data + b'1', 'sha1') def test_ecdsa_verify(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'ecdsa_signature'), 'rb') as f: signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 
'keys/test-public-ec-named.key')) asymmetric.ecdsa_verify(public, signature, original_data, 'sha1') def test_ecdsa_verify_fail_each_byte(self): with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f: original_data = f.read() with open(os.path.join(fixtures_dir, 'ecdsa_signature'), 'rb') as f: original_signature = f.read() public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-public-ec-named.key')) for i in range(0, len(original_signature)): if i == 0: signature = b'\xab' + original_signature[1:] elif i == len(original_signature) - 1: signature = original_signature[0:-1] + b'\xab' else: signature = original_signature[0:i] + b'\xab' + original_signature[i+1:] with self.assertRaises(errors.SignatureError): asymmetric.ecdsa_verify(public, signature, original_data + b'1', 'sha1') def test_rsa_pkcs1v15_encrypt(self): original_data = b'This is data to encrypt' private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key')) public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) ciphertext = asymmetric.rsa_pkcs1v15_encrypt(public, original_data) self.assertIsInstance(ciphertext, byte_cls) plaintext = asymmetric.rsa_pkcs1v15_decrypt(private, ciphertext) self.assertEqual(original_data, plaintext) def test_rsa_oaep_encrypt(self): original_data = b'This is data to encrypt' private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key')) public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt')) ciphertext = asymmetric.rsa_oaep_encrypt(public, original_data) self.assertIsInstance(ciphertext, byte_cls) plaintext = asymmetric.rsa_oaep_decrypt(private, ciphertext) self.assertEqual(original_data, plaintext) def test_rsa_private_pkcs1v15_decrypt(self): original_data = b'This is the message to sign' private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key')) with open(os.path.join(fixtures_dir, 'rsa_public_encrypted'), 'rb') as f: plaintext = 
    # NOTE(review): the two statements below are the tail of a test method
    # (apparently test_rsa_private_pkcs1v15_decrypt) whose `def` line lies
    # before this chunk; the first expression completes an assignment
    # (`plaintext = ...`) started on the previous, unseen line.
            asymmetric.rsa_pkcs1v15_decrypt(private, f.read())
        self.assertEqual(original_data, plaintext)

    # ----- RSA-OAEP decryption -----

    def test_rsa_private_oaep_decrypt(self):
        # Decrypt a pre-encrypted OAEP fixture with the test private key and
        # check the round-tripped plaintext.
        original_data = b'This is the message to sign'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
        with open(os.path.join(fixtures_dir, 'rsa_public_encrypted_oaep'), 'rb') as f:
            plaintext = asymmetric.rsa_oaep_decrypt(private, f.read())
        self.assertEqual(original_data, plaintext)

    # ----- RSA PKCS#1 v1.5 sign/verify and key-object behavior -----

    def test_rsa_sign(self):
        # Sign with the private key, then verify with the matching public key;
        # verify raising would fail the test.
        original_data = b'This is data to sign'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
        signature = asymmetric.rsa_pkcs1v15_sign(private, original_data, 'sha1')
        self.assertIsInstance(signature, byte_cls)
        asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'sha1')

    def test_rsa_fingerprint(self):
        # A key pair loaded from .key/.crt must expose identical fingerprints.
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
        self.assertIsInstance(private.fingerprint, byte_cls)
        self.assertIsInstance(public.fingerprint, byte_cls)
        self.assertEqual(private.fingerprint, public.fingerprint)

    def test_rsa_public_key_attr(self):
        # The public key derived from the private key must match the one
        # loaded from the certificate (compared via DER serialization).
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
        computed_public = private.public_key
        self.assertEqual(public.asn1.dump(), computed_public.asn1.dump())

    def test_rsa_private_key_unwrap(self):
        # unwrap() exposes the underlying asn1crypto structure.
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
        self.assertIsInstance(private.unwrap(), keys.RSAPrivateKey)

    def test_rsa_public_key_unwrap(self):
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
        self.assertIsInstance(public.unwrap(), keys.RSAPublicKey)

    # ----- RSA-PSS sign/verify (plain certs and RSASSA-PSS certs) -----

    def test_rsa_pss_sign(self):
        original_data = b'This is data to sign'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
        signature = asymmetric.rsa_pss_sign(private, original_data, 'sha1')
        self.assertIsInstance(signature, byte_cls)
        asymmetric.rsa_pss_verify(public, signature, original_data, 'sha1')

    def test_rsa_pss_sign_pss_cert(self):
        # Same flow, but with a certificate whose algorithm id is RSASSA-PSS.
        original_data = b'This is data to sign'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-pss.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-pss.crt'))
        signature = asymmetric.rsa_pss_sign(private, original_data, 'sha1')
        self.assertIsInstance(signature, byte_cls)
        asymmetric.rsa_pss_verify(public, signature, original_data, 'sha1')

    def test_rsa_pss_sha256_sign(self):
        original_data = b'This is data to sign'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
        signature = asymmetric.rsa_pss_sign(private, original_data, 'sha256')
        self.assertIsInstance(signature, byte_cls)
        asymmetric.rsa_pss_verify(public, signature, original_data, 'sha256')

    def test_rsa_pss_sha256_sign_pss_cert(self):
        original_data = b'This is data to sign'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-pss.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-pss.crt'))
        signature = asymmetric.rsa_pss_sign(private, original_data, 'sha256')
        self.assertIsInstance(signature, byte_cls)
        asymmetric.rsa_pss_verify(public, signature, original_data, 'sha256')

    def test_rsa_raw_sign(self):
        # 'raw' means the data is signed without being hashed first.
        original_data = b'This is data to sign!'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
        signature = asymmetric.rsa_pkcs1v15_sign(private, original_data, 'raw')
        self.assertIsInstance(signature, byte_cls)
        asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'raw')

    # ----- DSA sign/verify across key sizes and platform capabilities -----

    def test_dsa_sign(self):
        original_data = b'This is data to sign'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
        signature = asymmetric.dsa_sign(private, original_data, 'sha1')
        self.assertIsInstance(signature, byte_cls)
        asymmetric.dsa_verify(public, signature, original_data, 'sha1')

    def test_dsa_fingerprint(self):
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
        self.assertIsInstance(private.fingerprint, byte_cls)
        self.assertIsInstance(public.fingerprint, byte_cls)
        self.assertEqual(private.fingerprint, public.fingerprint)

    def test_dsa_public_key_attr(self):
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
        computed_public = private.public_key
        self.assertEqual(public.asn1.dump(), computed_public.asn1.dump())

    def test_dsa_private_key_unwrap(self):
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.key'))
        self.assertIsInstance(private.unwrap(), keys.DSAPrivateKey)

    def test_dsa_public_key_unwrap(self):
        # A DSA public key unwraps to a bare Integer (the public value y).
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
        self.assertIsInstance(public.unwrap(), core.Integer)

    def test_dsa_2048_sha1_sign(self):
        # On win32 the test expects AsymmetricKeyError (per the assertRaises
        # below); elsewhere a 2048-bit DSA/SHA-1 sign+verify must succeed.
        def do_run():
            original_data = b'This is data to sign'
            private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-2048.key'))
            public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-2048.crt'))
            signature = asymmetric.dsa_sign(private, original_data, 'sha1')
            self.assertIsInstance(signature, byte_cls)
            asymmetric.dsa_verify(public, signature, original_data, 'sha1')

        if sys.platform == 'win32':
            with self.assertRaises(errors.AsymmetricKeyError):
                do_run()
        else:
            do_run()

    def test_dsa_2048_sha2_sign(self):
        # _should_support_sha2() is a module-level helper defined outside this
        # chunk — presumably a platform-capability probe; verify at source.
        def do_run():
            original_data = b'This is data to sign'
            private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-2048-sha2.key'))
            public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-2048-sha2.crt'))
            signature = asymmetric.dsa_sign(private, original_data, 'sha256')
            self.assertIsInstance(signature, byte_cls)
            asymmetric.dsa_verify(public, signature, original_data, 'sha256')

        if not _should_support_sha2():
            with self.assertRaises(errors.AsymmetricKeyError):
                do_run()
        else:
            do_run()

    def test_dsa_3072_sign(self):
        def do_run():
            original_data = b'This is data to sign'
            private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa.key'))
            public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa.crt'))
            signature = asymmetric.dsa_sign(private, original_data, 'sha256')
            self.assertIsInstance(signature, byte_cls)
            asymmetric.dsa_verify(public, signature, original_data, 'sha256')

        if not _should_support_sha2():
            with self.assertRaises(errors.AsymmetricKeyError):
                do_run()
        else:
            do_run()

    def test_dsa_3072_sign_sha1(self):
        # Expected failure modes differ per backend; _backend, openssl_098 and
        # _win_version_pair() are module-level names defined outside this chunk.
        def do_run():
            original_data = b'This is data to sign'
            private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa.key'))
            public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa.crt'))
            signature = asymmetric.dsa_sign(private, original_data, 'sha1')
            self.assertIsInstance(signature, byte_cls)
            asymmetric.dsa_verify(public, signature, original_data, 'sha1')

        if _backend == 'mac' or openssl_098 or _backend == 'winlegacy':
            with self.assertRaises(errors.AsymmetricKeyError):
                do_run()
        elif _backend == 'win':
            # Windows < 6.2 (pre-Windows 8) raises a different exception type.
            if _win_version_pair() < (6, 2):
                exception_class = errors.AsymmetricKeyError
            else:
                exception_class = ValueError
            with self.assertRaises(exception_class):
                do_run()
        else:
            do_run()

    # ----- ECDSA sign/verify and EC key-object behavior -----

    def test_ecdsa_sign(self):
        original_data = b'This is data to sign'
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
        signature = asymmetric.ecdsa_sign(private, original_data, 'sha1')
        self.assertIsInstance(signature, byte_cls)
        asymmetric.ecdsa_verify(public, signature, original_data, 'sha1')

    def test_ec_fingerprints(self):
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
        self.assertIsInstance(private.fingerprint, byte_cls)
        self.assertIsInstance(public.fingerprint, byte_cls)
        self.assertEqual(private.fingerprint, public.fingerprint)

    def test_ec_public_key_attr(self):
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
        computed_public = private.public_key
        self.assertEqual(public.asn1.dump(), computed_public.asn1.dump())

    def test_ec_private_key_unwrap(self):
        private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
        self.assertIsInstance(private.unwrap(), keys.ECPrivateKey)

    def test_ec_public_key_unwrap(self):
        public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
        self.assertIsInstance(public.unwrap(), keys.ECPointBitString)

    def test_macos_public_key_export_issue(self):
        # This was failing on Apple Silicon Macs with macOS 12+
        cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'macos_12_public_key_export_issue.crt'))
        public = cert.public_key
        self.assertEqual('rsa', public.asn1.algorithm)
{ "content_hash": "e30b3b01a1aba782e51abd976c9424a6", "timestamp": "", "source": "github", "line_count": 636, "max_line_length": 110, "avg_line_length": 47.02358490566038, "alnum_prop": 0.6521215768883539, "repo_name": "wbond/oscrypto", "id": "e1fca1c7e800ef89bba4e5d1742a8c80d14a5605", "size": "29923", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_asymmetric.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1104599" } ], "symlink_target": "" }
"""DB API v2.0 required
"""

import time
import datetime

import constants


class _DBAPITypeObject:
    """Helper comparing a DB field type against a set of type codes (PEP 249).

    NOTE(review): __cmp__ is only honored by Python 2; under Python 3 these
    objects fall back to default equality/ordering — confirm which runtime
    this module targets (`Binary = str` below also suggests Python 2).
    """

    def __init__(self, *values):
        # The type codes this type object should compare equal to.
        self.values = values

    def __cmp__(self, other):
        # Equal when `other` is one of our type codes.
        if other in self.values:
            return 0
        # NOTE(review): comparing a scalar against the tuple `self.values`
        # relies on Python 2's arbitrary cross-type ordering; presumably
        # inherited from a PEP 249 reference implementation — verify.
        if other < self.values:
            return 1
        else:
            return -1


# PEP 249 date/time constructors, aliased to the stdlib types.
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime


def DateFromTicks(ticks):
    """Build a Date from a POSIX timestamp (local time)."""
    return Date(*time.localtime(ticks)[:3])


def TimeFromTicks(ticks):
    """Build a Time from a POSIX timestamp (local time)."""
    return Time(*time.localtime(ticks)[3:6])


def TimestampFromTicks(ticks):
    """Build a Timestamp from a POSIX timestamp (local time)."""
    return Timestamp(*time.localtime(ticks)[:6])


# PEP 249 Binary constructor; `str` is the bytes type on Python 2 only.
Binary = str

# NOTE(review): each get_*_types() call is passed as ONE positional argument,
# so _DBAPITypeObject.values becomes a 1-tuple containing that collection and
# `other in self.values` compares against the whole collection rather than its
# members — confirm whether a star-unpack was intended.
STRING = _DBAPITypeObject(constants.FieldType.get_string_types())
BINARY = _DBAPITypeObject(constants.FieldType.get_binary_types())
NUMBER = _DBAPITypeObject(constants.FieldType.get_number_types())
DATETIME = _DBAPITypeObject(constants.FieldType.get_timestamp_types())
ROWID = _DBAPITypeObject()
{ "content_hash": "46dc01122a05795652e68dce7344221f", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 70, "avg_line_length": 23.29268292682927, "alnum_prop": 0.6785340314136126, "repo_name": "GetSomeBlocks/Score_Soccer", "id": "57fdcf1624ec5f466557d944649e19ca689451f8", "size": "2069", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "resources/lib/mysql-connector-python/mysql/connector/dbapi.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "930" }, { "name": "C", "bytes": "293000" }, { "name": "C#", "bytes": "9664" }, { "name": "CSS", "bytes": "24716" }, { "name": "D", "bytes": "542" }, { "name": "HTML", "bytes": "374176" }, { "name": "Java", "bytes": "206" }, { "name": "Objective-C", "bytes": "9421" }, { "name": "Python", "bytes": "8744725" }, { "name": "Ruby", "bytes": "6773" }, { "name": "Shell", "bytes": "13600" } ], "symlink_target": "" }
"""Support for interacting with Spotify Connect."""
from asyncio import run_coroutine_threadsafe
import datetime as dt
from datetime import timedelta
import logging
from typing import Any, Callable, Dict, List, Optional

from aiohttp import ClientError
from spotipy import Spotify, SpotifyException
from yarl import URL

from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
    MEDIA_TYPE_MUSIC,
    MEDIA_TYPE_PLAYLIST,
    SUPPORT_NEXT_TRACK,
    SUPPORT_PAUSE,
    SUPPORT_PLAY,
    SUPPORT_PLAY_MEDIA,
    SUPPORT_PREVIOUS_TRACK,
    SUPPORT_SEEK,
    SUPPORT_SELECT_SOURCE,
    SUPPORT_SHUFFLE_SET,
    SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
    CONF_ID,
    CONF_NAME,
    STATE_IDLE,
    STATE_PAUSED,
    STATE_PLAYING,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.config_entry_oauth2_flow import OAuth2Session
from homeassistant.helpers.entity import Entity
from homeassistant.util.dt import utc_from_timestamp

from .const import DATA_SPOTIFY_CLIENT, DATA_SPOTIFY_ME, DATA_SPOTIFY_SESSION, DOMAIN

_LOGGER = logging.getLogger(__name__)

ICON = "mdi:spotify"

# Polling interval used by Home Assistant for the update() method below.
SCAN_INTERVAL = timedelta(seconds=30)

# Bitmask of every media-player feature this integration implements.
SUPPORT_SPOTIFY = (
    SUPPORT_NEXT_TRACK
    | SUPPORT_PAUSE
    | SUPPORT_PLAY
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_SEEK
    | SUPPORT_SELECT_SOURCE
    | SUPPORT_SHUFFLE_SET
    | SUPPORT_VOLUME_SET
)


async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
    """Set up Spotify based on a config entry."""
    # Session/client/profile objects were stashed by the component setup.
    spotify = SpotifyMediaPlayer(
        hass.data[DOMAIN][entry.entry_id][DATA_SPOTIFY_SESSION],
        hass.data[DOMAIN][entry.entry_id][DATA_SPOTIFY_CLIENT],
        hass.data[DOMAIN][entry.entry_id][DATA_SPOTIFY_ME],
        entry.data[CONF_ID],
        entry.data[CONF_NAME],
    )
    async_add_entities([spotify], True)


def spotify_exception_handler(func):
    """Decorate Spotify calls to handle Spotify exception.

    A decorator that wraps the passed in function, catches Spotify errors,
    aiohttp exceptions and handles the availability of the media player.
    """

    def wrapper(self, *args, **kwargs):
        try:
            result = func(self, *args, **kwargs)
            # Any successful call marks the player available again.
            self.player_available = True
            return result
        except (SpotifyException, ClientError):
            # On API/network failure the wrapped call returns None and the
            # entity is flagged unavailable until a call succeeds.
            self.player_available = False

    return wrapper


class SpotifyMediaPlayer(MediaPlayerEntity):
    """Representation of a Spotify controller."""

    def __init__(
        self,
        session: OAuth2Session,
        spotify: Spotify,
        me: dict,
        user_id: str,
        name: str,
    ):
        """Initialize."""
        self._id = user_id
        self._me = me
        self._name = f"Spotify {name}"
        self._session = session
        self._spotify = spotify
        self._currently_playing: Optional[dict] = {}
        self._devices: Optional[List[dict]] = []
        self._playlist: Optional[dict] = None
        # NOTE(review): this overwrites the `spotify` client assigned four
        # lines above with None; update() then rebuilds a client from the
        # session token on the first refresh — confirm the earlier assignment
        # (and the `spotify` parameter) is intentional.
        self._spotify: Spotify = None

        self.player_available = False

    @property
    def name(self) -> str:
        """Return the name."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the icon."""
        return ICON

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self.player_available

    @property
    def unique_id(self) -> str:
        """Return the unique ID."""
        return self._id

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device information about this entity."""
        # Implicitly returns None when the profile is missing.
        if self._me is not None:
            model = self._me["product"]

        return {
            "identifiers": {(DOMAIN, self._id)},
            "manufacturer": "Spotify AB",
            "model": f"Spotify {model}".rstrip(),
            "name": self._name,
        }

    @property
    def state(self) -> Optional[str]:
        """Return the playback state."""
        if not self._currently_playing:
            return STATE_IDLE
        if self._currently_playing["is_playing"]:
            return STATE_PLAYING
        return STATE_PAUSED

    @property
    def volume_level(self) -> Optional[float]:
        """Return the device volume."""
        # Spotify reports 0-100; Home Assistant expects 0.0-1.0.
        return self._currently_playing.get("device", {}).get("volume_percent", 0) / 100

    @property
    def media_content_id(self) -> Optional[str]:
        """Return the media URL."""
        # NOTE(review): this returns the track *name* — the same value as
        # media_title below; presumably the item URI was intended — confirm.
        item = self._currently_playing.get("item") or {}
        return item.get("name")

    @property
    def media_content_type(self) -> Optional[str]:
        """Return the media type."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_duration(self) -> Optional[float]:
        """Duration of current playing media in seconds."""
        if self._currently_playing.get("item") is None:
            return None
        # True division: the result is a float number of seconds.
        return self._currently_playing["item"]["duration_ms"] / 1000

    @property
    def media_position(self) -> Optional[float]:
        """Position of current playing media in seconds."""
        # (annotation corrected: the value returned is a float, not a str)
        if not self._currently_playing:
            return None
        return self._currently_playing["progress_ms"] / 1000

    @property
    def media_position_updated_at(self) -> Optional[dt.datetime]:
        """When was the position of the current playing media valid."""
        if not self._currently_playing:
            return None
        # Spotify timestamps are in milliseconds since the epoch.
        return utc_from_timestamp(self._currently_playing["timestamp"] / 1000)

    @property
    def media_image_url(self) -> Optional[str]:
        """Return the media image URL."""
        if (
            self._currently_playing.get("item") is None
            or not self._currently_playing["item"]["album"]["images"]
        ):
            return None
        return self._currently_playing["item"]["album"]["images"][0]["url"]

    @property
    def media_image_remotely_accessible(self) -> bool:
        """If the image url is remotely accessible."""
        return False

    @property
    def media_title(self) -> Optional[str]:
        """Return the media title."""
        item = self._currently_playing.get("item") or {}
        return item.get("name")

    @property
    def media_artist(self) -> Optional[str]:
        """Return the media artist."""
        if self._currently_playing.get("item") is None:
            return None
        return ", ".join(
            [artist["name"] for artist in self._currently_playing["item"]["artists"]]
        )

    @property
    def media_album_name(self) -> Optional[str]:
        """Return the media album."""
        if self._currently_playing.get("item") is None:
            return None
        return self._currently_playing["item"]["album"]["name"]

    @property
    def media_track(self) -> Optional[int]:
        """Track number of current playing media, music track only."""
        item = self._currently_playing.get("item") or {}
        return item.get("track_number")

    @property
    def media_playlist(self):
        """Title of Playlist currently playing."""
        if self._playlist is None:
            return None
        return self._playlist["name"]

    @property
    def source(self) -> Optional[str]:
        """Return the current playback device."""
        return self._currently_playing.get("device", {}).get("name")

    @property
    def source_list(self) -> Optional[List[str]]:
        """Return a list of source devices."""
        if not self._devices:
            return None
        return [device["name"] for device in self._devices]

    @property
    def shuffle(self) -> bool:
        """Shuffling state."""
        return bool(self._currently_playing.get("shuffle_state"))

    @property
    def supported_features(self) -> int:
        """Return the media player features that are supported."""
        # Playback control requires a Spotify Premium account.
        if self._me["product"] != "premium":
            return 0
        return SUPPORT_SPOTIFY

    @spotify_exception_handler
    def set_volume_level(self, volume: float) -> None:
        """Set the volume level (0.0-1.0)."""
        # (annotation corrected: HA passes a 0.0-1.0 float, scaled to 0-100)
        self._spotify.volume(int(volume * 100))

    @spotify_exception_handler
    def media_play(self) -> None:
        """Start or resume playback."""
        self._spotify.start_playback()

    @spotify_exception_handler
    def media_pause(self) -> None:
        """Pause playback."""
        self._spotify.pause_playback()

    @spotify_exception_handler
    def media_previous_track(self) -> None:
        """Skip to previous track."""
        self._spotify.previous_track()

    @spotify_exception_handler
    def media_next_track(self) -> None:
        """Skip to next track."""
        self._spotify.next_track()

    @spotify_exception_handler
    def media_seek(self, position):
        """Send seek command."""
        # Seconds from HA, milliseconds to the Spotify API.
        self._spotify.seek_track(int(position * 1000))

    @spotify_exception_handler
    def play_media(self, media_type: str, media_id: str, **kwargs) -> None:
        """Play media."""
        # NOTE(review): incoming **kwargs are discarded here and the dict is
        # rebuilt from scratch — confirm callers never pass extra options.
        kwargs = {}

        # Spotify can't handle URI's with query strings or anchors
        # Yet, they do generate those types of URI in their official clients.
        media_id = str(URL(media_id).with_query(None).with_fragment(None))

        if media_type == MEDIA_TYPE_MUSIC:
            kwargs["uris"] = [media_id]
        elif media_type == MEDIA_TYPE_PLAYLIST:
            kwargs["context_uri"] = media_id
        else:
            _LOGGER.error("Media type %s is not supported", media_type)
            return

        self._spotify.start_playback(**kwargs)

    @spotify_exception_handler
    def select_source(self, source: str) -> None:
        """Select playback device."""
        for device in self._devices:
            if device["name"] == source:
                # Keep playing only if we are currently playing.
                self._spotify.transfer_playback(
                    device["id"], self.state == STATE_PLAYING
                )
                return

    @spotify_exception_handler
    def update(self) -> None:
        """Update state and attributes."""
        if not self.enabled:
            return

        # (Re)build the API client when the token expired or on first run
        # (self._spotify is None after __init__ — see the note there).
        if not self._session.valid_token or self._spotify is None:
            run_coroutine_threadsafe(
                self._session.async_ensure_token_valid(), self.hass.loop
            ).result()
            self._spotify = Spotify(auth=self._session.token["access_token"])

        current = self._spotify.current_playback()
        self._currently_playing = current or {}

        # Resolve the playlist title only when playing from a playlist context.
        self._playlist = None
        context = self._currently_playing.get("context")
        if context is not None and context["type"] == MEDIA_TYPE_PLAYLIST:
            self._playlist = self._spotify.playlist(current["context"]["uri"])

        devices = self._spotify.devices() or {}
        self._devices = devices.get("devices", [])
{ "content_hash": "421d4282a7259051bfa2f5091ca6ecd9", "timestamp": "", "source": "github", "line_count": 357, "max_line_length": 87, "avg_line_length": 31.03921568627451, "alnum_prop": 0.6085190867250249, "repo_name": "robbiet480/home-assistant", "id": "1b74855c9f99fc3f8ced3352adc72d0f0db7c526", "size": "11081", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "homeassistant/components/spotify/media_player.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "18837456" }, { "name": "Shell", "bytes": "6846" } ], "symlink_target": "" }
from __future__ import absolute_import from blockcanvas.numerical_modeling.numeric_context.nan_filter import *
{ "content_hash": "559e78fec72f438cdb6b2732c97b7c57", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 71, "avg_line_length": 55.5, "alnum_prop": 0.8198198198198198, "repo_name": "enthought/etsproxy", "id": "aba722ae2658ff3e088611e78ac2f0131d2824a9", "size": "126", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "enthought/numerical_modeling/numeric_context/nan_filter.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "363714" } ], "symlink_target": "" }
from google.cloud import securitycenter_v1beta1


def sample_update_source():
    """Issue an UpdateSource RPC against the Security Center API and print the result."""
    # Build an API client for the Security Center service.
    security_client = securitycenter_v1beta1.SecurityCenterClient()

    # Assemble the (empty) update request.
    update_request = securitycenter_v1beta1.UpdateSourceRequest(
    )

    # Send the RPC and show whatever the service returned.
    updated_source = security_client.update_source(request=update_request)
    print(updated_source)

# [END securitycenter_v1beta1_generated_SecurityCenter_UpdateSource_sync]
{ "content_hash": "86495192c9909f9393285ac2af863881", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 73, "avg_line_length": 25.555555555555557, "alnum_prop": 0.7456521739130435, "repo_name": "googleapis/python-securitycenter", "id": "16168c1bcb747522259ecf11d98cdf8222e3b1be", "size": "1863", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "samples/generated_samples/securitycenter_v1beta1_generated_security_center_update_source_sync.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "2740673" }, { "name": "Shell", "bytes": "30684" } ], "symlink_target": "" }
from __future__ import unicode_literals

from collections import defaultdict
from operator import attrgetter

from flask import flash, jsonify, request, session
from sqlalchemy.orm import joinedload, subqueryload

from indico.core.db import db
from indico.modules.events.abstracts.controllers.base import RHManageAbstractsBase
from indico.modules.events.abstracts.controllers.common import (AbstractsDownloadAttachmentsMixin, AbstractsExportCSV,
                                                                AbstractsExportExcel, AbstractsExportPDFMixin,
                                                                CustomizeAbstractListMixin, DisplayAbstractListMixin)
from indico.modules.events.abstracts.forms import BulkAbstractJudgmentForm
from indico.modules.events.abstracts.lists import AbstractListGeneratorManagement
from indico.modules.events.abstracts.models.abstracts import Abstract, AbstractState
from indico.modules.events.abstracts.models.persons import AbstractPersonLink
from indico.modules.events.abstracts.operations import create_abstract, delete_abstract, judge_abstract
from indico.modules.events.abstracts.schemas import abstract_review_questions_schema, abstracts_schema
from indico.modules.events.abstracts.util import can_create_invited_abstracts, make_abstract_form
from indico.modules.events.abstracts.views import WPManageAbstracts
from indico.modules.events.contributions.models.persons import AuthorType
from indico.modules.events.util import get_field_values
from indico.modules.users.models.users import User
from indico.util.i18n import _, ngettext
from indico.web.util import jsonify_data, jsonify_form, jsonify_template


class RHAbstractListBase(RHManageAbstractsBase):
    """Base class for all RHs using the abstract list generator"""

    def _process_args(self):
        RHManageAbstractsBase._process_args(self)
        # List generator drives filtering/rendering of the abstract list.
        self.list_generator = AbstractListGeneratorManagement(event=self.event)


class RHManageAbstractsActionsBase(RHAbstractListBase):
    """Base class for RHs performing actions on selected abstracts"""

    # Subclasses may set eager-loading options for the abstract query.
    _abstract_query_options = ()

    @property
    def _abstract_query(self):
        query = Abstract.query.with_parent(self.event)
        if self._abstract_query_options:
            query = query.options(*self._abstract_query_options)
        return query

    def _process_args(self):
        RHAbstractListBase._process_args(self)
        # Selected abstracts arrive as repeated `abstract_id` form fields.
        ids = map(int, request.form.getlist('abstract_id'))
        self.abstracts = self._abstract_query.filter(Abstract.id.in_(ids)).all()


class RHBulkAbstractJudgment(RHManageAbstractsActionsBase):
    """Perform bulk judgment operations on selected abstracts"""

    def _process(self):
        form = BulkAbstractJudgmentForm(event=self.event, abstract_id=[a.id for a in self.abstracts],
                                        judgment=request.form.get('judgment'))
        if form.validate_on_submit():
            judgment_data, abstract_data = form.split_data
            # Only abstracts still in the `submitted` state can be judged;
            # the rest are skipped and reported separately below.
            submitted_abstracts = {abstract for abstract in self.abstracts
                                   if abstract.state == AbstractState.submitted}
            for abstract in submitted_abstracts:
                judge_abstract(abstract, abstract_data, judge=session.user, **judgment_data)
            num_judged_abstracts = len(submitted_abstracts)
            num_prejudged_abstracts = len(self.abstracts) - num_judged_abstracts
            if num_judged_abstracts:
                flash(ngettext("One abstract has been judged.",
                               "{num} abstracts have been judged.",
                               num_judged_abstracts).format(num=num_judged_abstracts), 'success')
            if num_prejudged_abstracts:
                flash(ngettext("One abstract has been skipped since it is already judged.",
                               "{num} abstracts have been skipped since they are already judged.",
                               num_prejudged_abstracts).format(num=num_prejudged_abstracts), 'warning')
            return jsonify_data(**self.list_generator.render_list())
        return jsonify_form(form=form, fields=form._order, submit=_('Judge'), disabled_until_change=False)


class RHAbstractList(DisplayAbstractListMixin, RHAbstractListBase):
    """Render the management-side abstract list."""

    template = 'management/abstract_list.html'
    view_class = WPManageAbstracts

    def _render_template(self, **kwargs):
        # Map each track to its default session for the list template.
        kwargs['track_session_map'] = {track.id: track.default_session_id for track in self.event.tracks}
        can_create = can_create_invited_abstracts(self.event)
        return super(RHAbstractList, self)._render_template(can_create_invited_abstracts=can_create, **kwargs)


class RHAbstractListCustomize(CustomizeAbstractListMixin, RHAbstractListBase):
    """Customize the columns/filters of the abstract list."""

    view_class = WPManageAbstracts
    ALLOW_LOCKED = True


class RHAbstractListStaticURL(RHAbstractListBase):
    """Generate a static URL for the configuration of the abstract list"""

    ALLOW_LOCKED = True

    def _process(self):
        return jsonify(url=self.list_generator.generate_static_url())


class RHCreateAbstract(RHAbstractListBase):
    """Create a new abstract (regular or invited) from the management area."""

    def _process(self):
        is_invited = request.args.get('invited') == '1'
        abstract_form_class = make_abstract_form(self.event, session.user, notification_option=True,
                                                 management=self.management, invited=is_invited)
        form = abstract_form_class(event=self.event, management=self.management, invited=is_invited)
        if is_invited:
            # Invited abstracts skip the submission-specific fields.
            del form.submitted_contrib_type
            del form.attachments
            del form.send_notifications
            del form.person_links
        if form.validate_on_submit():
            data = form.data
            submitter = None
            if is_invited:
                if form.users_with_no_account.data == 'existing':
                    submitter = data['submitter']
                else:
                    # Create a pending user record for a not-yet-registered
                    # submitter; flush so the abstract can reference it.
                    submitter = User(first_name=data['first_name'], last_name=data['last_name'],
                                     email=data['email'], is_pending=True)
                    db.session.add(submitter)
                    db.session.flush()
                # Strip invited-only fields before they reach create_abstract.
                data.pop('first_name')
                data.pop('last_name')
                data.pop('email')
                data.pop('users_with_no_account')
                data.pop('submitter')
            # Invited abstracts always notify; others follow the checkbox.
            send_notifications = data.pop('send_notifications', is_invited)
            abstract = create_abstract(self.event, *get_field_values(data),
                                       send_notifications=send_notifications,
                                       submitter=submitter, is_invited=is_invited)
            flash(_("Abstract '{}' created successfully").format(abstract.title), 'success')
            tpl_components = self.list_generator.render_list(abstract)
            if tpl_components.get('hide_abstract'):
                # The new abstract is filtered out of the current view.
                self.list_generator.flash_info_message(abstract)
            return jsonify_data(**tpl_components)
        return jsonify_form(form, back=_("Cancel"), form_header_kwargs={'action': request.relative_url})


class RHDeleteAbstracts(RHManageAbstractsActionsBase):
    """Delete the selected abstracts (optionally with linked contributions)."""

    def _process(self):
        delete_contribs = request.values.get('delete_contribs') == '1'
        deleted_contrib_count = 0
        for abstract in self.abstracts:
            if delete_contribs and abstract.contribution:
                deleted_contrib_count += 1
            delete_abstract(abstract, delete_contribs)
        deleted_abstract_count = len(self.abstracts)
        flash(ngettext("The abstract has been deleted.",
                       "{count} abstracts have been deleted.",
                       deleted_abstract_count).format(count=deleted_abstract_count), 'success')
        if deleted_contrib_count:
            flash(ngettext("The linked contribution has been deleted.",
                           "{count} linked contributions have been deleted.",
                           deleted_contrib_count).format(count=deleted_contrib_count), 'success')
        return jsonify_data(**self.list_generator.render_list())


class RHAbstractPersonList(RHManageAbstractsActionsBase):
    """List of persons somehow related to abstracts (co-authors, speakers...)"""

    ALLOW_LOCKED = True

    @property
    def _membership_filter(self):
        abstract_ids = {abstract.id for abstract in self.abstracts}
        return Abstract.id.in_(abstract_ids)

    def _process(self):
        submitters = {abstract.submitter for abstract in self.abstracts}
        abstract_persons = AbstractPersonLink.find_all(AbstractPersonLink.abstract.has(self._membership_filter))
        # Aggregate role flags per person (or per user when one is linked).
        abstract_persons_dict = defaultdict(lambda: {'speaker': False, 'submitter': False,
                                                     'primary_author': False, 'secondary_author': False})
        for abstract_person in abstract_persons:
            dict_key = abstract_person.person.user if abstract_person.person.user else abstract_person.person
            person_roles = abstract_persons_dict[dict_key]
            person_roles['speaker'] |= abstract_person.is_speaker
            person_roles['primary_author'] |= abstract_person.author_type == AuthorType.primary
            person_roles['secondary_author'] |= abstract_person.author_type == AuthorType.secondary
        for submitter in submitters:
            abstract_persons_dict[submitter]['submitter'] |= True
        return jsonify_template('events/abstracts/management/abstract_person_list.html',
                                event_persons=abstract_persons_dict, event=self.event)


class RHManageAbstractsExportActionsBase(RHManageAbstractsActionsBase):
    """Base class for export actions; allowed even on locked events."""

    ALLOW_LOCKED = True


class RHAbstractsDownloadAttachments(AbstractsDownloadAttachmentsMixin, RHManageAbstractsExportActionsBase):
    pass


class RHAbstractsExportPDF(AbstractsExportPDFMixin, RHManageAbstractsExportActionsBase):
    pass


class RHAbstractsExportCSV(AbstractsExportCSV, RHManageAbstractsExportActionsBase):
    pass


class RHAbstractsExportExcel(AbstractsExportExcel, RHManageAbstractsExportActionsBase):
    pass


class RHAbstractsExportJSON(RHManageAbstractsExportActionsBase):
    """Export the selected abstracts (and review questions) as JSON."""

    # Eager-load everything the schema serializes to avoid N+1 queries.
    _abstract_query_options = (joinedload('submitter'),
                               joinedload('accepted_track'),
                               joinedload('accepted_contrib_type'),
                               joinedload('submitted_contrib_type'),
                               subqueryload('comments'),
                               subqueryload('field_values'),
                               subqueryload('submitted_for_tracks'),
                               subqueryload('reviewed_for_tracks'),
                               subqueryload('person_links'),
                               subqueryload('reviews').joinedload('ratings').joinedload('question'))

    def _process(self):
        abstracts = abstracts_schema.dump(sorted(self.abstracts, key=attrgetter('friendly_id')))
        questions = abstract_review_questions_schema.dump(self.event.abstract_review_questions)
        response = jsonify(version=1, abstracts=abstracts, questions=questions)
        response.headers['Content-Disposition'] = 'attachment; filename="abstracts.json"'
        return response
{ "content_hash": "1a3fd237054497e488b28745fa58560b", "timestamp": "", "source": "github", "line_count": 228, "max_line_length": 120, "avg_line_length": 49.083333333333336, "alnum_prop": 0.6636582968456796, "repo_name": "mic4ael/indico", "id": "198ff7d18b0f4586610787c6489a601c51beda4d", "size": "11405", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "indico/modules/events/abstracts/controllers/abstract_list.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "553825" }, { "name": "HTML", "bytes": "1375160" }, { "name": "JavaScript", "bytes": "1852830" }, { "name": "Mako", "bytes": "1340" }, { "name": "Python", "bytes": "4612709" }, { "name": "Shell", "bytes": "2665" }, { "name": "TeX", "bytes": "23292" }, { "name": "XSLT", "bytes": "1504" } ], "symlink_target": "" }
# Minimal PyQt5 demo: a window with one button whose click prints a message.
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton


def on_clic():
    # Slot connected to the button's `clicked` signal below.
    print("Hello!")


app = QApplication(sys.argv)

# The default constructor has no parent.
# A widget with no parent is a window.
window = QMainWindow()
window.resize(250, 150)
window.setWindowTitle('Hello')

# Parenting the button to `window` places it inside that window.
button = QPushButton('Hello', window)
button.clicked.connect(on_clic)

window.show()

# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()

# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
{ "content_hash": "205ebe63134bd5abb41886371ae0b4f2", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 120, "avg_line_length": 27.555555555555557, "alnum_prop": 0.7446236559139785, "repo_name": "jeremiedecock/snippets", "id": "deae82bfb79ad301b4280355470a3604f81a3bdd", "size": "846", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/pyqt/pyqt5/widget_QPushButton_clic_event.py", "mode": "33261", "license": "mit", "language": [ { "name": "AMPL", "bytes": "4294" }, { "name": "Batchfile", "bytes": "6779" }, { "name": "C", "bytes": "102107" }, { "name": "C++", "bytes": "320943" }, { "name": "CMake", "bytes": "11424" }, { "name": "CSS", "bytes": "21121" }, { "name": "Cython", "bytes": "21" }, { "name": "Dockerfile", "bytes": "1818" }, { "name": "Fortran", "bytes": "633" }, { "name": "Gnuplot", "bytes": "39999" }, { "name": "Go", "bytes": "3166" }, { "name": "Groovy", "bytes": "3009" }, { "name": "HTML", "bytes": "138995" }, { "name": "IDL", "bytes": "43" }, { "name": "Java", "bytes": "120221" }, { "name": "JavaScript", "bytes": "32342" }, { "name": "Jinja", "bytes": "206" }, { "name": "Jupyter Notebook", "bytes": "95991" }, { "name": "Lua", "bytes": "200" }, { "name": "M4", "bytes": "111" }, { "name": "MATLAB", "bytes": "31972" }, { "name": "Makefile", "bytes": "81307" }, { "name": "OpenSCAD", "bytes": "14995" }, { "name": "PHP", "bytes": "94" }, { "name": "Perl", "bytes": "46" }, { "name": "Processing", "bytes": "208" }, { "name": "Prolog", "bytes": "454" }, { "name": "Python", "bytes": "1685966" }, { "name": "R", "bytes": "76" }, { "name": "Raku", "bytes": "43" }, { "name": "Ruby", "bytes": "42" }, { "name": "Scheme", "bytes": "649" }, { "name": "Shell", "bytes": "52865" }, { "name": "Smalltalk", "bytes": "55" }, { "name": "TeX", "bytes": "1189" }, { "name": "Vue", "bytes": "49445" }, { "name": "XSLT", "bytes": "1816" } ], "symlink_target": "" }
def listOnOneLine(items):
    """Print every element of items followed by a single space.

    Note: this deliberately leaves a trailing space after the last element
    and does not emit a newline — the demo output below shows why that can
    surprise you.
    """
    for element in items:
        print('{} '.format(element), end='')


listOnOneLine(['apple', 'banana', 'pear'])
print('This may not be what you expected!')
{ "content_hash": "4799b04bb1843a90b334d51d165b2295", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 43, "avg_line_length": 27.666666666666668, "alnum_prop": 0.6445783132530121, "repo_name": "hwheeler01/comp150", "id": "d8fd56fec28ced104203678eb2c42a8e6b7b380a", "size": "166", "binary": false, "copies": "2", "ref": "refs/heads/gh-pages", "path": "_site/examples/endSpace1.py", "mode": "33261", "license": "mit", "language": [ { "name": "Assembly", "bytes": "11466" }, { "name": "Batchfile", "bytes": "28" }, { "name": "CSS", "bytes": "121532" }, { "name": "HTML", "bytes": "5858311" }, { "name": "JavaScript", "bytes": "524" }, { "name": "Jupyter Notebook", "bytes": "6422478" }, { "name": "Python", "bytes": "365319" } ], "symlink_target": "" }
from difflib import SequenceMatcher


def scour(find, tlist):
    """Sort tlist in place from most to least similar to find.

    Similarity is difflib's SequenceMatcher.ratio(); equally similar
    entries keep their original relative order (list.sort is stable).

    The previous implementation walked the list with a bare index,
    relied on IndexError to terminate, and computed one extra ratio per
    element only to throw the result away; it also stopped that walk
    early on any falsy element. The sort below is the only part that
    ever affected the result.

    Args:
        find: the string to compare against.
        tlist: list of candidate strings; mutated in place.

    Returns:
        tlist, sorted by descending similarity to find.
    """
    tlist.sort(key=lambda text: -SequenceMatcher(None, text, find).ratio())
    return tlist
{ "content_hash": "55ca506374d4a3c26517b71a4b604dde", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 58, "avg_line_length": 29.307692307692307, "alnum_prop": 0.5485564304461942, "repo_name": "notanewbie/BRIAN", "id": "8c10895ad84c3e2b24f72a61b4445e99b0ebac8a", "size": "381", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "findtext.py", "mode": "33188", "license": "mit", "language": [ { "name": "Perl", "bytes": "2" }, { "name": "Python", "bytes": "7684" } ], "symlink_target": "" }
'''Generates Go source files from a mojom.Module.''' from itertools import chain import os import re from mojom.generate.template_expander import UseJinja import mojom.generate.generator as generator import mojom.generate.module as mojom import mojom.generate.pack as pack class KindInfo(object): def __init__(self, go_type, encode_suffix, decode_suffix, bit_size): self.go_type = go_type self.encode_suffix = encode_suffix self.decode_suffix = decode_suffix self.bit_size = bit_size _kind_infos = { mojom.BOOL: KindInfo('bool', 'Bool', 'Bool', 1), mojom.INT8: KindInfo('int8', 'Int8', 'Int8', 8), mojom.UINT8: KindInfo('uint8', 'Uint8', 'Uint8', 8), mojom.INT16: KindInfo('int16', 'Int16', 'Int16', 16), mojom.UINT16: KindInfo('uint16', 'Uint16', 'Uint16', 16), mojom.INT32: KindInfo('int32', 'Int32', 'Int32', 32), mojom.UINT32: KindInfo('uint32', 'Uint32', 'Uint32', 32), mojom.FLOAT: KindInfo('float32', 'Float32', 'Float32', 32), mojom.HANDLE: KindInfo( 'system.Handle', 'Handle', 'Handle', 32), mojom.DCPIPE: KindInfo( 'system.ConsumerHandle', 'Handle', 'ConsumerHandle', 32), mojom.DPPIPE: KindInfo( 'system.ProducerHandle', 'Handle', 'ProducerHandle', 32), mojom.MSGPIPE: KindInfo( 'system.MessagePipeHandle', 'Handle', 'MessagePipeHandle', 32), mojom.SHAREDBUFFER: KindInfo( 'system.SharedBufferHandle', 'Handle', 'SharedBufferHandle', 32), mojom.NULLABLE_HANDLE: KindInfo( 'system.Handle', 'Handle', 'Handle', 32), mojom.NULLABLE_DCPIPE: KindInfo( 'system.ConsumerHandle', 'Handle', 'ConsumerHandle', 32), mojom.NULLABLE_DPPIPE: KindInfo( 'system.ProducerHandle', 'Handle', 'ProducerHandle', 32), mojom.NULLABLE_MSGPIPE: KindInfo( 'system.MessagePipeHandle', 'Handle', 'MessagePipeHandle', 32), mojom.NULLABLE_SHAREDBUFFER: KindInfo( 'system.SharedBufferHandle', 'Handle', 'SharedBufferHandle', 32), mojom.INT64: KindInfo('int64', 'Int64', 'Int64', 64), mojom.UINT64: KindInfo('uint64', 'Uint64', 'Uint64', 64), mojom.DOUBLE: KindInfo('float64', 'Float64', 'Float64', 64), mojom.STRING: 
KindInfo('string', 'String', 'String', 64), mojom.NULLABLE_STRING: KindInfo('string', 'String', 'String', 64), } _imports = {} def GetBitSize(kind): if isinstance(kind, (mojom.Union)): return 128 if isinstance(kind, (mojom.Array, mojom.Map, mojom.Struct, mojom.Interface)): return 64 if mojom.IsUnionKind(kind): return 2*64 if isinstance(kind, (mojom.InterfaceRequest)): kind = mojom.MSGPIPE if isinstance(kind, mojom.Enum): kind = mojom.INT32 return _kind_infos[kind].bit_size # Returns go type corresponding to provided kind. If |nullable| is true # and kind is nullable adds an '*' to type (example: ?string -> *string). def GetGoType(kind, nullable = True): if nullable and mojom.IsNullableKind(kind): return '*%s' % GetNonNullableGoType(kind) return GetNonNullableGoType(kind) # Returns go type corresponding to provided kind. Ignores nullability of # top-level kind. def GetNonNullableGoType(kind): if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind): return '%s' % GetFullName(kind) if mojom.IsArrayKind(kind): if kind.length: return '[%s]%s' % (kind.length, GetGoType(kind.kind)) return '[]%s' % GetGoType(kind.kind) if mojom.IsMapKind(kind): return 'map[%s]%s' % (GetGoType(kind.key_kind), GetGoType(kind.value_kind)) if mojom.IsInterfaceKind(kind): return '%sPointer' % GetFullName(kind) if mojom.IsInterfaceRequestKind(kind): return '%sRequest' % GetFullName(kind.kind) if mojom.IsEnumKind(kind): return GetNameForNestedElement(kind) return _kind_infos[kind].go_type # Splits name to lower-cased parts used for camel-casing # (example: HTTPEntry2FooBar -> ['http', 'entry2', 'foo', 'bar']). 
def NameToComponent(name):
  # Insert '_' boundaries into camel-cased identifiers, then split and
  # lower-case (e.g. HTTPEntry2FooBar -> ['http', 'entry2', 'foo', 'bar']).
  # First pass: break before a Title-cased run (HTTPEntry2FooBar ->
  # HTTP_Entry2Foo_Bar); second pass: break between a non-upper character
  # and the start of an upper-case block.
  with_title_breaks = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
  with_block_breaks = re.sub('([^A-Z_])([A-Z])', r'\1_\2', with_title_breaks)
  return [part.lower() for part in with_block_breaks.split('_')]

def UpperCamelCase(name):
  # Capitalize every component and glue them back together.
  return ''.join(part.capitalize() for part in NameToComponent(name))

# Formats a name. If |exported| is true makes name camel-cased with first
# letter capital, otherwise does no camel-casing and makes first letter
# lower-cased (which is used for making internal names more readable).
def FormatName(name, exported=True):
  if not exported:
    # Leave '_' symbols for unexported names.
    return name[0].lower() + name[1:]
  return UpperCamelCase(name)

# Returns full name of an imported element based on prebuilt dict |_imports|.
# If the |element| is not imported returns formatted name of it.
# |element| should have attr 'name'. |exported| argument is used to make
# |FormatName()| calls only.
def GetFullName(element, exported=True):
  imported = getattr(element, 'imported_from', None)
  if not imported:
    return FormatName(element.name, exported)
  path = ''
  if imported['module'].path:
    path += GetPackagePath(imported['module'])
  formatted = FormatName(element.name, exported)
  if path in _imports:
    return '%s.%s' % (_imports[path], formatted)
  return formatted
# Returns a name for nested elements like enum field or constant.
# The returned name consists of camel-cased parts separated by '_'.
def GetNameForNestedElement(element):
  """Name for a nested element (enum field, constant): 'Parent_Name'."""
  if element.parent_kind:
    return "%s_%s" % (GetNameForElement(element.parent_kind),
                      FormatName(element.name))
  return GetFullName(element)

def GetNameForElement(element, exported=True):
  """Dispatches to the right naming helper based on the element's type."""
  if (mojom.IsInterfaceKind(element) or mojom.IsStructKind(element) or
      mojom.IsUnionKind(element)):
    return GetFullName(element, exported)
  if isinstance(element, (mojom.EnumField, mojom.Field, mojom.Method,
                          mojom.Parameter)):
    return FormatName(element.name, exported)
  if isinstance(element, (mojom.Enum, mojom.Constant, mojom.ConstantValue)):
    return GetNameForNestedElement(element)
  raise Exception('Unexpected element: %s' % element)

def ExpressionToText(token):
  """Go source text for a constant expression (enum value/constant/literal)."""
  if isinstance(token, mojom.EnumValue):
    return "%s_%s" % (GetNameForNestedElement(token.enum),
                      FormatName(token.name, True))
  if isinstance(token, mojom.ConstantValue):
    return GetNameForNestedElement(token)
  if isinstance(token, mojom.Constant):
    # Constants resolve recursively through their value expression.
    return ExpressionToText(token.value)
  return token

def DecodeSuffix(kind):
  """Suffix of the bindings decoder method used to read |kind|."""
  if mojom.IsEnumKind(kind):
    # Enums are decoded as int32.
    return DecodeSuffix(mojom.INT32)
  if mojom.IsInterfaceKind(kind):
    return 'Interface'
  if mojom.IsInterfaceRequestKind(kind):
    # Interface requests are decoded as message pipe handles.
    return DecodeSuffix(mojom.MSGPIPE)
  return _kind_infos[kind].decode_suffix

def EncodeSuffix(kind):
  """Suffix of the bindings encoder method used to write |kind|."""
  if mojom.IsEnumKind(kind):
    # Enums are encoded as int32.
    return EncodeSuffix(mojom.INT32)
  if mojom.IsInterfaceKind(kind):
    return 'Interface'
  if mojom.IsInterfaceRequestKind(kind):
    # Interface requests are encoded as message pipe handles.
    return EncodeSuffix(mojom.MSGPIPE)
  return _kind_infos[kind].encode_suffix

def GetPackageName(module):
  """Go package name: the first component of the mojom module name."""
  return module.name.split('.')[0]

def GetPackagePath(module):
  """Path of the Go package holding the generated code for |module|."""
  name = module.name.split('.')[0]
  return '/'.join(module.path.split('/')[:-1] + [name])

def GetAllConstants(module):
  """All constants declared at module, struct and interface level."""
  data = [module] + module.structs + module.interfaces
  constants = [x.constants for x in data]
  return [i for i in chain.from_iterable(constants)]

def GetAllEnums(module):
  """All enums declared at module, struct and interface level."""
  data = [module] + module.structs + module.interfaces
  enums = [x.enums for x in data]
  return [i for i in chain.from_iterable(enums)]

# Adds an import
required to use the provided |element|. # The required import is stored at '_imports'. def AddImport(module, element): if not isinstance(element, mojom.Kind): return if mojom.IsArrayKind(element) or mojom.IsInterfaceRequestKind(element): AddImport(module, element.kind) return if mojom.IsMapKind(element): AddImport(module, element.key_kind) AddImport(module, element.value_kind) return if mojom.IsAnyHandleKind(element): _imports['mojo/public/go/system'] = 'system' return if not hasattr(element, 'imported_from') or not element.imported_from: return imported = element.imported_from if GetPackagePath(imported['module']) == GetPackagePath(module): return path = GetPackagePath(imported['module']) if path in _imports: return name = GetPackageName(imported['module']) while name in _imports.values(): name += '_' _imports[path] = name class Generator(generator.Generator): go_filters = { 'array': lambda kind: mojom.Array(kind), 'bit_size': GetBitSize, 'decode_suffix': DecodeSuffix, 'encode_suffix': EncodeSuffix, 'go_type': GetGoType, 'expression_to_text': ExpressionToText, 'is_array': mojom.IsArrayKind, 'is_enum': mojom.IsEnumKind, 'is_handle': mojom.IsAnyHandleKind, 'is_interface': mojom.IsInterfaceKind, 'is_interface_request': mojom.IsInterfaceRequestKind, 'is_map': mojom.IsMapKind, 'is_none_or_empty': lambda array: array == None or len(array) == 0, 'is_nullable': mojom.IsNullableKind, 'is_pointer': mojom.IsObjectKind, 'is_struct': mojom.IsStructKind, 'is_union': mojom.IsUnionKind, 'name': GetNameForElement, 'tab_indent': lambda s, size = 1: ('\n' + '\t' * size).join(s.splitlines()) } def GetParameters(self): return { 'enums': GetAllEnums(self.module), 'imports': self.GetImports(), 'interfaces': self.GetInterfaces(), 'package': GetPackageName(self.module), 'structs': self.GetStructs(), 'unions': self.GetUnions(), } @UseJinja('go_templates/source.tmpl', filters=go_filters) def GenerateSource(self): return self.GetParameters() def GenerateFiles(self, args): 
self.Write(self.GenerateSource(), os.path.join("go", "src", GetPackagePath(self.module), "%s.go" % self.module.name)) def GetJinjaParameters(self): return { 'lstrip_blocks': True, 'trim_blocks': True, } def GetGlobals(self): return { 'namespace': self.module.namespace, 'module': self.module, } # Scans |self.module| for elements that require imports and adds all found # imports to '_imports' dict. Returns a list of imports that should include # the generated go file. def GetImports(self): # Imports can only be used in structs, constants, enums, interfaces. all_structs = list(self.module.structs) for i in self.module.interfaces: for method in i.methods: all_structs.append(self._GetStructFromMethod(method)) if method.response_parameters: all_structs.append(self._GetResponseStructFromMethod(method)) if len(all_structs) > 0 or len(self.module.interfaces) > 0: _imports['fmt'] = 'fmt' _imports['mojo/public/go/bindings'] = 'bindings' if len(self.module.interfaces) > 0: _imports['mojo/public/go/system'] = 'system' if len(all_structs) > 0: _imports['sort'] = 'sort' for union in self.module.unions: for field in union.fields: AddImport(self.module, field.kind) for struct in all_structs: for field in struct.fields: AddImport(self.module, field.kind) # TODO(rogulenko): add these after generating constants and struct defaults. # if field.default: # AddImport(self.module, field.default) for enum in GetAllEnums(self.module): for field in enum.fields: if field.value: AddImport(self.module, field.value) # TODO(rogulenko): add these after generating constants and struct defaults. # for constant in GetAllConstants(self.module): # AddImport(self.module, constant.value) imports_list = [] for i in _imports: if i.split('/')[-1] == _imports[i]: imports_list.append('"%s"' % i) else: imports_list.append('%s "%s"' % (_imports[i], i)) return sorted(imports_list) # Overrides the implementation from the base class in order to customize the # struct and field names. 
def _GetStructFromMethod(self, method): params_class = "%s_%s_Params" % (GetNameForElement(method.interface), GetNameForElement(method)) struct = mojom.Struct(params_class, module=method.interface.module) for param in method.parameters: struct.AddField("in%s" % GetNameForElement(param), param.kind, param.ordinal, attributes=param.attributes) return self._AddStructComputedData(False, struct) # Overrides the implementation from the base class in order to customize the # struct and field names. def _GetResponseStructFromMethod(self, method): params_class = "%s_%s_ResponseParams" % ( GetNameForElement(method.interface), GetNameForElement(method)) struct = mojom.Struct(params_class, module=method.interface.module) for param in method.response_parameters: struct.AddField("out%s" % GetNameForElement(param), param.kind, param.ordinal, attributes=param.attributes) return self._AddStructComputedData(False, struct)
{ "content_hash": "947c9867f77dce2045b9f2483b3db465", "timestamp": "", "source": "github", "line_count": 354, "max_line_length": 79, "avg_line_length": 38.11581920903955, "alnum_prop": 0.6731638627436448, "repo_name": "collinjackson/mojo", "id": "e20e9e4f6b4ac69e95823bf9621c230bec48c707", "size": "13656", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mojo/public/tools/bindings/generators/mojom_go_generator.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Bison", "bytes": "31162" }, { "name": "C", "bytes": "1870198" }, { "name": "C++", "bytes": "36473977" }, { "name": "CSS", "bytes": "1897" }, { "name": "Dart", "bytes": "508640" }, { "name": "Go", "bytes": "181090" }, { "name": "Groff", "bytes": "29030" }, { "name": "HTML", "bytes": "6258864" }, { "name": "Java", "bytes": "1187123" }, { "name": "JavaScript", "bytes": "204155" }, { "name": "Makefile", "bytes": "402" }, { "name": "Objective-C", "bytes": "74603" }, { "name": "Objective-C++", "bytes": "370763" }, { "name": "Protocol Buffer", "bytes": "1048" }, { "name": "Python", "bytes": "5515876" }, { "name": "Shell", "bytes": "143302" }, { "name": "nesC", "bytes": "18347" } ], "symlink_target": "" }
""" This modules implements the CrawlSpider which is the recommended spider to use for scraping typical web sites that requires crawling pages. See documentation in docs/topics/spiders.rst """ import copy from scrapy.http import Request, HtmlResponse from scrapy.utils.spider import iterate_spider_output from scrapy.spider import Spider def identity(x): return x class Rule(object): def __init__(self, link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=identity): self.link_extractor = link_extractor self.callback = callback self.cb_kwargs = cb_kwargs or {} self.process_links = process_links self.process_request = process_request if follow is None: self.follow = False if callback else True else: self.follow = follow class CrawlSpider(Spider): rules = () def __init__(self, *a, **kw): super(CrawlSpider, self).__init__(*a, **kw) self._compile_rules() def parse(self, response): return self._parse_response(response, self.parse_start_url, cb_kwargs={}, follow=True) def parse_start_url(self, response): return [] def process_results(self, response, results): return results def _requests_to_follow(self, response): if not isinstance(response, HtmlResponse): return seen = set() for n, rule in enumerate(self._rules): links = [l for l in rule.link_extractor.extract_links(response) if l not in seen] if links and rule.process_links: links = rule.process_links(links) seen = seen.union(links) for link in links: r = Request(url=link.url, callback=self._response_downloaded) r.meta.update(rule=n, link_text=link.text) yield rule.process_request(r) def _response_downloaded(self, response): rule = self._rules[response.meta['rule']] return self._parse_response(response, rule.callback, rule.cb_kwargs, rule.follow) def _parse_response(self, response, callback, cb_kwargs, follow=True): if callback: cb_res = callback(response, **cb_kwargs) or () cb_res = self.process_results(response, cb_res) for requests_or_item in iterate_spider_output(cb_res): yield 
requests_or_item if follow and self._follow_links: for request_or_item in self._requests_to_follow(response): yield request_or_item def _compile_rules(self): def get_method(method): if callable(method): return method elif isinstance(method, basestring): return getattr(self, method, None) self._rules = [copy.copy(r) for r in self.rules] for rule in self._rules: rule.callback = get_method(rule.callback) rule.process_links = get_method(rule.process_links) rule.process_request = get_method(rule.process_request) def set_crawler(self, crawler): super(CrawlSpider, self).set_crawler(crawler) self._follow_links = crawler.settings.getbool('CRAWLSPIDER_FOLLOW_LINKS', True)
{ "content_hash": "f44dfa5e72f2e55fec8cd2616c2d71df", "timestamp": "", "source": "github", "line_count": 91, "max_line_length": 129, "avg_line_length": 35.714285714285715, "alnum_prop": 0.6286153846153846, "repo_name": "xtmhm2000/scrapy-0.22", "id": "61b97e5a3b433fa245e3b5f7543f8036ab8fac82", "size": "3250", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "scrapy/contrib/spiders/crawl.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "9681" }, { "name": "Makefile", "bytes": "2225" }, { "name": "Python", "bytes": "1183205" }, { "name": "Shell", "bytes": "1723" } ], "symlink_target": "" }
import warnings import numpy as np from scipy.integrate import quad, dblquad from dpmm.density import t_density, multivariate_t_density, scaled_IX_density from test_utils import timer @timer def test_scaled_IX_density(): nu = 3 sigsqr = 1.0 # test that probability integrates to 1.0 r = quad(lambda x: scaled_IX_density(nu, sigsqr, x), 0.0, np.inf) np.testing.assert_almost_equal(r[0], 1.0, 10, "scaled_IX_density does not integrate to 1.0") # test mean mean = nu*sigsqr/(nu-2) r = quad(lambda x: scaled_IX_density(nu, sigsqr, x)*x, 0.0, np.inf) np.testing.assert_almost_equal(r[0], mean, 10, "scaled_IX_density has wrong mean") # test variance var = 2.0*nu**2*sigsqr/(nu-2.0)**2/(nu-4.0) with warnings.catch_warnings(): warnings.simplefilter('ignore') r = quad(lambda x: scaled_IX_density(nu, sigsqr, x)*(x-mean)**2, 0.0, np.inf) np.testing.assert_almost_equal(r[0], var, 8, "scaled_IX_density has wrong variance") # test vectorizability x = np.arange(24.0).reshape(4, 3, 2)+1 prs = scaled_IX_density(nu, sigsqr, x) for (i, j, k), pr in np.ndenumerate(prs): np.testing.assert_equal( pr, scaled_IX_density(nu, sigsqr, x[i, j, k]), "scaled_IX_density does not vectorize correctly!") @timer def test_t_density(): nu = 3 mu = 2.2 sigsqr = 1.51 # test that probability integrates to 1.0 r = quad(lambda x: t_density(nu, mu, sigsqr, x), -np.inf, np.inf) np.testing.assert_almost_equal(r[0], 1.0, 10, "t_density does not integrate to 1.0") # test mean r = quad(lambda x: t_density(nu, mu, sigsqr, x)*x, -np.inf, np.inf) np.testing.assert_almost_equal(r[0], mu, 10, "t_density has wrong mean") # test variance r = quad(lambda x: t_density(nu, mu, sigsqr, x)*(x-mu)**2, -np.inf, np.inf) np.testing.assert_almost_equal(r[0], nu*sigsqr/(nu-2), 10, "t_density has wrong variance") # test vectorizability x = np.arange(24.0).reshape(4, 3, 2)+1 prs = t_density(nu, mu, sigsqr, x) for (i, j, k), pr in np.ndenumerate(prs): np.testing.assert_equal( pr, t_density(nu, mu, sigsqr, x[i, j, k]), "t_density does not 
vectorize correctly!") @timer def test_multivariate_t_density(full=False): nu = 3 mu = np.r_[1., 2.] Sig = np.eye(2)+0.1 # test that integrates to 1.0 r = dblquad(lambda x, y: multivariate_t_density(nu, mu, Sig, np.r_[x, y]), -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf) np.testing.assert_almost_equal( r[0], 1.0, 5, "multivariate_t_density does not integrate to 1.0") if full: # test mean with warnings.catch_warnings(): warnings.simplefilter('ignore') xbar = dblquad(lambda x, y: multivariate_t_density(nu, mu, Sig, np.r_[x, y])*x, -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] ybar = dblquad(lambda x, y: multivariate_t_density(nu, mu, Sig, np.r_[x, y])*y, -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] np.testing.assert_almost_equal( xbar, mu[0], 5, "multivariate_t_density has wrong mean") np.testing.assert_almost_equal( ybar, mu[1], 5, "multivariate_t_density has wrong mean") # test covariance with warnings.catch_warnings(): warnings.simplefilter('ignore') Ixx = dblquad(lambda x, y: multivariate_t_density(nu, mu, Sig, np.r_[x, y])*(x-xbar)*(x-xbar), -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] Iyy = dblquad(lambda x, y: multivariate_t_density(nu, mu, Sig, np.r_[x, y])*(y-ybar)*(y-ybar), -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] Ixy = dblquad(lambda x, y: multivariate_t_density(nu, mu, Sig, np.r_[x, y])*(x-xbar)*(y-ybar), -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] cov = np.array([[Ixx, Ixy], [Ixy, Iyy]]) print cov print "----------" print nu/(nu-2.)*Sig np.testing.assert_almost_equal( cov, nu/(nu-2.)*Sig, 2, "multivariate_t_density has wrong covariance") # test that we can evaluate multiple probabilities in parallel xy1 = np.r_[0.0, 0.1] xy2 = np.r_[0.2, 0.3] pr1 = [multivariate_t_density(nu, mu, Sig, xy1), multivariate_t_density(nu, mu, Sig, xy2)] xys = np.vstack([xy1, xy2]) pr2 = multivariate_t_density(nu, mu, Sig, xys) np.testing.assert_array_almost_equal(pr1, pr2, 15, "multivariate_t_density does 
not vectorize correctly") # And a harder, higher dimensional case... xys = np.arange(24.0).reshape(4, 3, 2) prs = multivariate_t_density(nu, mu, Sig, xys) assert prs.shape == (4, 3) for (i, j), pr in np.ndenumerate(prs): np.testing.assert_array_almost_equal( pr, multivariate_t_density(nu, mu, Sig, xys[i, j]), 15, "multivariate_t_density does not vectorize correctly") if __name__ == "__main__": from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--full', action='store_true', help="Run full test suite (slow).") args = parser.parse_args() test_scaled_IX_density() test_t_density() test_multivariate_t_density(args.full)
{ "content_hash": "2cae6da123e489a3b278154538dd1c66", "timestamp": "", "source": "github", "line_count": 134, "max_line_length": 109, "avg_line_length": 40.57462686567164, "alnum_prop": 0.5885598675740298, "repo_name": "jmeyers314/DPMM", "id": "037b4138519dca72cf0332d174fb2053795d0d7a", "size": "5437", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_density.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Jupyter Notebook", "bytes": "566496" }, { "name": "Python", "bytes": "113928" } ], "symlink_target": "" }
""" ELEVATOR ANIMATED This sample code simulates an elevator using salabim's capabilities, including animation. The user can set some values on the animation screen: number of floors, number of elevator cabins, capacity of the cabins (persons) and the number of visitors requesting an elevator. The default values set in the code are as follows: Number of floors: topfloor = 15 Number of elevator cabins: ncars = 3 Capacity of each cabin: capacity = 4 Number of visitors requesting a lift: From level 0 to level n: load_0_n = 50 From level n to level n: load_n_n = 100 From level n to level 0: load_n_0 = 100 """ from __future__ import print_function # compatibility with Python 2.x from __future__ import division # compatibility with Python 2.x import salabim as sim def do_animation(): # Animation initialisation is done in this function # In this example, the simulation code is completely separated from the animation code # Some global variables accesible from any classes and functions in the code global xvisitor_dim # x-dimension of the square representing a visitor global yvisitor_dim # y-dimension of the square representting a visitor global xcar # x-dimension of the elevator car/s global capacity_last, ncars_last, topfloor_last # Some general parameters about the simulation (See Reference: salabim.Environment) env.modelname("Elevator") env.speed(32) env.background_color("20%gray") if make_video: env.video("Elevator.mp4") # We assign values to some the global variables xvisitor_dim = 30 # x-dimension of the square representing a visitor yvisitor_dim = xvisitor_dim # y-dimension of the square representing a visitor yfloor0 = 20 # y-coordinate of floor 0 xcar = {} # This is a dictionary containing the x-coordinates of the cars xled = {} # This is a dictionary containing the x-coordinates of the leds x = env.width() # Width of the animation in screen coordinates (See Reference) # This is the width available to display all the elements on the screen # Now we assign 
x-coordinates, from the right to left of the screen for car in cars: # We assign x-coordinates to the elevator cars x -= (capacity + 1) * xvisitor_dim # Each car must contain visitors xcar[car] = x # We store the car x-coordinate in the dictionary x -= xvisitor_dim # Additional space (one square) xsign = x # Position of the text with the number of floor x -= xvisitor_dim / 2 # Additional space (half square) for direction in (up, down): # Position of the leds (red/green) x -= xvisitor_dim / 2 xled[direction] = x # We store the led x-coordinates in the dictionary x -= xvisitor_dim # Another square to the right xwait = x # Where to show the queues at different floors for floor in floors: # Components needed to display the floors y = yfloor0 + floor.n * yvisitor_dim # y-coordinate of the floors floor.y = y for direction in (up, down): # Led indicating the direction of the car if (direction == up and floor.n < topfloor) or (direction == down and floor.n > 0): b = xvisitor_dim / 4 # Dimension used to define the triangle animate_led = sim.AnimatePolygon( # See Reference AnimatePolygon spec=(-b, -b, b, -b, 0, b), # this is triangle x=xled[direction], y=y + 2 * b, angle=0 if direction == up else 180, # up points up, down points down fillcolor=direction_color(direction), # up is red, down is green visible=lambda arg, t: arg in requests, arg=(floor, direction), ) # if (floor, direction) in requests, show, otherwise do not show sim.AnimateLine(x=0, y=y, spec=(0, 0, xwait, 0)) # Horizontal lines for floors sim.AnimateText( x=xsign, y=y + yvisitor_dim / 2, text=str(floor.n), fontsize=xvisitor_dim / 2 ) # Text indicating the floor number sim.AnimateQueue(queue=floor.visitors, x=xwait - xvisitor_dim, y=floor.y, direction="w") , # The queue at each floor of people waiting for the elevator, build westward for car in cars: # Components needed to display the cars x = xcar[car] # A dictionary containing the x-coordinates of the cars car.pic = sim.AnimateRectangle( x=x, y=car.y, # Main 
rectangle representing the car spec=(0, 0, capacity * xvisitor_dim, yvisitor_dim), fillcolor="lightblue", linewidth=0, ) sim.AnimateQueue(queue=car.visitors, x=xcar[car], y=car.y, direction="e", arg=car) # note that both the rectangle and the queue have a dynamic y-coordinate that # is controlled by the car.y method # The following Animate elements are sliders, which allow controlling different variables # on the animation screen ncars_last = ncars sim.AnimateSlider( x=510, y=0, width=90, height=20, vmin=1, vmax=5, resolution=1, v=ncars, label="#elevators", action=set_ncars, xy_anchor="nw", ) topfloor_last = topfloor sim.AnimateSlider( x=610, y=0, width=90, height=20, vmin=5, vmax=20, resolution=1, v=topfloor, label="top floor", action=set_topfloor, xy_anchor="nw", ) capacity_last = capacity sim.AnimateSlider( x=710, y=0, width=90, height=20, vmin=2, vmax=6, resolution=1, v=capacity, label="capacity", action=set_capacity, xy_anchor="nw", ) sim.AnimateSlider( x=510, y=-50, width=90, height=25, vmin=0, vmax=400, resolution=25, v=load_0_n, label="Load 0->n", action=set_load_0_n, xy_anchor="nw", ) sim.AnimateSlider( x=610, y=-50, width=90, height=25, vmin=0, vmax=400, resolution=25, v=load_n_n, label="Load n->n", action=set_load_n_n, xy_anchor="nw", ) sim.AnimateSlider( x=710, y=-50, width=90, height=25, vmin=0, vmax=400, resolution=25, v=load_n_0, label="Load n->0", action=set_load_n_0, xy_anchor="nw", ) env.animate(True) # starts the animation def set_load_0_n(val): # Setter for numer of visitors from level 0 to level n global load_0_n load_0_n = float(val) if vg_0_n.ispassive(): # vg_0_n is a VisitorGenerator vg_0_n.activate() def set_load_n_n(val): # Setter for numer of visitors from level n to level n global load_n_n load_n_n = float(val) if vg_n_n.ispassive(): # vg_n_n is a VisitorGenerator vg_n_n.activate() def set_load_n_0(val): # Setter for number of visitors from level n to level 0 global load_n_0 load_n_0 = float(val) if vg_n_0.ispassive(): # vg_n_0 is a 
VisitorGenerator vg_n_0.activate() def set_capacity(val): # Setter for capacity of the elevator cabins global capacity global capacity_last capacity = int(val) if capacity != capacity_last: capacity_last = capacity env.main().activate() def set_ncars(val): # Setter for number of cars (cabins) global ncars global ncars_last ncars = int(val) if ncars != ncars_last: ncars_last = ncars env.main().activate() def set_topfloor(val): # Setter for number of floors global topfloor global topfloor_last topfloor = int(val) if topfloor != topfloor_last: topfloor_last = topfloor env.main().activate() def direction_color(direction): # Function to assign color of a visitor or led if direction == 1: return "red" if direction == -1: return "green" return "yellow" class VisitorGenerator(sim.Component): # Class inheriting from sim.Component def setup(self, from_, to, id): # Setup is a method of Component, can be overriden # It is called immediately after initialisation of a Component self.from_ = from_ self.to = to self.id = id # There are 3 types: 0_n, n_0, n_n def process(self): while True: # Infinite loop # Selects randomly the origin floor of that visitor fromfloor = floors[sim.IntUniform(self.from_[0], self.from_[1]).sample()] # Selects randomly the destination floor of that visitor while True: tofloor = floors[sim.IntUniform(self.to[0], self.to[1]).sample()] if fromfloor != tofloor: # The selection is valid if origin and destination are different break Visitor(fromfloor=fromfloor, tofloor=tofloor) # Generates an instance of Visitor if self.id == "0_n": load = load_0_n elif self.id == "n_0": load = load_n_0 else: load = load_n_n if load == 0: # If there is no load then passivate the VisitorGenerator yield self.passivate() else: iat = 3600 / load r = sim.Uniform(0.5, 1.5).sample() yield self.hold(r * iat) # Holds during interarrival time class Visitor(sim.Component): # Class inheriting from sim.Component def setup(self, fromfloor, tofloor): self.fromfloor = fromfloor self.tofloor 
= tofloor self.direction = getdirection(self.fromfloor, self.tofloor) # animation_objects defines how to display a component in AnimateQueue # This method is overriden def animation_objects(self, q): size_x = xvisitor_dim # how much to displace the next component in x-direction size_y = yvisitor_dim # how much to displace the next component in y-direction b = 0.1 * xvisitor_dim # Instances of Animate class: an0 = sim.AnimateRectangle( spec=(b, 2, xvisitor_dim - b, yvisitor_dim - b), linewidth=0, fillcolor=direction_color(self.direction), text=str(self.tofloor.n), fontsize=xvisitor_dim * 0.7, textcolor="white", ) return size_x, size_y, an0 def process(self): self.enter(self.fromfloor.visitors) # Visitor enters the queue at its floor floor # Visitor requests a trip between one origin floor and one direction # That trip is added to requests if nobody has requested it before if not (self.fromfloor, self.direction) in requests: requests[self.fromfloor, self.direction] = self.env.now() # the arrival of the first request is used for the decision process where a car should go to for car in cars: # Every passive car is activated if car.ispassive(): car.activate() # this is not a very efficient way, but it's simple ... 
yield self.passivate() # The visitor will wait until it has arrived at its tofloor class VisitorsInCar(sim.Queue): # A queue of visitors inside the car pass class Car(sim.Component): # Class that inherits from sim.Component def setup(self): # Setup is a method of Component, can be overriden self.capacity = capacity # Capacity (visitors) of the car self.direction = still # Direction can be still, up or down self.floor = floors[0] # Stores the floor where the car is positioned, start at ground level self.visitors = VisitorsInCar() # Queue of visitors in the car def y(self, t): # This is used by the animation to define the level of a car # When the car is in mode 'Move' the level (y) will be varying over time if self.mode() == "Move": y = sim.interpolate(t, self.mode_time(), self.scheduled_time(), self.floor.y, self.nextfloor.y) # linear interpolation between self.floor.y and self.next_floor.y based on # the time it left self.floor.y (i.e. self.mode_time() and # the time it arrives at self.next_floor.y (i.e. 
self.scheduled_time()) else: y = self.floor.y return y def process(self): dooropen = False # Local variable controlling the state of the door self.floor = floors[0] # Car initiates at floor 0 self.direction = still # Car initiates as still while True: if self.direction == still: # If car is still and no requests then passivate and mode Idle if not requests: yield self.passivate(mode="Idle") if self.count_to_floor(self.floor) > 0: # If there are visitors inside the car for this floor then open door yield self.hold(dooropen_time, mode="Door open") dooropen = True for visitor in self.visitors: # A loop to allow all visitors for this floor to leave if visitor.tofloor == self.floor: visitor.leave(self.visitors) visitor.activate() # end of the visitor yield self.hold(exit_time, mode="Let exit") if self.direction == still: self.direction = up # just random for self.direction in (self.direction, -self.direction): # A loop to allow visitors going in the specific direction (up/down) # enter the car. if (self.floor, self.direction) in requests: del requests[self.floor, self.direction] # We, initialy, delete that job from requests if not dooropen: yield self.hold(dooropen_time, mode="Door open") dooropen = True for visitor in self.floor.visitors: if visitor.direction == self.direction: # If visitor goes in that direction then allow him to enter if len(self.visitors) < self.capacity: visitor.leave(self.floor.visitors) # Leaves the queue at that floor visitor.enter(self.visitors) # Enters the queue inside the car yield self.hold(enter_time, mode="Let in") if self.floor.count_in_direction(self.direction) > 0: # If there are still visitors going up/down in that floors # then add the request to the list of requests if not (self.floor, self.direction) in requests: requests[self.floor, self.direction] = self.env.now() if self.visitors: break else: if requests: # If we still have requests, which is the earliest? 
earliest = sim.inf for (floor, direction) in requests: if requests[floor, direction] < earliest: self.direction = getdirection(self.floor, floor) earliest = requests[floor, direction] # find the earliest request else: self.direction = still if dooropen: yield self.hold(doorclose_time, mode="Door close") dooropen = False if self.direction != still: # Finally, the car is moving up or down to the next floor self.nextfloor = floors[self.floor.n + self.direction] yield self.hold(move_time, mode="Move") # the mode_time is used in the animation self.floor = self.nextfloor def count_to_floor(self, tofloor): # Function to count the number of visitors inside the car for that floor n = 0 for visitor in self.visitors: if visitor.tofloor == tofloor: n += 1 return n class Visitors(sim.Queue): # A class to define a queue of visitors pass class Floor: # A class defining the floors with a method to count its visitors def __init__(self): self.visitors = Visitors() self.n = self.visitors.sequence_number() def count_in_direction(self, dir): n = 0 for visitor in self.visitors: if visitor.direction == dir: n += 1 return n def getdirection(fromfloor, tofloor): # A function to calculate the direction up or down if fromfloor.n < tofloor.n: return +1 if fromfloor.n > tofloor.n: return -1 return 0 up = 1 # Direction of move up still = 0 # Car is still down = -1 # Direction of move down move_time = 10 # Time for the car to move one floor up or down dooropen_time = 3 # Time required to open doors doorclose_time = 3 # Time required to close doors enter_time = 3 # Time required to let visitors enter the car exit_time = 3 # Time required to let visitors leave the car load_0_n = 50 # Initial number of visitors per hour willing to go from level 0 to level n load_n_n = 100 # Initial number of visitors per hour willing to go from level n to level n load_n_0 = 100 # Initial number of visitors per hour willing to go from level n to level 0 # These parameters are used in the class VisitorGenerator 
capacity = 4 # Inital capacity (persons) of the car (elevator cabin) ncars = 3 # Initial number of cars topfloor = 15 # Initial top floor to be reached by elevator while True: # The simulation (re)initialization in done here # The code is inside a while loop to reinitialize when a capacity, number of floors or number of cars # have been changed with the slider env = sim.Environment(trace=False) vg_0_n = VisitorGenerator(from_=(0, 0), to=(1, topfloor), id="0_n", name="vg_0_n") vg_n_0 = VisitorGenerator(from_=(1, topfloor), to=(0, 0), id="n_0", name="vg_n_0") vg_n_n = VisitorGenerator(from_=(1, topfloor), to=(1, topfloor), id="n_n", name="vg_n_n") requests = {} floors = [ Floor() for _ in range(topfloor + 1) ] # create the required number of floors and put them in the floors list cars = [Car() for _ in range(ncars)] # create the required number of cars and put them in the cars list make_video = False do_animation() if make_video: env.run(1000) break # if we make a video, only 1000 seconds are simulated else: env.run() env.animation_parameters(animate=False)
{ "content_hash": "550dc9a6df18b13fda0d20dcacf735ef", "timestamp": "", "source": "github", "line_count": 492, "max_line_length": 108, "avg_line_length": 38.63414634146341, "alnum_prop": 0.5959069865319865, "repo_name": "salabim/salabim", "id": "e5555aae5c38e90b991e4112648db3ef7e1641f2", "size": "19008", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sample models/Elevator animated commented.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1139317" } ], "symlink_target": "" }
import importlib import re import json import networkx as nx import numpy as np import networkx.algorithms.dag as nx_dag import collections from .tools import flatten_all, nested_update, group_by_function from .tools import is_iterable, is_mappable, is_numpy_iterable from . import tools from .class_lookup import is_storage_iterable, is_storage_mappable from .proxy import GenericLazyLoader from .uuids import ( has_uuid, get_uuid, set_uuid, encode_uuid, decode_uuid, encoded_uuid_re, is_uuid_string ) # UUID recognition and encoding ##################################### # Things in here might be modified for performance optimization. In # particular, it might be worth using a string representation of the UUID # whenever possible (dicts with string keys have a special fast-path) import sys if sys.version_info > (3, ): unicode = str long = int # Getting the list of UUIDs bsed on initial objets ################### def caches_contain(key, cache_list): for cache in cache_list: if key in cache: return True return False def filter_known_uuids(uuid_dict, cache_list): """Filters out UUIDs in the cache_list, returning what isn't cached""" return {uuid: value for (uuid, value) in uuid_dict.items() if not caches_contain(uuid, cache_list)} def unique_objects(object_list): found_uuids = set([]) return_objects = [] for obj in object_list: if has_uuid(obj): uuid = get_uuid(obj) if uuid not in found_uuids: found_uuids.update({uuid}) return_objects.append(obj) elif is_storage_mappable(obj) or is_storage_iterable(obj): return_objects.append(obj) return return_objects # this does not appear to be used # def may_contain_uuids(obj): # return (has_uuid(obj) or is_storage_mappable(obj) # or is_storage_iterable(obj)) def default_find_uuids(obj, cache_list): """Default method for finding new UUIDs in an object. Recursive. 
Parameters ---------- obj : Any the object to query for UUIDs cache_list : List[Mapping] caches that may contain existing UUIDs Returns ------- uuids : Dict[UUID, Any] mapping of UUID to object for any UUID-containing objects found new_objects : List[Any] Non-UUID container objects (iterables, mappings) that may contain futher UUIDs. Includes the dict from ``obj.to_dict()`` if ``obj`` has a UUID. """ uuids = {} new_objects = [] obj_uuid = get_uuid(obj) if has_uuid(obj) else None # filter known uuids: skip processing if known if caches_contain(obj_uuid, [uuids] + cache_list): return uuids, new_objects # UUID objects if obj_uuid: # print repr(obj) # print obj.to_dict().keys() uuids.update({obj_uuid: obj}) new_objects.extend(obj.to_dict().values()) # mappables and iterables if is_storage_mappable(obj): new_objects.extend(o for o in obj.keys() if has_uuid(o)) new_objects.extend(obj.values()) elif is_storage_iterable(obj): new_objects.extend(obj) return uuids, new_objects # NOTE: this needs find everything, including if the iterable/mapping has a # UUID, find that and things under it def get_all_uuids(initial_object, known_uuids=None, class_info=None): """Find all UUID objects (to be stored) This searches through an initial object, finding *all* nested objects (including those in lists and dictionaries) that have UUIDs. 
Parameters ---------- initial_object : object with UUID the object to search within known_uuids : dict of {uuid: object} objects that can be excluded from the search tree, presumably because they have already been searched and any object beneath them in the search tree also also already known class_info : :class:`.SerializationSchema` Returns ------- dict of {uuid: object} objects found in the search """ known_uuids = tools.none_to_default(known_uuids, {}) objects = [initial_object] uuids = {} # found_objs = collections.Counter() while objects: new_objects = [] objects = unique_objects(objects) # print objects # found_objs += collections.Counter(o.__class__.__name__) # for o in objects) for obj in objects: # TODO: this might be slow; check performance if isinstance(obj, GenericLazyLoader): obj = obj.load() # TODO: find a way to ensure that objects doesn't go over # duplicates here; see lprofile of default_find_uuids to see how # often abort due to being in cache in there, and whether we # should move the skip in here instead (how expensive is # info_from_instance?) 
info = class_info.info_from_instance(obj) \ if class_info else None if info and info.find_uuids is not None: find_uuids = info.find_uuids else: find_uuids = default_find_uuids new_uuids, new_objs = find_uuids(obj=obj, cache_list=[uuids, known_uuids]) uuids.update(new_uuids) new_objects.extend(new_objs) objects = new_objects # print(found_objs) return uuids class SchemaFindUUIDs(object): def __init__(self, schema_entries): self.schema_entries = [ (attr, attr_type) for (attr, attr_type) in schema_entries if attr_type in ['uuid', 'lazy', 'list_uuid'] ] def __call__(self, obj, cache_list): uuids = {get_uuid(obj): obj} new_objects = [] for (attr, attr_type) in self.schema_entries: attr_obj = getattr(obj, attr) if attr_type in ['uuid', 'lazy']: new_objects.append(attr_obj) elif attr_type == 'list_uuid': new_objects.extend(attr_obj) return uuids, new_objects # NOTE: this only need to find until the first UUID: iterables/mapping with # UUIDs aren't necessary here def replace_uuid(obj, uuid_encoding): """Return storage-ready replacements for values in dict representation This is used by first creating the dict representation of an object by calling ``obj.to_dict()``. The resulting dict can be the ``obj`` parameter of ``replace_uuid``. This algorithm is implemented recursively, so nested structures will call it again to create the correct nested locations of any UUID objects. Note that this does not go into the internal structure of any objects with UUIDs (such as UUID objects that are also iterable or mappable). The purpose here is to transform to the dict representation with UUIDs in the place of objects. Parameters ---------- obj : object input; replace with UUID if is has one, or search for nested structures uuid_encoding : callable function that maps a UUID to a version that can be identified from the (JSON) serialized string representation of the object. 
Returns ------- object input object with UUID objects replaced by the encoded form, leaving structure (of dicts, lists, etc) and all other objects the same """ # this is UUID => string replacement = obj # fast exit for string keys if tools.is_string(obj): return replacement if has_uuid(obj): replacement = uuid_encoding(get_uuid(obj)) elif is_mappable(obj): replacement = { replace_uuid(k, uuid_encoding): replace_uuid(v, uuid_encoding) for (k, v) in replacement.items() } elif is_storage_iterable(obj): replace_type = type(obj) replacement = replace_type([replace_uuid(o, uuid_encoding) for o in obj]) return replacement def to_dict_with_uuids(obj): dct = obj.to_dict() return replace_uuid(dct, uuid_encoding=encode_uuid) # this seems to not yet be obsolete, although I'm not sure why not -- it is # used a few places, but I think those places will be removed # (serialization.py) ... in principle, I think the custom json should be # used for this def to_bare_json(obj): replaced = replace_uuid(obj, uuid_encoding=encode_uuid) return json.dumps(replaced) # this should be made obsolete by custom_json stuff def to_json_obj(obj): dct = to_dict_with_uuids(obj) dct.update({'__module__': obj.__class__.__module__, '__class__': obj.__class__.__name__}) return json.dumps(dct) def do_import (module, thing): # TODO: this needs some error-checking mod = importlib.import_module(module) result = getattr(mod, thing) return result import_class = do_import # old name that was used def search_caches(key, cache_list, raise_error=True): """Find UUID if it is in the cache_list dicts Parameters ---------- key : str the UUID we're looking for cache_list : mapping or list of mapping caches that the objects are stored in (will be searched in order of the list). Mapping is {uuid: object} raise_error : bool whether to raise a KeyError if UUID not found; default True. 
If False, object not found returns None Returns ------- object or None the object with the given UUID, or ``None`` if the object is not found and ``raise_error`` is ``False``. """ if key is None: return None # some objects allow UUID to be None if not isinstance(cache_list, list): cache_list = [cache_list] obj = None for cache in cache_list: if key in cache: obj = cache[key] break if obj is None and raise_error: raise KeyError("Missing key: " + str(key)) return obj def from_dict_with_uuids(obj, cache_list): """Replace encoded UUIDs with the actual objects. This is used to replace UUIDs as encoding for storage with actual objects, to complete reconstruction of the dict representation. This method can be seen as the inverse process of :meth:`.replace_uuid`, an must be done before using the ``cls.from_dict()`` to properly instantiate the object. All input objects for the object being reconstructed must already be in the ``cache_list``. This means that the order is very important; that is controlled by :meth:`.get_reload_order`. Parameters ---------- obj : object object to reconstruct; replace UUID strings with the actual objects cache_list : mapping or list of mapping existing objects, keyed by their UUIDs Returns ------- object input object with UUID strings replaced by the actual objects """ replacement = obj if is_uuid_string(obj): # raises KeyError if object hasn't been visited # (indicates problem in DAG reconstruction) uuid = decode_uuid(obj) replacement = search_caches(uuid, cache_list) elif tools.is_string(obj): # fast exit for string keys return obj elif is_mappable(obj): replacement = {from_dict_with_uuids(k, cache_list): \ from_dict_with_uuids(v, cache_list) for (k, v) in obj.items()} elif is_storage_iterable(obj): replace_type = type(obj) replacement = replace_type([from_dict_with_uuids(o, cache_list) for o in obj]) return replacement def from_json_obj(uuid, table_row, cache_list): # TODO: OBSOLETE?! 
I think this has been replaced by custom_json # NOTE: from_json only works with existing_uuids (DAG-ordering) dct = json.loads(table_row['json']) cls = import_class(dct.pop('__module__'), dct.pop('__class__')) dct = from_dict_with_uuids(dct, cache_list) obj = cls.from_dict(dct) set_uuid(obj, uuid) return obj def _uuids_from_table_row(table_row, schema_entries, allow_lazy=True): """Gather UUIDs from a table row (as provided by storage). This organizes the UUIDS that are included in the table row based on information from that row. It separated objects to be proxied ('lazy') from objects to be directly loaded ('uuid', 'list_uuid', 'json_obj'). It also create the dependency dictionary (the entry for this row) that will be used to create the reconstruction DAG. This is for internal use; not to be part of the API. Parameters ---------- table_row : object must have attributes as defined by ``schema_entries``, plus a ``uuid`` attribute. Typically comes directly from the backend. schema_entries : list of 2-tuple the pairs of (attribute_name, attribute_type) describing the columns from the ``table_row``. Should match the schema entry for the table that the table row comes from. Returns ------- uuid : list list of UUIDs to be fully loaded lazy : set set of UUIDs to a lazy-loaded (i.e., as proxy) dependencies : dict length 1 dict mapping the input row's UUID to all UUIDs that it directly depends on (i.e., everything from ``uuid`` and ``lazy``). """ # take the schema entries here, not the whole schema uuid = set([]) lazy = set([]) if allow_lazy else uuid for (attr, attr_type) in schema_entries: if attr_type == 'uuid': uuid.add(getattr(table_row, attr)) elif attr_type == 'list_uuid': # TODO: better to use encoded_uuid_re here? 
uuid_list = json.loads(getattr(table_row, attr)) if uuid_list: # skip if None (or empty) uuid_list = {decode_uuid(u) for u in uuid_list} uuid.update(uuid_list) elif attr_type == 'json_obj': json_dct = getattr(table_row, attr) new_uuids = set(encoded_uuid_re.findall(json_dct)) uuid.update(new_uuids) elif attr_type == 'lazy': lazy.add(getattr(table_row, attr)) # other cases aren't UUIDs and are ignored if lazy is uuid: lazy = set([]) # remove all cases of None as a UUID to depend on # TODO: should None be in the UUID list even? # TODO: can we return the set here? dependencies = {table_row.uuid: (uuid | lazy) - {None}} return (list(uuid), lazy, dependencies) def get_all_uuids_loading(uuid_list, backend, schema, existing_uuids=None, allow_lazy=True): """Get all information to reload from UUIDs. This is the main function for identifying objects to reload from storage. It returns the table rows to load (sorted by table), the UUIDs of objects to lazy-load (sorted by table), and the dictionary of dependencies, which can be used to create the reconstruction DAG. 
Parameters ---------- uuid_list : Iterable[str] iterable of UUIDs backend : :class:`.Backend` schema : Dict existing_uuids : Mapping[str, Any] maps UUID to the relevant object Returns ------- to_load : List list of table rows lazy : Set[str] set of lazy object UUIDs dependencies : Dict[str, List[str]] dependency mapping; maps UUID of an object to a list of the UUIDs it depends on uuid_to_table : Dict[str, str] mapping of UUID to the name of the table that it is stored in """ if existing_uuids is None: existing_uuids = {} known_uuids = set(existing_uuids) uuid_to_table = {} all_table_rows = [] lazy = set([]) dependencies = {} while uuid_list: new_uuids = {uuid for uuid in uuid_list if uuid not in known_uuids} uuid_rows = backend.load_uuids_table(new_uuids) new_table_rows = backend.load_table_data(uuid_rows) uuid_to_table.update({r.uuid: backend.uuid_row_to_table_name(r) for r in uuid_rows}) uuid_list = [] for row in new_table_rows: entries = schema[uuid_to_table[row.uuid]] loc_uuid, loc_lazy, deps = _uuids_from_table_row( table_row=row, schema_entries=entries, allow_lazy=allow_lazy ) uuid_list += loc_uuid lazy.update(loc_lazy) dependencies.update(deps) all_table_rows += new_table_rows known_uuids |= new_uuids uuid_list = {uuid for uuid in uuid_list if uuid not in known_uuids} return (all_table_rows, lazy, dependencies, uuid_to_table) def dependency_dag(dependent_uuids, dag=None): """Create a DAG from the dependencies Parameters ---------- dependent_uuids: dict dictionary mapping UUID keys to set of UUID values, where the key-value pairs are edges of the dependency graph dag: networkx.DiGraph partially created DAG (optional) Returns ------- networkx.DiGraph DAG to recreate the input objects """ if dag is None: dag = nx.DiGraph() for from_node, to_nodes in dependent_uuids.items(): if to_nodes: dag.add_edges_from([(from_node, to_node) for to_node in to_nodes]) if not nx_dag.is_directed_acyclic_graph(dag): # pragma: no cover raise RuntimeError("Reconstruction DAG not 
acyclic?!?!") return dag def dag_reload_order(dag): return list(reversed(list(nx_dag.topological_sort(dag)))) def get_reload_order(to_load, dependencies): dag = dependency_dag(dependencies) no_deps = {row.uuid for row in to_load} no_deps.difference_update(set(dag.nodes)) ordered_uuids = list(no_deps) + dag_reload_order(dag) return ordered_uuids
{ "content_hash": "256a342cde0b4b87b3fd6c822873fd31", "timestamp": "", "source": "github", "line_count": 513, "max_line_length": 77, "avg_line_length": 34.96101364522417, "alnum_prop": 0.6225815444661277, "repo_name": "dwhswenson/openpathsampling", "id": "419a53d6d3d8571514cc7a698544fad6b2cc11ea", "size": "17935", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "openpathsampling/experimental/simstore/serialization_helpers.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1167" }, { "name": "CSS", "bytes": "2687" }, { "name": "HTML", "bytes": "81" }, { "name": "Makefile", "bytes": "64" }, { "name": "Python", "bytes": "2693398" }, { "name": "Shell", "bytes": "8655" } ], "symlink_target": "" }
""" Example of Minimum Component Filtering -------------------------------------- Figure 10.13 A minimum component filter applied to the spectrum of a white dwarf from SDSS data set (mjd= 52199, plate=659, fiber=381). The upper panel shows a portion of the input spectrum, along with the continuum computed via the minimum component filtering procedure described in Section 10.2.5 (see figure 10.12). The lower panel shows the PSD for both the input spectrum and the filtered result. """ # Author: Jake VanderPlas # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com # To report a bug or issue, use the following forum: # https://groups.google.com/forum/#!forum/astroml-general import numpy as np from matplotlib import pyplot as plt from astroML.fourier import PSD_continuous from astroML.datasets import fetch_sdss_spectrum from astroML.filters import min_component_filter #---------------------------------------------------------------------- # This function adjusts matplotlib settings for a uniform feel in the textbook. # Note that with usetex=True, fonts are rendered with LaTeX. This may # result in an error if LaTeX is not installed on your system. In that case, # you can set usetex to False. 
from astroML.plotting import setup_text_plots setup_text_plots(fontsize=8, usetex=True) #------------------------------------------------------------ # Fetch the spectrum from SDSS database & pre-process plate = 659 mjd = 52199 fiber = 381 data = fetch_sdss_spectrum(plate, mjd, fiber) lam = data.wavelength() spec = data.spectrum # wavelengths are logorithmically spaced: we'll work in log(lam) loglam = np.log10(lam) flag = (lam > 4000) & (lam < 5000) lam = lam[flag] loglam = loglam[flag] spec = spec[flag] lam = lam[:-1] loglam = loglam[:-1] spec = spec[:-1] #---------------------------------------------------------------------- # Mask-out significant features and compute filtered version feature_mask = (((lam > 4080) & (lam < 4130)) | ((lam > 4315) & (lam < 4370)) | ((lam > 4830) & (lam < 4900))) spec_filtered = min_component_filter(loglam, spec, feature_mask, fcut=100) #------------------------------------------------------------ # Compute PSD of filtered and unfiltered versions f, spec_filt_PSD = PSD_continuous(loglam, spec_filtered) f, spec_PSD = PSD_continuous(loglam, spec) #------------------------------------------------------------ # Plot the results fig = plt.figure(figsize=(5, 3.75)) fig.subplots_adjust(hspace=0.25) # Top panel: plot noisy and smoothed spectrum ax = fig.add_subplot(211) ax.plot(lam, spec, '-', c='gray', lw=1) ax.plot(lam, spec_filtered, '-k') ax.text(0.97, 0.93, "SDSS white dwarf\n %i-%i-%i" % (mjd, plate, fiber), ha='right', va='top', transform=ax.transAxes) ax.set_ylim(25, 110) ax.set_xlabel(r'$\lambda\ {\rm (\AA)}$') ax.set_ylabel('flux') # Bottom panel: plot noisy and smoothed PSD ax = fig.add_subplot(212, yscale='log') ax.plot(f, spec_PSD, '-', c='gray', lw=1) ax.plot(f, spec_filt_PSD, '-k') ax.set_xlabel(r'$f$') ax.set_ylabel('$PSD(f)$') ax.set_xlim(0, 2000) plt.show()
{ "content_hash": "bde34a8393f9145d628614876a265cfd", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 79, "avg_line_length": 33.255102040816325, "alnum_prop": 0.6287204664007364, "repo_name": "kcavagnolo/astroML", "id": "7c4d49bf51ced0659d64db445cd1ea1051f06306", "size": "3259", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "book_figures/chapter10/fig_mincomp.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Makefile", "bytes": "696" }, { "name": "Python", "bytes": "1087103" } ], "symlink_target": "" }
from flask import Blueprint, render_template, request, session from flask.ext.login import login_required from websterton.user.models import User import json blueprint = Blueprint("user", __name__, url_prefix='/users', static_folder="../static") @blueprint.route("/") @login_required def members(): return render_template("users/members.html") @blueprint.route("/settings/") @login_required def settings(): user = load_user(session['user_id']) reddits = json.loads(user.monitored_reddits) print type(reddits) return render_template("users/settings.html", reddits=reddits) def load_user(id): return User.get_by_id(int(id))
{ "content_hash": "f8cec843755c00fdca17faecf87f6189", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 63, "avg_line_length": 25.53846153846154, "alnum_prop": 0.7108433734939759, "repo_name": "RylanGotto/web-dash", "id": "d9895cb647ab08945c736eea000a7ff8ec452831", "size": "688", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "websterton/user/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "17182" }, { "name": "HTML", "bytes": "134145" }, { "name": "JavaScript", "bytes": "315447" }, { "name": "Python", "bytes": "39341" } ], "symlink_target": "" }
import os import re import csv import glob import pandas as pd from datetime import datetime def merge_subdirectories(input_folder_path, output_filename): """ Cleans and processes multiple .csvs Creates new merged .csv file folder_path: string of top-level file folder with subfolders output_filename: string name for merged .csv file output_filename is placed one directory above the input_folder_path """ current_dir = os.getcwd() os.chdir(current_dir+"/"+input_folder_path) folder_path = os.getcwd() os.chdir('../') output_folder_path = os.getcwd() try: os.remove(output_filename) except OSError: pass filewriter = csv.writer(open(output_filename, 'wb')) file_counter = 0 for folder in os.listdir(folder_path): data_path = (folder_path+"/"+folder+"/"+"api_requests_csv") os.chdir(data_path) for input_file in glob.glob(os.path.join(data_path, '*.csv')): with open(input_file, 'rU') as csv_file: filereader = csv.reader(csv_file) if file_counter < 1: for row in filereader: filewriter.writerow(row) else: header = next(filereader, None) for row in filereader: filewriter.writerow(row) file_counter += 1 print("finished %s folder at %s" % (folder, datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) print("saved %s in %s" % (output_filename, output_folder_path)) os.chdir(current_dir) def process_facebook_data(df): df = df[["state", "name", "ages_ranges", "genders", "behavior", "audience"]] gender_map = {'1.0': 'female', '2.0': 'male', '0.0': 'total population'} age_map = {'13': 'ages_13_65', '15': 'ages_15_19', '20': 'ages_20_24', '25': 'ages_25_29', '30': 'ages_30_34', '35': 'ages_35_39', '40': 'ages_40_44', '45': 'ages_45_49', '50': 'ages_50_54', '55': 'ages_55_59', '60': 'ages_60_65'} df["behavior"] = df.loc[:, ("behavior")].apply(lambda x: re.findall(r'\d+',x)) df["behavior"] = df.loc[:, ("behavior")].apply(lambda x: ''.join(x)) df["genders"] = df.loc[:, ("genders")].astype('string') df["genders"] = df.loc[:, ("genders")].replace(gender_map) df["ages_ranges"] = 
df["ages_ranges"].apply(lambda x: age_map[x[-3:-1]]) df["genders"] = df.loc[:, ("genders")].astype('string') df["genders"] = df.loc[:, ("genders")].replace(gender_map) df = df[["state", "name", "ages_ranges", "genders", "behavior", "audience"]] return(df)
{ "content_hash": "9b3c4be337d22d842b131ac50b365540", "timestamp": "", "source": "github", "line_count": 67, "max_line_length": 82, "avg_line_length": 41.43283582089552, "alnum_prop": 0.5399855907780979, "repo_name": "CSDE-UW/paa_2017_social_media", "id": "54c6142c8b7bec8ca98e69923abc1449c889947c", "size": "2776", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Estimate_Facebook_Audience/notebooks/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "347640" }, { "name": "Jupyter Notebook", "bytes": "69672" }, { "name": "Python", "bytes": "43576" }, { "name": "R", "bytes": "33255" }, { "name": "Shell", "bytes": "1291" } ], "symlink_target": "" }
from swgpy.object import * def create(kernel): result = Creature() result.template = "object/mobile/shared_dressed_commoner_tatooine_aqualish_male_01.iff" result.attribute_template_id = 9 result.stfName("npc_name","aqualish_base_male") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
{ "content_hash": "311244c882ec376207ccfd64db8063c1", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 88, "avg_line_length": 25, "alnum_prop": 0.7076923076923077, "repo_name": "anhstudios/swganh", "id": "c99f5a5acaa16e07e409928c449d83889d4de2e6", "size": "470", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "data/scripts/templates/object/mobile/shared_dressed_commoner_tatooine_aqualish_male_01.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11887" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2357839" }, { "name": "CMake", "bytes": "41264" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7503510" }, { "name": "SQLPL", "bytes": "42770" } ], "symlink_target": "" }
import requests
import os
import sys
import json


class SalecontrolClient(object):
    """Thin REST client for the salecontrol API.

    Credentials and the base URL are read from a JSON config file that
    must contain the keys "username", "password" and "url".
    """

    def __init__(self, cfg_name):
        # Close the config file deterministically instead of relying on GC.
        with open(cfg_name) as cfg_file:
            self.cfg = json.load(cfg_file)
        self.auth = (self.cfg['username'], self.cfg['password'])
        self.url = self.cfg['url']
        self.post_headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}

    # --- low-level HTTP helpers -------------------------------------------

    def get(self, path):
        """GET url+path; return the response body, or None on a non-200 status."""
        r = requests.get(self.url + path, auth=self.auth)
        if r.status_code != 200:
            print("GET error %d: %s" % (r.status_code, path))
            return None
        return r.text

    def put(self, path, data):
        """PUT form data to url+path; return the body, or None on a non-200 status."""
        r = requests.put(self.url + path, auth=self.auth, data=data, headers=self.post_headers)
        if r.status_code != 200:
            print("PUT error %d: %s" % (r.status_code, path))
            return None
        return r.text

    def post(self, path, data):
        """POST form data to url+path; return the body, or None on a non-200 status."""
        # NOTE: a leftover debug print of the full URL was removed here.
        r = requests.post(self.url + path, auth=self.auth, data=data, headers=self.post_headers)
        if r.status_code != 200:
            print("POST error %d: %s" % (r.status_code, path))
            return None
        return r.text

    def delete(self, path, data):
        """DELETE url+path; return the body, or None on a non-200 status.

        ``data`` is accepted only for signature symmetry with put/post and
        is not sent with the request.
        """
        r = requests.delete(self.url + path, auth=self.auth)
        if r.status_code != 200:
            print("DELETE error %d: %s" % (r.status_code, path))
            return None
        return r.text

    # Client paths

    def list_clients(self):
        """Print the name and balance of every registered client."""
        result = self.get('/clients')
        if not result:
            return ""
        for client in json.loads(result):
            print("Nome: %s, saldo: %f" % (client['name'], client['balance']))

    def get_clients(self):
        """Return a dict mapping client name -> client record ({} on failure)."""
        results = self.get('/clients')
        clients = {}
        if not results:
            # Request failed: behave as if there were no clients instead of
            # crashing on json.loads(None).
            return clients
        for result in json.loads(results):
            clients[result['name']] = result
        return clients

    def add_client(self, name, balance):
        """Create a client; return True on success, False otherwise."""
        params = {"name": name, "balance": balance}
        result = self.post('/clients', params)
        if not result:
            return False
        else:
            return True

    def update_client_balance(self, client_id, balance):
        """Set a client's balance; return True on success, False otherwise."""
        params = {"balance": balance}
        result = self.put('/clients/' + client_id, params)
        if not result:
            return False
        else:
            return True

    def client_payment(self, client_id, payment):
        """Register a payment for a client; return True on success."""
        params = {"payment": payment}
        result = self.put('/clients/' + client_id + '/payment', params)
        if not result:
            return False
        else:
            return True

    def client_log(self, client_id):
        """Return the 'sell' log entries of a client with product names resolved."""
        products = self.get_products()
        product_names = {}
        for v in products.values():
            product_names[v['_id']] = v['name']

        results = self.get('/clients/' + client_id + '/log')
        entries = []
        if not results:
            # Request failed: return an empty log instead of crashing.
            return entries
        for result in json.loads(results):
            if result['type'] == 'sell':
                result['product'] = product_names[result['product_id']]
                entries.append(result)
        return entries

    # Product paths

    def add_product(self, name, price, quantity, picture=""):
        """Create a product; return True on success, False otherwise."""
        params = {"name": name, "price": price, "picture": picture, "quantity": quantity}
        result = self.post('/products', params)
        if not result:
            return False
        else:
            return True

    def list_products(self):
        """Print the name, price and quantity of every product."""
        result = self.get('/products')
        if not result:
            return ""
        for product in json.loads(result):
            print("Nome: %s, preço: %f, quantidade: %d" % \
                  (product['name'], product['price'], product['quantity']))

    def get_products(self):
        """Return a dict mapping product name -> product record ({} on failure)."""
        results = self.get('/products')
        products = {}
        if not results:
            return products
        for result in json.loads(results):
            products[result['name']] = result
        return products

    def delete_product(self, product_id):
        """Delete a product; return True on success, False otherwise."""
        params = {}
        result = self.delete('/products/' + product_id, params)
        if not result:
            return False
        else:
            return True

    def sell(self, client_id, product_id, quantity=1):
        """Register a sale of ``quantity`` units to a client; return True on success."""
        params = {"client_id": client_id, "product_id": product_id, "quantity": quantity}
        result = self.post('/sell', params)
        if not result:
            return False
        else:
            return True


if __name__ == "__main__":
    sc = SalecontrolClient(sys.argv[1])
    clients = sc.get_clients()
    sc.list_clients()
{ "content_hash": "f304d6a62135e8500fbbde80e08e74b6", "timestamp": "", "source": "github", "line_count": 140, "max_line_length": 105, "avg_line_length": 32.52857142857143, "alnum_prop": 0.5368906455862977, "repo_name": "baraujo/salecontrol", "id": "8f98dedf4508501109e4a64dbdc6f8e1d0d17303", "size": "4579", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "client/salecontrol_client.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "15888" }, { "name": "Python", "bytes": "4579" } ], "symlink_target": "" }
__author__ = 'greg'
import aggregation
import cPickle as pickle
import os.path


class CondorTools(aggregation.ClassificationTools):
    """Classification-parsing helpers for Condor Watch subjects."""

    def __init__(self):
        # 1.875 is the factor between the displayed image and the full-size
        # image; marks are scaled back up to full-size coordinates.
        aggregation.ClassificationTools.__init__(self, scale=1.875)

    def __classification_to_markings__(self, classification):
        """Extract the list of marking dicts from a single classification.

        Returns [] when no annotation consists solely of a "marks" key,
        in which case the user should not have made any markings.
        """
        annotations = classification["annotations"]
        try:
            marking_location = [ann.keys() for ann in annotations].index(["marks"])
        except ValueError:
            # no "marks" annotation - the user made no markings
            return []

        marks_list = annotations[marking_location].values()[0].values()
        return marks_list


class CondorAggregation(aggregation.Aggregation):
    """Condor Watch aggregation using user "wreness" as the gold standard."""

    def __init__(self, to_skip=None):
        # e.g. ["carcassOrScale", "carcass", "other", ""]
        # Use a None sentinel instead of a mutable default argument so the
        # list is not shared between instances.
        if to_skip is None:
            to_skip = []
        aggregation.Aggregation.__init__(self, "condor", "2015-01-22", tools=CondorTools(), to_skip=to_skip)

    def __get_gold_subjects__(self):
        """Return zooniverse_ids of completed subjects classified by wreness."""
        subjects = []
        for classification in self.classification_collection.find({"user_name": "wreness"}):
            zooniverse_id = classification["subjects"][0]["zooniverse_id"]
            state = self.subject_collection.find_one({"zooniverse_id": zooniverse_id})["state"]
            if state == "complete":
                subjects.append(zooniverse_id)

        return subjects

    def __load_gold_standard__(self, zooniverse_id):
        """Load wreness's gold-standard markings for a subject, caching to disk."""
        # NOTE(review): hard-coded local cache directory - TODO make configurable
        pickle_fname = "/Users/greg/Databases/condor/" + zooniverse_id + "_gold.pickle"
        # have we already encountered this subject?
        if os.path.isfile(pickle_fname):
            # use `with` so the cache file handle is closed deterministically
            with open(pickle_fname, "rb") as pickle_file:
                self.gold_data[zooniverse_id] = pickle.load(pickle_file)
        else:
            annotations = self.classification_collection.find_one({"subjects.zooniverse_id": zooniverse_id, "user_name": "wreness"})["annotations"]
            self.gold_data[zooniverse_id] = []
            # were there any markings?
            for ann in annotations:
                if "marks" in ann:
                    for marks in ann["marks"].values():
                        # scale marks back up to full-size image coordinates
                        marks["x"] = 1.875 * float(marks["x"])
                        marks["y"] = 1.875 * float(marks["y"])
                        self.gold_data[zooniverse_id].append(marks)

            with open(pickle_fname, "wb") as pickle_file:
                pickle.dump(self.gold_data[zooniverse_id], pickle_file)

    def __readin_subject__(self, zooniverse_id):
        """Read in a subject, always skipping the gold-standard user."""
        aggregation.Aggregation.__readin_subject__(self, zooniverse_id, users_to_skip=["wreness"])

    def __load_roi__(self, zooniverse_id):
        """Return the region of interest as (width, height)."""
        # the actual markings might be scaled down but since, for condor watch,
        # we want to include every point, this should be fine
        return (1920, 1080)
{ "content_hash": "ccd3cd7fe4fd40f00ba2e98ab68f2b3c", "timestamp": "", "source": "github", "line_count": 67, "max_line_length": 144, "avg_line_length": 40.35820895522388, "alnum_prop": 0.6187130177514792, "repo_name": "camallen/aggregation", "id": "8a2a7981905bd79ea8672d13e472e54bd2a53361", "size": "2704", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "experimental/paper/condorAggregation.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "723" }, { "name": "Python", "bytes": "1676640" }, { "name": "Scala", "bytes": "629" }, { "name": "Shell", "bytes": "95" } ], "symlink_target": "" }
""" ietf_system This module contains a collection of YANG definitions for the configuration and identification of some common system properties within a device containing a NETCONF server. This includes data node definitions for system identification, time\-of\-day management, user management, DNS resolver configuration, and some protocol operations for system management. Copyright (c) 2014 IETF Trust and the persons identified as authors of the code. All rights reserved. Redistribution and use in source and binary forms, with or without modification, is permitted pursuant to, and subject to the license terms contained in, the Simplified BSD License set forth in Section 4.c of the IETF Trust's Legal Provisions Relating to IETF Documents (http\://trustee.ietf.org/license\-info). This version of this YANG module is part of RFC 7317; see the RFC itself for full legal notices. """ import re import collections from enum import Enum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk.errors import YPYError, YPYModelError class AuthenticationMethodIdentity(object): """ Base identity for user authentication methods. """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): pass @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['AuthenticationMethodIdentity']['meta_info'] class RadiusAuthenticationTypeIdentity(object): """ Base identity for RADIUS authentication types. """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): pass @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['RadiusAuthenticationTypeIdentity']['meta_info'] class System(object): """ System group configuration. .. attribute:: authentication The authentication configuration subtree **type**\: :py:class:`Authentication <ydk.models.ietf.ietf_system.System.Authentication>` .. 
attribute:: clock Configuration of the system date and time properties **type**\: :py:class:`Clock <ydk.models.ietf.ietf_system.System.Clock>` .. attribute:: contact The administrator contact information for the system. A server implementation MAY map this leaf to the sysContact MIB object. Such an implementation needs to use some mechanism to handle the differences in size and characters allowed between this leaf and sysContact. The definition of such a mechanism is outside the scope of this document **type**\: str .. attribute:: dns_resolver Configuration of the DNS resolver **type**\: :py:class:`DnsResolver <ydk.models.ietf.ietf_system.System.DnsResolver>` .. attribute:: hostname The name of the host. This name can be a single domain label or the fully qualified domain name of the host **type**\: str **pattern:** ((([a\-zA\-Z0\-9\_]([a\-zA\-Z0\-9\\\-\_]){0,61})?[a\-zA\-Z0\-9]\\.)\*([a\-zA\-Z0\-9\_]([a\-zA\-Z0\-9\\\-\_]){0,61})?[a\-zA\-Z0\-9]\\.?)\|\\. .. attribute:: location The system location. A server implementation MAY map this leaf to the sysLocation MIB object. Such an implementation needs to use some mechanism to handle the differences in size and characters allowed between this leaf and sysLocation. The definition of such a mechanism is outside the scope of this document **type**\: str .. attribute:: ntp Configuration of the NTP client **type**\: :py:class:`Ntp <ydk.models.ietf.ietf_system.System.Ntp>` **presence node**\: True .. 
attribute:: radius Configuration of the RADIUS client **type**\: :py:class:`Radius <ydk.models.ietf.ietf_system.System.Radius>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.authentication = System.Authentication() self.authentication.parent = self self.clock = System.Clock() self.clock.parent = self self.contact = None self.dns_resolver = System.DnsResolver() self.dns_resolver.parent = self self.hostname = None self.location = None self.ntp = None self.radius = System.Radius() self.radius.parent = self class Clock(object): """ Configuration of the system date and time properties. .. attribute:: timezone_name The TZ database name to use for the system, such as 'Europe/Stockholm' **type**\: str .. attribute:: timezone_utc_offset The number of minutes to add to UTC time to identify the time zone for this system. For example, 'UTC \- 8\:00 hours' would be represented as '\-480'. Note that automatic daylight saving time adjustment is not provided if this object is used **type**\: int **range:** \-1500..1500 **units**\: minutes """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.timezone_name = None self.timezone_utc_offset = None @property def _common_path(self): return '/ietf-system:system/ietf-system:clock' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.timezone_name is not None: return True if self.timezone_utc_offset is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Clock']['meta_info'] class Ntp(object): """ Configuration of the NTP client. .. attribute:: enabled Indicates that the system should attempt to synchronize the system clock with an NTP server from the 'ntp/server' list **type**\: bool **default value**\: true .. attribute:: server List of NTP servers to use for system clock synchronization. 
If '/system/ntp/enabled' is 'true', then the system will attempt to contact and utilize the specified NTP servers **type**\: list of :py:class:`Server <ydk.models.ietf.ietf_system.System.Ntp.Server>` .. attribute:: _is_presence Is present if this instance represents presence container else not **type**\: bool This class is a :ref:`presence class<presence-class>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self._is_presence = True self.enabled = None self.server = YList() self.server.parent = self self.server.name = 'server' class Server(object): """ List of NTP servers to use for system clock synchronization. If '/system/ntp/enabled' is 'true', then the system will attempt to contact and utilize the specified NTP servers. .. attribute:: name <key> An arbitrary name for the NTP server **type**\: str .. attribute:: association_type The desired association type for this NTP server **type**\: :py:class:`AssociationTypeEnum <ydk.models.ietf.ietf_system.System.Ntp.Server.AssociationTypeEnum>` **default value**\: server .. attribute:: iburst Indicates whether this server should enable burst synchronization or not **type**\: bool **default value**\: false .. attribute:: prefer Indicates whether this server should be preferred or not **type**\: bool **default value**\: false .. attribute:: udp Contains UDP\-specific configuration parameters for NTP **type**\: :py:class:`Udp <ydk.models.ietf.ietf_system.System.Ntp.Server.Udp>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.name = None self.association_type = None self.iburst = None self.prefer = None self.udp = System.Ntp.Server.Udp() self.udp.parent = self class AssociationTypeEnum(Enum): """ AssociationTypeEnum The desired association type for this NTP server. .. data:: server = 0 Use client association mode. This device will not provide synchronization to the configured NTP server. .. data:: peer = 1 Use symmetric active association mode. 
This device may provide synchronization to the configured NTP server. .. data:: pool = 2 Use client association mode with one or more of the NTP servers found by DNS resolution of the domain name given by the 'address' leaf. This device will not provide synchronization to the servers. """ server = 0 peer = 1 pool = 2 @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Ntp.Server.AssociationTypeEnum'] class Udp(object): """ Contains UDP\-specific configuration parameters for NTP. .. attribute:: address The address of the NTP server **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? **mandatory**\: True ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? **mandatory**\: True ---- ---- **type**\: str **pattern:** ((([a\-zA\-Z0\-9\_]([a\-zA\-Z0\-9\\\-\_]){0,61})?[a\-zA\-Z0\-9]\\.)\*([a\-zA\-Z0\-9\_]([a\-zA\-Z0\-9\\\-\_]){0,61})?[a\-zA\-Z0\-9]\\.?)\|\\. **mandatory**\: True ---- .. attribute:: port The port number of the NTP server **type**\: int **range:** 0..65535 **default value**\: 123 """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.address = None self.port = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/ietf-system:udp' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.address is not None: return True if self.port is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Ntp.Server.Udp']['meta_info'] @property def _common_path(self): if self.name is None: raise YPYModelError('Key property name is None') return '/ietf-system:system/ietf-system:ntp/ietf-system:server[ietf-system:name = ' + str(self.name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.name is not None: return True if self.association_type is not None: return True if self.iburst is not None: return True if self.prefer is not None: return True if self.udp is not None and self.udp._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Ntp.Server']['meta_info'] @property def _common_path(self): return '/ietf-system:system/ietf-system:ntp' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self._is_presence: return True if self.enabled is not None: return True if self.server is not None: for child_ref in self.server: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Ntp']['meta_info'] class DnsResolver(object): """ Configuration of the DNS resolver. .. attribute:: options Resolver options. 
The set of available options has been limited to those that are generally available across different resolver implementations and generally useful **type**\: :py:class:`Options <ydk.models.ietf.ietf_system.System.DnsResolver.Options>` .. attribute:: search An ordered list of domains to search when resolving a host name **type**\: list of str **pattern:** ((([a\-zA\-Z0\-9\_]([a\-zA\-Z0\-9\\\-\_]){0,61})?[a\-zA\-Z0\-9]\\.)\*([a\-zA\-Z0\-9\_]([a\-zA\-Z0\-9\\\-\_]){0,61})?[a\-zA\-Z0\-9]\\.?)\|\\. .. attribute:: server List of the DNS servers that the resolver should query. When the resolver is invoked by a calling application, it sends the query to the first name server in this list. If no response has been received within 'timeout' seconds, the resolver continues with the next server in the list. If no response is received from any server, the resolver continues with the first server again. When the resolver has traversed the list 'attempts' times without receiving any response, it gives up and returns an error to the calling application. Implementations MAY limit the number of entries in this list **type**\: list of :py:class:`Server <ydk.models.ietf.ietf_system.System.DnsResolver.Server>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.options = System.DnsResolver.Options() self.options.parent = self self.search = YLeafList() self.search.parent = self self.search.name = 'search' self.server = YList() self.server.parent = self self.server.name = 'server' class Server(object): """ List of the DNS servers that the resolver should query. When the resolver is invoked by a calling application, it sends the query to the first name server in this list. If no response has been received within 'timeout' seconds, the resolver continues with the next server in the list. If no response is received from any server, the resolver continues with the first server again. 
When the resolver has traversed the list 'attempts' times without receiving any response, it gives up and returns an error to the calling application. Implementations MAY limit the number of entries in this list. .. attribute:: name <key> An arbitrary name for the DNS server **type**\: str .. attribute:: udp_and_tcp Contains UDP\- and TCP\-specific configuration parameters for DNS **type**\: :py:class:`UdpAndTcp <ydk.models.ietf.ietf_system.System.DnsResolver.Server.UdpAndTcp>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.name = None self.udp_and_tcp = System.DnsResolver.Server.UdpAndTcp() self.udp_and_tcp.parent = self class UdpAndTcp(object): """ Contains UDP\- and TCP\-specific configuration parameters for DNS. .. attribute:: address The address of the DNS server **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? **mandatory**\: True ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? **mandatory**\: True ---- .. attribute:: port The UDP and TCP port number of the DNS server **type**\: int **range:** 0..65535 **default value**\: 53 """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.address = None self.port = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/ietf-system:udp-and-tcp' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.address is not None: return True if self.port is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.DnsResolver.Server.UdpAndTcp']['meta_info'] @property def _common_path(self): if self.name is None: raise YPYModelError('Key property name is None') return '/ietf-system:system/ietf-system:dns-resolver/ietf-system:server[ietf-system:name = ' + str(self.name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.name is not None: return True if self.udp_and_tcp is not None and self.udp_and_tcp._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.DnsResolver.Server']['meta_info'] class Options(object): """ Resolver options. The set of available options has been limited to those that are generally available across different resolver implementations and generally useful. .. attribute:: attempts The number of times the resolver will send a query to all of its name servers before giving up and returning an error to the calling application **type**\: int **range:** 1..255 **default value**\: 2 .. 
attribute:: timeout The amount of time the resolver will wait for a response from each remote name server before retrying the query via a different name server **type**\: int **range:** 1..255 **units**\: seconds **default value**\: 5 """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.attempts = None self.timeout = None @property def _common_path(self): return '/ietf-system:system/ietf-system:dns-resolver/ietf-system:options' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.attempts is not None: return True if self.timeout is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.DnsResolver.Options']['meta_info'] @property def _common_path(self): return '/ietf-system:system/ietf-system:dns-resolver' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.options is not None and self.options._has_data(): return True if self.search is not None: for child in self.search: if child is not None: return True if self.server is not None: for child_ref in self.server: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.DnsResolver']['meta_info'] class Radius(object): """ Configuration of the RADIUS client. .. attribute:: options RADIUS client options **type**\: :py:class:`Options <ydk.models.ietf.ietf_system.System.Radius.Options>` .. attribute:: server List of RADIUS servers used by the device. When the RADIUS client is invoked by a calling application, it sends the query to the first server in this list. If no response has been received within 'timeout' seconds, the client continues with the next server in the list. 
If no response is received from any server, the client continues with the first server again. When the client has traversed the list 'attempts' times without receiving any response, it gives up and returns an error to the calling application **type**\: list of :py:class:`Server <ydk.models.ietf.ietf_system.System.Radius.Server>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.options = System.Radius.Options() self.options.parent = self self.server = YList() self.server.parent = self self.server.name = 'server' class Server(object): """ List of RADIUS servers used by the device. When the RADIUS client is invoked by a calling application, it sends the query to the first server in this list. If no response has been received within 'timeout' seconds, the client continues with the next server in the list. If no response is received from any server, the client continues with the first server again. When the client has traversed the list 'attempts' times without receiving any response, it gives up and returns an error to the calling application. .. attribute:: name <key> An arbitrary name for the RADIUS server **type**\: str .. attribute:: authentication_type The authentication type requested from the RADIUS server **type**\: :py:class:`RadiusAuthenticationTypeIdentity <ydk.models.ietf.ietf_system.RadiusAuthenticationTypeIdentity>` **default value**\: radius-pap .. attribute:: udp Contains UDP\-specific configuration parameters for RADIUS **type**\: :py:class:`Udp <ydk.models.ietf.ietf_system.System.Radius.Server.Udp>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.name = None self.authentication_type = None self.udp = System.Radius.Server.Udp() self.udp.parent = self class Udp(object): """ Contains UDP\-specific configuration parameters for RADIUS. .. 
attribute:: address The address of the RADIUS server **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? **mandatory**\: True ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? **mandatory**\: True ---- ---- **type**\: str **pattern:** ((([a\-zA\-Z0\-9\_]([a\-zA\-Z0\-9\\\-\_]){0,61})?[a\-zA\-Z0\-9]\\.)\*([a\-zA\-Z0\-9\_]([a\-zA\-Z0\-9\\\-\_]){0,61})?[a\-zA\-Z0\-9]\\.?)\|\\. **mandatory**\: True ---- .. attribute:: authentication_port The port number of the RADIUS server **type**\: int **range:** 0..65535 **default value**\: 1812 .. attribute:: shared_secret The shared secret, which is known to both the RADIUS client and server **type**\: str **mandatory**\: True """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.address = None self.authentication_port = None self.shared_secret = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/ietf-system:udp' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.address is not None: return True if self.authentication_port is not None: return True if self.shared_secret is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Radius.Server.Udp']['meta_info'] @property def _common_path(self): if self.name is None: raise YPYModelError('Key property name is None') return '/ietf-system:system/ietf-system:radius/ietf-system:server[ietf-system:name = ' + str(self.name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.name is not None: return True if self.authentication_type is not None: return True if self.udp is not None and self.udp._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Radius.Server']['meta_info'] class Options(object): """ RADIUS client options. .. attribute:: attempts The number of times the device will send a query to all of its RADIUS servers before giving up **type**\: int **range:** 1..255 **default value**\: 2 .. 
attribute:: timeout The number of seconds the device will wait for a response from each RADIUS server before trying with a different server **type**\: int **range:** 1..255 **units**\: seconds **default value**\: 5 """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.attempts = None self.timeout = None @property def _common_path(self): return '/ietf-system:system/ietf-system:radius/ietf-system:options' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.attempts is not None: return True if self.timeout is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Radius.Options']['meta_info'] @property def _common_path(self): return '/ietf-system:system/ietf-system:radius' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.options is not None and self.options._has_data(): return True if self.server is not None: for child_ref in self.server: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Radius']['meta_info'] class Authentication(object): """ The authentication configuration subtree. .. attribute:: user The list of local users configured on this device **type**\: list of :py:class:`User <ydk.models.ietf.ietf_system.System.Authentication.User>` .. attribute:: user_authentication_order When the device authenticates a user with a password, it tries the authentication methods in this leaf\-list in order. If authentication with one method fails, the next method is used. If no method succeeds, the user is denied access. 
An empty user\-authentication\-order leaf\-list still allows authentication of users using mechanisms that do not involve a password. If the 'radius\-authentication' feature is advertised by the NETCONF server, the 'radius' identity can be added to this list. If the 'local\-users' feature is advertised by the NETCONF server, the 'local\-users' identity can be added to this list **type**\: list of """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.user = YList() self.user.parent = self self.user.name = 'user' self.user_authentication_order = YLeafList() self.user_authentication_order.parent = self self.user_authentication_order.name = 'user_authentication_order' class User(object): """ The list of local users configured on this device. .. attribute:: name <key> The user name string identifying this entry **type**\: str .. attribute:: authorized_key A list of public SSH keys for this user. These keys are allowed for SSH authentication, as described in RFC 4253 **type**\: list of :py:class:`AuthorizedKey <ydk.models.ietf.ietf_system.System.Authentication.User.AuthorizedKey>` .. attribute:: password The password for this entry **type**\: str **pattern:** $0$.\*\|$1$[a\-zA\-Z0\-9./]{1,8}$[a\-zA\-Z0\-9./]{22}\|$5$(rounds=\\d+$)?[a\-zA\-Z0\-9./]{1,16}$[a\-zA\-Z0\-9./]{43}\|$6$(rounds=\\d+$)?[a\-zA\-Z0\-9./]{1,16}$[a\-zA\-Z0\-9./]{86} """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.name = None self.authorized_key = YList() self.authorized_key.parent = self self.authorized_key.name = 'authorized_key' self.password = None class AuthorizedKey(object): """ A list of public SSH keys for this user. These keys are allowed for SSH authentication, as described in RFC 4253. .. attribute:: name <key> An arbitrary name for the SSH key **type**\: str .. attribute:: algorithm The public key algorithm name for this SSH key. 
Valid values are the values in the IANA 'Secure Shell (SSH) Protocol Parameters' registry, Public Key Algorithm Names **type**\: str **mandatory**\: True .. attribute:: key_data The binary public key data for this SSH key, as specified by RFC 4253, Section 6.6, i.e.\: string certificate or public key format identifier byte[n] key/certificate data **type**\: str **mandatory**\: True """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.name = None self.algorithm = None self.key_data = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') if self.name is None: raise YPYModelError('Key property name is None') return self.parent._common_path +'/ietf-system:authorized-key[ietf-system:name = ' + str(self.name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.name is not None: return True if self.algorithm is not None: return True if self.key_data is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Authentication.User.AuthorizedKey']['meta_info'] @property def _common_path(self): if self.name is None: raise YPYModelError('Key property name is None') return '/ietf-system:system/ietf-system:authentication/ietf-system:user[ietf-system:name = ' + str(self.name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.name is not None: return True if self.authorized_key is not None: for child_ref in self.authorized_key: if child_ref._has_data(): return True if self.password is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Authentication.User']['meta_info'] @property def 
_common_path(self): return '/ietf-system:system/ietf-system:authentication' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.user is not None: for child_ref in self.user: if child_ref._has_data(): return True if self.user_authentication_order is not None: for child_ref in self.user_authentication_order: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System.Authentication']['meta_info'] @property def _common_path(self): return '/ietf-system:system' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.authentication is not None and self.authentication._has_data(): return True if self.clock is not None and self.clock._has_data(): return True if self.contact is not None: return True if self.dns_resolver is not None and self.dns_resolver._has_data(): return True if self.hostname is not None: return True if self.location is not None: return True if self.ntp is not None and self.ntp._has_data(): return True if self.radius is not None and self.radius._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['System']['meta_info'] class SystemState(object): """ System group operational state. .. attribute:: clock Monitoring of the system date and time properties **type**\: :py:class:`Clock <ydk.models.ietf.ietf_system.SystemState.Clock>` .. 
attribute:: platform Contains vendor\-specific information for identifying the system platform and operating system **type**\: :py:class:`Platform <ydk.models.ietf.ietf_system.SystemState.Platform>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.clock = SystemState.Clock() self.clock.parent = self self.platform = SystemState.Platform() self.platform.parent = self class Platform(object): """ Contains vendor\-specific information for identifying the system platform and operating system. .. attribute:: machine A vendor\-specific identifier string representing the hardware in use **type**\: str .. attribute:: os_name The name of the operating system in use \- for example, 'Linux' **type**\: str .. attribute:: os_release The current release level of the operating system in use. This string MAY indicate the OS source code revision **type**\: str .. attribute:: os_version The current version level of the operating system in use. This string MAY indicate the specific OS build date and target variant information **type**\: str """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.machine = None self.os_name = None self.os_release = None self.os_version = None @property def _common_path(self): return '/ietf-system:system-state/ietf-system:platform' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.machine is not None: return True if self.os_name is not None: return True if self.os_release is not None: return True if self.os_version is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['SystemState.Platform']['meta_info'] class Clock(object): """ Monitoring of the system date and time properties. .. 
attribute:: boot_datetime The system date and time when the system last restarted **type**\: str **pattern:** \\d{4}\-\\d{2}\-\\d{2}T\\d{2}\:\\d{2}\:\\d{2}(\\.\\d+)?(Z\|[\\+\\\-]\\d{2}\:\\d{2}) .. attribute:: current_datetime The current system date and time **type**\: str **pattern:** \\d{4}\-\\d{2}\-\\d{2}T\\d{2}\:\\d{2}\:\\d{2}(\\.\\d+)?(Z\|[\\+\\\-]\\d{2}\:\\d{2}) """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.boot_datetime = None self.current_datetime = None @property def _common_path(self): return '/ietf-system:system-state/ietf-system:clock' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.boot_datetime is not None: return True if self.current_datetime is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['SystemState.Clock']['meta_info'] @property def _common_path(self): return '/ietf-system:system-state' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.clock is not None and self.clock._has_data(): return True if self.platform is not None and self.platform._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['SystemState']['meta_info'] class SetCurrentDatetimeRpc(object): """ Set the /system\-state/clock/current\-datetime leaf to the specified value. If the system is using NTP (i.e., /system/ntp/enabled is set to 'true'), then this operation will fail with error\-tag 'operation\-failed' and error\-app\-tag value of 'ntp\-active'. .. 
attribute:: input **type**\: :py:class:`Input <ydk.models.ietf.ietf_system.SetCurrentDatetimeRpc.Input>` """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.input = SetCurrentDatetimeRpc.Input() self.input.parent = self self.is_rpc = True class Input(object): """ .. attribute:: current_datetime The current system date and time **type**\: str **pattern:** \\d{4}\-\\d{2}\-\\d{2}T\\d{2}\:\\d{2}\:\\d{2}(\\.\\d+)?(Z\|[\\+\\\-]\\d{2}\:\\d{2}) **mandatory**\: True """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.parent = None self.current_datetime = None @property def _common_path(self): return '/ietf-system:set-current-datetime/ietf-system:input' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' if self.parent is None: raise YPYError('Parent reference is needed to determine if entity has configuration data') return self.parent.is_config() def _has_data(self): if self.current_datetime is not None: return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['SetCurrentDatetimeRpc.Input']['meta_info'] @property def _common_path(self): return '/ietf-system:set-current-datetime' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.input is not None and self.input._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['SetCurrentDatetimeRpc']['meta_info'] class SystemRestartRpc(object): """ Request that the entire system be restarted immediately. A server SHOULD send an rpc reply to the client before restarting the system. 
""" _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.is_rpc = True @property def _common_path(self): return '/ietf-system:system-restart' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['SystemRestartRpc']['meta_info'] class SystemShutdownRpc(object): """ Request that the entire system be shut down immediately. A server SHOULD send an rpc reply to the client before shutting down the system. """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): self.is_rpc = True @property def _common_path(self): return '/ietf-system:system-shutdown' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): return False @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['SystemShutdownRpc']['meta_info'] class LocalUsersIdentity(AuthenticationMethodIdentity): """ Indicates password\-based authentication of locally configured users. """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): AuthenticationMethodIdentity.__init__(self) @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['LocalUsersIdentity']['meta_info'] class RadiusPapIdentity(RadiusAuthenticationTypeIdentity): """ The device requests Password Authentication Protocol (PAP) authentication from the RADIUS server. 
""" _prefix = 'sys' _revision = '2014-08-06' def __init__(self): RadiusAuthenticationTypeIdentity.__init__(self) @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['RadiusPapIdentity']['meta_info'] class RadiusChapIdentity(RadiusAuthenticationTypeIdentity): """ The device requests Challenge Handshake Authentication Protocol (CHAP) authentication from the RADIUS server. """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): RadiusAuthenticationTypeIdentity.__init__(self) @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['RadiusChapIdentity']['meta_info'] class RadiusIdentity(AuthenticationMethodIdentity): """ Indicates user authentication using RADIUS. """ _prefix = 'sys' _revision = '2014-08-06' def __init__(self): AuthenticationMethodIdentity.__init__(self) @staticmethod def _meta_info(): from ydk.models.ietf._meta import _ietf_system as meta return meta._meta_table['RadiusIdentity']['meta_info']
{ "content_hash": "5e79b69d013afcf90aee8d8b6c80bd62", "timestamp": "", "source": "github", "line_count": 1695, "max_line_length": 626, "avg_line_length": 31.811209439528024, "alnum_prop": 0.5109792284866469, "repo_name": "111pontes/ydk-py", "id": "f7eadec0c9d92b417ed6af09829fedccbda4eada", "size": "53920", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ietf/ydk/models/ietf/ietf_system.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "7226" }, { "name": "Python", "bytes": "446117948" } ], "symlink_target": "" }
""" Created on Nov 30, 2015 @author: jakob """ from django.dispatch.dispatcher import receiver from cms.models.titlemodels import Title from cms.signals import post_publish, post_unpublish from .signals import add_to_index, remove_from_index @receiver(post_publish, dispatch_uid='publish_cms_page') def publish_cms_page(sender, instance, language, **kwargs): title = instance.publisher_public.get_title_obj(language) add_to_index.send(sender=Title, instance=title, object_action='publish') @receiver(post_unpublish, dispatch_uid='unpublish_cms_page') def unpublish_cms_page(sender, instance, language, **kwargs): title = instance.publisher_public.get_title_obj(language) remove_from_index.send(sender=Title, instance=title, object_action='unpublish')
{ "content_hash": "44e2cd4187bc48c51c871e7fd5f71040", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 83, "avg_line_length": 35.13636363636363, "alnum_prop": 0.7645536869340233, "repo_name": "aldryn/aldryn-search", "id": "1e7e54b23e74acddd251d23466b508dd09660546", "size": "797", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "aldryn_search/receivers.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "2279" }, { "name": "Python", "bytes": "49472" }, { "name": "Shell", "bytes": "88" } ], "symlink_target": "" }
import logging
import posixpath
import traceback

from data_source import DataSource
from docs_server_utils import FormatKey
from extensions_paths import (
    ARTICLES_TEMPLATES, INTROS_TEMPLATES, PRIVATE_TEMPLATES)
from file_system import FileNotFoundError
from future import All
from path_util import AssertIsDirectory


class TemplateDataSource(DataSource):
  '''Provides a DataSource interface for compiled templates.
  '''

  def __init__(self, server_instance, request=None):
    # Each concrete subclass pins its template root via the _BASE attribute.
    self._dir = type(self)._BASE
    AssertIsDirectory(self._dir)
    self._request = request
    self._template_cache = server_instance.compiled_fs_factory.ForTemplates(
        server_instance.host_file_system_provider.GetTrunk())
    self._file_system = server_instance.host_file_system_provider.GetTrunk()

  def get(self, path):
    '''Returns the compiled template for |path|, or None if it is missing.'''
    template_key = '%s%s' % (self._dir, FormatKey(path))
    try:
      return self._template_cache.GetFromFile(template_key).Get()
    except FileNotFoundError:
      # A missing template is non-fatal; record the traceback and return None.
      logging.warning(traceback.format_exc())
      return None

  def Cron(self):
    '''Pre-compiles every .html template under the base directory.'''
    template_futures = []
    for root, _, files in self._file_system.Walk(self._dir):
      for filename in files:
        if posixpath.splitext(filename)[1] != '.html':
          continue
        template_futures.append(self._template_cache.GetFromFile(
            posixpath.join(self._dir, root, FormatKey(filename))))
    return All(template_futures)


class ArticleDataSource(TemplateDataSource):
  '''Serves templates for Articles.
  '''
  _BASE = ARTICLES_TEMPLATES


class IntroDataSource(TemplateDataSource):
  '''Serves templates for Intros.
  '''
  _BASE = INTROS_TEMPLATES


class PartialDataSource(TemplateDataSource):
  '''Serves templates for private templates.
  '''
  _BASE = PRIVATE_TEMPLATES
{ "content_hash": "3b548629422c5606836a34bad6d0ea76", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 76, "avg_line_length": 29.46551724137931, "alnum_prop": 0.6974839087185488, "repo_name": "xin3liang/platform_external_chromium_org", "id": "c7ba8069151f635bf3daa5ba28b6e81d52a2e780", "size": "1876", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "chrome/common/extensions/docs/server2/template_data_source.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AppleScript", "bytes": "6973" }, { "name": "Assembly", "bytes": "53530" }, { "name": "Awk", "bytes": "7721" }, { "name": "C", "bytes": "35959480" }, { "name": "C++", "bytes": "223698188" }, { "name": "CSS", "bytes": "973752" }, { "name": "Java", "bytes": "6804114" }, { "name": "JavaScript", "bytes": "12537619" }, { "name": "Mercury", "bytes": "9480" }, { "name": "Objective-C", "bytes": "880662" }, { "name": "Objective-C++", "bytes": "7094479" }, { "name": "PHP", "bytes": "61320" }, { "name": "Perl", "bytes": "644436" }, { "name": "Python", "bytes": "10027001" }, { "name": "Shell", "bytes": "1313358" }, { "name": "Standard ML", "bytes": "4131" }, { "name": "Tcl", "bytes": "277091" }, { "name": "XSLT", "bytes": "12410" }, { "name": "nesC", "bytes": "15206" } ], "symlink_target": "" }