text
stringlengths
4
1.02M
meta
dict
from PIL import Image, ImageFilter
import json
import os
import random

RES_DIR = 'res'
# Image extensions we treat as averageable inputs.
IMG_EXTS = ('.jpg', '.jpeg', '.png')
# Naming convention for output files: "<a>_<b>avg.jpg"
AVG_SUFFIX = 'avg.jpg'


def average(num1, num2):
    """Return the average of two numbers.

    Note we truncate before adding, not after: each operand is halved with
    truncating division first, then the halves are summed.
    """
    avg = (num1 // 2) + (num2 // 2)
    return avg


def take_Average(px1, px2, pixels, width, height):
    """Fill ``pixels`` with the channel-wise average of ``px1`` and ``px2``.

    ``px1``/``px2``/``pixels`` are PIL pixel-access objects; only the region
    ``width`` x ``height`` (the smaller common dimensions) is touched.
    Returns ``pixels`` for convenience.
    """
    for x in range(width):
        for y in range(height):
            # Get current RGB values from both source images
            # ([:3] drops an alpha channel if one is present).
            r1, g1, b1 = px1[x, y][:3]
            r2, g2, b2 = px2[x, y][:3]
            # Average each channel and store it in the output image.
            r = average(r1, r2)
            g = average(g1, g2)
            b = average(b1, b2)
            pixels[x, y] = (r, g, b)
    return pixels


def process_ImgFiles(fn1, fn2):
    """Load two image files and return a new Image holding their average.

    If the two images are not the same size we take the simple approach of
    only averaging over the smaller of each dimension.
    """
    im1 = Image.open(fn1).convert('RGB')
    im2 = Image.open(fn2).convert('RGB')
    pixels1 = im1.load()
    pixels2 = im2.load()
    width1, height1 = im1.size
    width2, height2 = im2.size
    # Only operate on the smaller dimensions.
    width = min(width1, width2)
    height = min(height1, height2)
    # Create a new Image with the minimum dimensions to store the average.
    im = Image.new('RGB', (width, height), "white")
    avgpixels = im.load()
    avgpixels = take_Average(pixels1, pixels2, avgpixels, width, height)
    return im


def do_compute():  # required for web app
    """Average five random pairs of images from res/ and record the results.

    Writes each averaged image next to its sources (named "<a>_<b>avg.jpg")
    and dumps a JSON list of {'fn1', 'fn2', 'avg'} dicts to res/data.json.
    """
    # All image files in res/ that are not themselves averaged outputs.
    filenames = [
        fn for fn in os.listdir(RES_DIR)
        if fn.lower().endswith(IMG_EXTS) and not fn.endswith(AVG_SUFFIX)
    ]
    # Prepend the directory so the paths are usable as-is.
    img_filenames = [os.path.join(RES_DIR, fn) for fn in filenames]
    outData = []
    for _ in range(5):
        # Pick two distinct files at random to average.
        fn1 = random.choice(img_filenames)
        fn2 = fn1
        while fn2 == fn1:
            fn2 = random.choice(img_filenames)
        # "1.jpg" + "2.jpg" -> "1_2avg.jpg"
        base1 = os.path.splitext(os.path.basename(fn1))[0]
        base2 = os.path.splitext(os.path.basename(fn2))[0]
        new_fn = os.path.join(RES_DIR, '%s_%s%s' % (base1, base2, AVG_SUFFIX))
        im = process_ImgFiles(fn1, fn2)
        im.save(new_fn)
        outData.append({'fn1': fn1, 'fn2': fn2, 'avg': new_fn})
    # Output the data in a format readable by the workbook.
    with open('res/data.json', 'w') as outfile:
        json.dump(outData, outfile, indent=4, ensure_ascii=False)
{ "content_hash": "8b537a035ff6d8928bb6169bcab12339", "timestamp": "", "source": "github", "line_count": 105, "max_line_length": 149, "avg_line_length": 31.34285714285714, "alnum_prop": 0.650865998176846, "repo_name": "CS205IL-sp15/workbook", "id": "2993aed01505946571fbf23ea11099ebb7b514b1", "size": "3325", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lab_Averages/py/compute.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "291" }, { "name": "HTML", "bytes": "27022" }, { "name": "Handlebars", "bytes": "522" }, { "name": "JavaScript", "bytes": "116411" }, { "name": "Python", "bytes": "58061" } ], "symlink_target": "" }
""" Django settings for nativx_survey project. Generated by 'django-admin startproject' using Django 1.10a1. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '@*bl9s$d1ok^c+@pe3x#_noh8&#&1l-l@h6y(vb1cm^*8mh(yw' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = [ '*', # 'nativx-survey.us-east-1.elasticbeanstalk.com/', # 'ec2-54-209-106-225.compute-1.amazonaws.com', # 'nativx-survey.us-east-1.elasticbeanstalk.com/', # '127.0.0.1:8000/', # '162.254.154.180/' ] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'nativx_survey.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 
'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'nativx_survey.wsgi.application' # Database # https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/dev/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static-root') STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), '/var/www/static/', )
{ "content_hash": "7774213b141f7a9e6cd736789e50ed56", "timestamp": "", "source": "github", "line_count": 135, "max_line_length": 91, "avg_line_length": 26.044444444444444, "alnum_prop": 0.6806029579067122, "repo_name": "DLance96/nativx-survey", "id": "b729e315353f13e0a1ba216ff096066a1a31a2c3", "size": "3516", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nativx_survey/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "293" }, { "name": "HTML", "bytes": "8378" }, { "name": "JavaScript", "bytes": "484" }, { "name": "Python", "bytes": "17521" } ], "symlink_target": "" }
"""Packaging script for django-availability."""
from distutils.core import setup

setup(
    name='django-availability',
    version='0.1.0',
    author='dashavoo',
    author_email='',
    packages=['availability'],
    url='https://github.com/dashavoo/django-availability',
    license='LICENSE.txt',
    description='Manage availability of objects in Django',
    # Long description is read from the README at build time.
    long_description=open('README.md').read(),
    # NOTE(review): zip_safe / include_package_data are setuptools options;
    # plain distutils (imported above) ignores them -- confirm which backend
    # is intended.
    zip_safe = False,
    include_package_data=True,
)
{ "content_hash": "f82cbf68af5d3ca6affd78535bdb3334", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 59, "avg_line_length": 27.8, "alnum_prop": 0.6738609112709832, "repo_name": "dashavoo/django-availability", "id": "b8d91fb3065c181adb0c4ce16773467946276c52", "size": "417", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "4861" } ], "symlink_target": "" }
"""Unit tests XACML Context handler. This PIP presents a SAML interface for its Policy Enforcement Point and has a SAML interface to query a remote attribute authority for attributes """ __author__ = "P J Kershaw" __date__ = "13/08/10" __copyright__ = "(C) 2010 Science and Technology Facilities Council" __license__ = "BSD - see LICENSE file in top-level directory" __contact__ = "Philip.Kershaw@stfc.ac.uk" __revision__ = '$Id$' import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) from os import path import unittest from ConfigParser import SafeConfigParser from ndg.security.test.unit.base import BaseTestCase from ndg.security.server.xacml.ctx_handler.saml_ctx_handler import SamlCtxHandler class SamlCtxHandlerTestCase(BaseTestCase): """Test XACML Context handler. This PIP presents a SAML interface for its Policy Enforcement Point and has a SAML interface to query a remote attribute authority for attributes """ THIS_DIR = path.abspath(path.dirname(__file__)) CONFIG_FILENAME = 'saml_ctx_handler.cfg' CONFIG_FILEPATH = path.join(THIS_DIR, CONFIG_FILENAME) def test01Init(self): handler = SamlCtxHandler() self.assert_(handler) def test02InitFromConfigFile(self): # Initialise from settings in a config file handler = SamlCtxHandler.fromConfig(self.__class__.CONFIG_FILEPATH) self.assert_(handler) self.assert_(handler.policyFilePath) def test03InitFromKeywords(self): # Initialise from a dictionary # Populate by reading from the config file cfg = SafeConfigParser(defaults={'here': self.__class__.THIS_DIR}) cfg.optionxform = str cfg.read(self.__class__.CONFIG_FILEPATH) kw = dict(cfg.items('DEFAULT')) handler = SamlCtxHandler.fromKeywords(**kw) self.assert_(handler) self.assert_(handler.pip.attributeQuery) self.assert_(handler.policyFilePath) self.assert_(handler.issuerName) self.assert_(handler.issuerFormat) self.assert_(handler.assertionLifetime) self.assert_(handler.xacmlExtFunc) if __name__ == "__main__": unittest.main()
{ "content_hash": "26955d8e94f19eb732b198d75af1f09c", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 81, "avg_line_length": 36.37096774193548, "alnum_prop": 0.6811529933481153, "repo_name": "philipkershaw/ndg_security_server", "id": "7070c7bf847d52ce24acd69b1ef9ae2759323c8f", "size": "2255", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ndg/security/server/test/unit/authz/xacml/test_saml_ctx_handler.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "206589" }, { "name": "JavaScript", "bytes": "32078" }, { "name": "Python", "bytes": "861854" } ], "symlink_target": "" }
"""Configuration for report-data storage."""
import os

# Directory where report data files are written.
DATA_DIR = '/tmp/report_data'

# Make directory, if it doesn't exist.
# (The original file had only this comment; the code it promised was missing.)
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)
{ "content_hash": "c2669dc7800ee2ba6484e53066e09184", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 38, "avg_line_length": 33.5, "alnum_prop": 0.7164179104477612, "repo_name": "jfalkner/report_data", "id": "268eef773386a1714c115673432e3aebce152559", "size": "67", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "report_data/conf.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1589" }, { "name": "Python", "bytes": "77245" } ], "symlink_target": "" }
"""Tests for cement.core.setup.""" import os import sys import json import signal from time import sleep from cement.core import foundation, exc, backend, config, extension, plugin from cement.core.handler import CementBaseHandler from cement.core.controller import CementBaseController, expose from cement.core import log, output, hook, arg, controller from cement.core.interface import Interface from cement.utils import test from cement.core.exc import CaughtSignal from cement.utils.misc import init_defaults, rando, minimal_logger from nose.plugins.attrib import attr APP = rando()[:12] def my_extended_func(): return 'KAPLA' class DeprecatedApp(foundation.CementApp): class Meta: label = 'deprecated' defaults = None class HookTestException(Exception): pass class MyTestInterface(Interface): class IMeta: label = 'my_test_interface' class MyTestHandler(CementBaseHandler): class Meta: label = 'my_test_handler' interface = MyTestInterface class TestOutputHandler(output.CementOutputHandler): file_suffix = None class Meta: interface = output.IOutput label = 'test_output_handler' def _setup(self, config_obj): self.config = config_obj def render(self, data_dict, template=None): return None class BogusBaseController(controller.CementBaseController): class Meta: label = 'bad_base_controller_label' def my_hook_one(app): return 1 def my_hook_two(app): return 2 def my_hook_three(app): return 3 class FoundationTestCase(test.CementCoreTestCase): def setUp(self): super(FoundationTestCase, self).setUp() self.app = self.make_app('my_app') def test_argv_is_none(self): app = self.make_app(APP, argv=None) app.setup() self.eq(app.argv, list(sys.argv[1:])) def test_framework_logging_is_true(self): del os.environ['CEMENT_FRAMEWORK_LOGGING'] app = self.make_app(APP, argv=None, framework_logging=True) app.setup() self.eq(os.environ['CEMENT_FRAMEWORK_LOGGING'], '1') ml = minimal_logger(__name__) self.eq(ml.logging_is_enabled, True) def test_framework_logging_is_false(self): del 
os.environ['CEMENT_FRAMEWORK_LOGGING'] app = self.make_app(APP, argv=None, framework_logging=False) app.setup() self.eq(os.environ['CEMENT_FRAMEWORK_LOGGING'], '0') ml = minimal_logger(__name__) self.eq(ml.logging_is_enabled, False) # coverage... should default to True if no key in os.environ del os.environ['CEMENT_FRAMEWORK_LOGGING'] self.eq(ml.logging_is_enabled, True) def test_bootstrap(self): app = self.make_app('my_app', bootstrap='tests.bootstrap') app.setup() self.eq(app._loaded_bootstrap.__name__, 'tests.bootstrap') def test_reload_bootstrap(self): app = self.make_app('my_app', bootstrap='cement.utils.test') app._loaded_bootstrap = test app.setup() self.eq(app._loaded_bootstrap.__name__, 'cement.utils.test') def test_argv(self): app = self.make_app('my_app', argv=['bogus', 'args']) self.eq(app.argv, ['bogus', 'args']) @test.raises(exc.FrameworkError) def test_resolve_handler_bad_handler(self): class Bogus(object): pass try: self.app._resolve_handler('output', Bogus) except exc.FrameworkError as e: self.ok(e.msg.find('resolve')) raise def test_default(self): self.app.setup() self.app.run() def test_passed_handlers(self): from cement.ext import ext_configparser from cement.ext import ext_logging from cement.ext import ext_argparse from cement.ext import ext_plugin from cement.ext import ext_dummy # forces CementApp._resolve_handler to register the handler from cement.ext import ext_json app = self.make_app('my-app-test', config_handler=ext_configparser.ConfigParserConfigHandler, log_handler=ext_logging.LoggingLogHandler(), arg_handler=ext_argparse.ArgParseArgumentHandler(), extension_handler=extension.CementExtensionHandler(), plugin_handler=ext_plugin.CementPluginHandler(), output_handler=ext_json.JsonOutputHandler(), mail_handler=ext_dummy.DummyMailHandler(), argv=[__file__, '--debug'] ) app.setup() def test_debug(self): app = self.make_app('my-app-test', argv=[__file__]) app.setup() self.eq(app.debug, False) self.reset_backend() app = 
self.make_app('my-app-test', argv=[__file__, '--debug']) app.setup() self.eq(app.debug, True) self.reset_backend() defaults = init_defaults('my-app-test') defaults['my-app-test']['debug'] = True app = self.make_app('my-app-test', argv=[__file__], config_defaults=defaults) app.setup() self.eq(app.debug, True) def test_render(self): # Render with default self.app.setup() self.app.render(dict(foo='bar')) # Render with no output_handler... this is hackish, but there are # circumstances where app.output would be None. app = self.make_app('test', output_handler=None) app.setup() app.output = None app.render(dict(foo='bar')) def test_render_out_to_file(self): self.app = self.make_app(APP, extensions=['json'], output_handler='json') self.app.setup() self.app.run() f = open(self.tmp_file, 'w') self.app.render(dict(foo='bar'), out=f) f.close() f = open(self.tmp_file, 'r') data = json.load(f) f.close() self.eq(data, dict(foo='bar')) @test.raises(TypeError) def test_render_bad_out(self): self.app.setup() self.app.run() try: self.app.render(dict(foo='bar'), out='bogus type') except TypeError as e: self.eq(e.args[0], "Argument 'out' must be a 'file' like object") raise @test.raises(exc.FrameworkError) def test_bad_label(self): try: app = foundation.CementApp(None) except exc.FrameworkError as e: # FIX ME: verify error msg raise @test.raises(exc.FrameworkError) def test_bad_label_chars(self): try: app = foundation.CementApp('some!bogus()label') except exc.FrameworkError as e: self.ok(e.msg.find('alpha-numeric')) raise def test_add_arg_shortcut(self): self.app.setup() self.app.add_arg('--foo', action='store') def test_reset_output_handler(self): app = self.make_app('test', argv=[], output_handler=TestOutputHandler) app.setup() app.run() app.output = None app._meta.output_handler = None app._setup_output_handler() def test_lay_cement(self): app = self.make_app('test', argv=['--quiet']) def test_none_member(self): class Test(object): var = None self.app.setup() 
self.app.args.parsed_args = Test() try: self.app._parse_args() except SystemExit: pass @test.raises(exc.CaughtSignal) def test_cement_signal_handler(self): import signal import types global app app = self.make_app('test') frame = sys._getframe(0) try: foundation.cement_signal_handler(signal.SIGTERM, frame) except exc.CaughtSignal as e: self.eq(e.signum, signal.SIGTERM) self.ok(isinstance(e.frame, types.FrameType)) raise def test_cement_without_signals(self): app = self.make_app('test', catch_signals=None) app.setup() def test_extend(self): self.app.extend('kapla', my_extended_func) self.eq(self.app.kapla(), 'KAPLA') @test.raises(exc.FrameworkError) def test_extended_duplicate(self): self.app.extend('config', my_extended_func) def test_no_handler(self): app = self.make_app(APP) app._resolve_handler('cache', None, raise_error=False) def test_config_files_is_none(self): app = self.make_app(APP, config_files=None) app.setup() label = APP user_home = os.path.abspath(os.path.expanduser(os.environ['HOME'])) files = [ os.path.join('/', 'etc', label, '%s.conf' % label), os.path.join(user_home, '.%s.conf' % label), os.path.join(user_home, '.%s' % label, 'config'), ] for f in files: res = f in app._meta.config_files self.ok(res) @test.raises(exc.FrameworkError) def test_base_controller_label(self): app = self.make_app(APP, base_controller=BogusBaseController) app.setup() def test_pargs(self): app = self.make_app(argv=['--debug']) app.setup() app.run() self.eq(app.pargs.debug, True) def test_last_rendered(self): self.app.setup() output_text = self.app.render({'foo': 'bar'}) last_data, last_output = self.app.last_rendered self.eq({'foo': 'bar'}, last_data) self.eq(output_text, last_output) def test_get_last_rendered(self): # DEPRECATED - REMOVE AFTER THE FUNCTION IS REMOVED self.app.setup() output_text = self.app.render({'foo': 'bar'}) last_data, last_output = self.app.get_last_rendered() self.eq({'foo': 'bar'}, last_data) self.eq(output_text, last_output) def 
test_with_operator(self): with self.app_class() as app: app.run() @test.raises(SystemExit) def test_close_with_code(self): app = self.make_app(APP, exit_on_close=True) app.setup() app.run() try: app.close(114) except SystemExit as e: self.eq(e.code, 114) raise @test.raises(AssertionError) def test_close_with_bad_code(self): self.app.setup() self.app.run() try: self.app.close('Not An Int') except AssertionError as e: self.eq(e.args[0], "Invalid exit status code (must be integer)") raise def test_handler_override_options(self): app = self.make_app(APP, argv=['-o', 'json'], extensions=['yaml', 'json'], ) app.setup() app.run() self.eq(app._meta.output_handler, 'json') def test_handler_override_options_is_none(self): app = self.make_app(APP, core_handler_override_options=None, handler_override_options=None ) app.setup() app.run() def test_handler_override_invalid_interface(self): app = self.make_app(APP, handler_override_options=dict( bogus_interface=(['-f'], ['--foo'], {}), ) ) app.setup() app.run() def test_handler_override_options_not_passed(self): app = self.make_app(APP, extensions=['yaml', 'json'], ) app.setup() app.run() def test_suppress_output_while_debug(self): app = self.make_app(APP, debug=True) app.setup() app._suppress_output() def test_core_meta_override(self): defaults = init_defaults(APP) defaults[APP]['mail_handler'] = 'dummy' app = self.make_app(APP, debug=True, config_defaults=defaults) app.setup() app.run() def test_define_hooks_meta(self): app = self.make_app(APP, define_hooks=['my_custom_hook']) app.setup() self.ok(hook.defined('my_custom_hook')) @test.raises(HookTestException) def test_register_hooks_meta(self): def my_custom_hook_func(): raise HookTestException('OK') app = self.make_app(APP, define_hooks=['my_custom_hook'], hooks=[('my_custom_hook', my_custom_hook_func)]) app.setup() for res in hook.run('my_custom_hook'): pass def test_define_handlers_meta(self): app = self.make_app(APP, define_handlers=[MyTestInterface]) app.setup() 
self.ok(app.handler.defined('my_test_interface')) def test_register_handlers_meta(self): app = self.make_app(APP, define_handlers=[MyTestInterface], handlers=[MyTestHandler], ) app.setup() self.ok(app.handler.registered('my_test_interface', 'my_test_handler')) def test_disable_backend_globals(self): app = self.make_app(APP, use_backend_globals=False, define_handlers=[MyTestInterface], handlers=[MyTestHandler], define_hooks=['my_hook'], ) app.setup() self.ok(app.handler.registered('my_test_interface', 'my_test_handler')) self.ok(app.hook.defined('my_hook')) def test_reload(self): with self.app as app: app.hook.define('bogus_hook1') app.handler.define(MyTestInterface) app.run() self.ok(app.hook.defined('bogus_hook1')) self.ok(app.handler.defined('my_test_interface')) app.reload() self.eq(app.hook.defined('bogus_hook1'), False) self.eq(app.handler.defined('my_test_interface'), False) app.run() @test.raises(AssertionError) def test_run_forever(self): class Controller(CementBaseController): class Meta: label = 'base' @expose() def runit(self): raise Exception("Fake some error") app = self.make_app(base_controller=Controller, argv=['runit']) def handler(signum, frame): raise AssertionError('It ran forever!') # set the signal handler and a 5-second alarm signal.signal(signal.SIGALRM, handler) signal.alarm(5) try: # this will run indefinitely with app as app: app.run_forever() except AssertionError as e: self.eq(e.args[0], 'It ran forever!') raise finally: signal.alarm(0)
{ "content_hash": "123c0284605f516ca279d28abab0b4cc", "timestamp": "", "source": "github", "line_count": 495, "max_line_length": 78, "avg_line_length": 30.038383838383837, "alnum_prop": 0.5673548994552424, "repo_name": "fxstein/cement", "id": "e5b57fbb694564a4e04aecae374a56ddbc880efc", "size": "14869", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/core/foundation_tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "91" }, { "name": "Python", "bytes": "408666" }, { "name": "Shell", "bytes": "1358" } ], "symlink_target": "" }
# Re-export the contents of the sibling ``object`` module.
# NOTE(review): this is a Python 2 implicit-relative import; under Python 3
# it would need to be ``from .object import *`` -- confirm target runtime.
from object import *

# Public API of this package: only ``Result`` is exported.
__all__ = ['Result']
{ "content_hash": "5ef60d1997c5e65b6be79876c26d822d", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 20, "avg_line_length": 14.333333333333334, "alnum_prop": 0.5813953488372093, "repo_name": "rlinguri/pyfi", "id": "7b624988f916d2bc5ebcfa811c1734c5c5d718c7", "size": "66", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyfi/yhapi/result/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "41076" } ], "symlink_target": "" }
# Auto-generated setuptools entry-point launcher for Babel's ``pybabel``
# console script.  Do not edit by hand; it is regenerated on (re)install.
__requires__ = 'Babel==1.3'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Resolve the 'pybabel' console_scripts entry point pinned to Babel 1.3,
    # call it, and use its return value as the process exit status.
    sys.exit(
        load_entry_point('Babel==1.3', 'console_scripts', 'pybabel')()
    )
{ "content_hash": "282434e7178e020cd7262f6fe861d9f0", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 70, "avg_line_length": 25.125, "alnum_prop": 0.5920398009950248, "repo_name": "marcosxddh/aula_script", "id": "ebd4e781d94d4c75807b762f0fdcae035069b447", "size": "320", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "backend/venv/Scripts/pybabel-script.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "42358" }, { "name": "C++", "bytes": "3500" }, { "name": "CSS", "bytes": "128629" }, { "name": "JavaScript", "bytes": "4226" }, { "name": "PowerShell", "bytes": "8104" }, { "name": "Python", "bytes": "94796" }, { "name": "Shell", "bytes": "4168" } ], "symlink_target": "" }
"""Python serializer Serialize object to Python code. The following data types are supported: - str, - dict, - list, - set, - tuple, - boolean values, - None value; If you encounter a circular reference, an ValueError will be thrown. """ from typing import Any, Set from o2a.utils.el_utils import escape_string_with_python_escapes def serialize(serializable_obj: Any) -> str: """ Serialize to Python code """ def serialize_recursively(target: Any, markers: Set[int]) -> str: marker_id = id(target) if marker_id in markers: raise ValueError("Circular reference detected") markers.add(marker_id) if isinstance(target, str): buf = f"{escape_string_with_python_escapes(target)}" elif isinstance(target, dict): buf = "{" buf += ", ".join( f"{serialize_recursively(key, markers)}: {serialize_recursively(value, markers)}" for key, value in target.items() ) buf += "}" elif isinstance(target, list): buf = "[" buf += ", ".join(serialize_recursively(item, markers) for item in target) buf += "]" elif isinstance(target, set): if target: buf = "{" buf += ", ".join(serialize_recursively(item, markers) for item in target) buf += "}" else: buf = "set()" elif isinstance(target, tuple): buf = "(" buf += ", ".join(serialize_recursively(item, markers) for item in target) buf += ")" elif target is True: buf = "True" elif target is False: buf = "False" elif target is None: buf = "None" else: raise ValueError(f"Type '{type(target)}' is not serializable") markers.remove(marker_id) return buf return serialize_recursively(serializable_obj, set())
{ "content_hash": "87244626564dac2f5792d1340e922eae", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 97, "avg_line_length": 29.25, "alnum_prop": 0.5485168426344897, "repo_name": "GoogleCloudPlatform/oozie-to-airflow", "id": "997521b2e8fdbeba33188d295faa156760c8fa74", "size": "2583", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "o2a/utils/python_serializer.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "528273" }, { "name": "Shell", "bytes": "57460" }, { "name": "Smarty", "bytes": "31948" } ], "symlink_target": "" }
import math from argparse import Namespace from dataclasses import dataclass, field from omegaconf import II from typing import Optional import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from fairseq.data.data_utils import post_process from fairseq.tasks import FairseqTask from fairseq.logging.meters import safe_round @dataclass class CtcCriterionConfig(FairseqDataclass): zero_infinity: bool = field( default=False, metadata={"help": "zero inf loss when source length <= target length"}, ) sentence_avg: bool = II("optimization.sentence_avg") post_process: str = field( default="letter", metadata={ "help": "how to post process predictions into words. can be letter, " "wordpiece, BPE symbols, etc. " "See fairseq.data.data_utils.post_process() for full list of options" }, ) wer_kenlm_model: Optional[str] = field( default=None, metadata={ "help": "if this is provided, use kenlm to compute wer (along with other wer_* args)" }, ) wer_lexicon: Optional[str] = field( default=None, metadata={"help": "lexicon to use with wer_kenlm_model"}, ) wer_lm_weight: float = field( default=2.0, metadata={"help": "lm weight to use with wer_kenlm_model"}, ) wer_word_score: float = field( default=-1.0, metadata={"help": "lm word score to use with wer_kenlm_model"}, ) wer_args: Optional[str] = field( default=None, metadata={ "help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)" }, ) @register_criterion("ctc", dataclass=CtcCriterionConfig) class CtcCriterion(FairseqCriterion): def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask): super().__init__(task) self.blank_idx = ( task.target_dictionary.index(task.blank_symbol) if hasattr(task, "blank_symbol") else 0 ) self.pad_idx = task.target_dictionary.pad() self.eos_idx = task.target_dictionary.eos() self.post_process = cfg.post_process if cfg.wer_args is 
not None: ( cfg.wer_kenlm_model, cfg.wer_lexicon, cfg.wer_lm_weight, cfg.wer_word_score, ) = eval(cfg.wer_args) if cfg.wer_kenlm_model is not None and cfg.wer_kenlm_model != "": from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder dec_args = Namespace() dec_args.nbest = 1 dec_args.criterion = "ctc" dec_args.kenlm_model = cfg.wer_kenlm_model dec_args.lexicon = cfg.wer_lexicon dec_args.beam = 50 dec_args.beam_size_token = min(50, len(task.target_dictionary)) dec_args.beam_threshold = min(50, len(task.target_dictionary)) dec_args.lm_weight = cfg.wer_lm_weight dec_args.word_score = cfg.wer_word_score dec_args.unk_weight = -math.inf dec_args.sil_weight = 0 self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary) else: self.w2l_decoder = None self.zero_infinity = cfg.zero_infinity self.sentence_avg = cfg.sentence_avg def forward(self, model, sample, reduce=True): net_output = model(**sample["net_input"]) lprobs = model.get_normalized_probs( net_output, log_probs=True ).contiguous() # (T, B, C) from the encoder if "src_lengths" in sample["net_input"]: input_lengths = sample["net_input"]["src_lengths"] else: if net_output["padding_mask"] is not None: non_padding_mask = ~net_output["padding_mask"] input_lengths = non_padding_mask.long().sum(-1) else: input_lengths = lprobs.new_full( (lprobs.size(1),), lprobs.size(0), dtype=torch.long ) pad_mask = (sample["target"] != self.pad_idx) & ( sample["target"] != self.eos_idx ) targets_flat = sample["target"].masked_select(pad_mask) if "target_lengths" in sample: target_lengths = sample["target_lengths"] else: target_lengths = pad_mask.sum(-1) with torch.backends.cudnn.flags(enabled=False): loss = F.ctc_loss( lprobs, targets_flat, input_lengths, target_lengths, blank=self.blank_idx, reduction="sum", zero_infinity=self.zero_infinity, ) ntokens = ( sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item() ) sample_size = sample["target"].size(0) if self.sentence_avg else ntokens 
logging_output = { "loss": utils.item(loss.data), # * sample['ntokens'], "ntokens": ntokens, "nsentences": sample["id"].numel(), "sample_size": sample_size, } if not model.training: import editdistance with torch.no_grad(): lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu() c_err = 0 c_len = 0 w_errs = 0 w_len = 0 wv_errs = 0 for lp, t, inp_l in zip( lprobs_t, sample["target_label"] if "target_label" in sample else sample["target"], input_lengths, ): lp = lp[:inp_l].unsqueeze(0) decoded = None if self.w2l_decoder is not None: decoded = self.w2l_decoder.decode(lp) if len(decoded) < 1: decoded = None else: decoded = decoded[0] if len(decoded) < 1: decoded = None else: decoded = decoded[0] p = (t != self.task.target_dictionary.pad()) & ( t != self.task.target_dictionary.eos() ) targ = t[p] targ_units = self.task.target_dictionary.string(targ) targ_units_arr = targ.tolist() toks = lp.argmax(dim=-1).unique_consecutive() pred_units_arr = toks[toks != self.blank_idx].tolist() c_err += editdistance.eval(pred_units_arr, targ_units_arr) c_len += len(targ_units_arr) targ_words = post_process(targ_units, self.post_process).split() pred_units = self.task.target_dictionary.string(pred_units_arr) pred_words_raw = post_process(pred_units, self.post_process).split() if decoded is not None and "words" in decoded: pred_words = decoded["words"] w_errs += editdistance.eval(pred_words, targ_words) wv_errs += editdistance.eval(pred_words_raw, targ_words) else: dist = editdistance.eval(pred_words_raw, targ_words) w_errs += dist wv_errs += dist w_len += len(targ_words) logging_output["wv_errors"] = wv_errs logging_output["w_errors"] = w_errs logging_output["w_total"] = w_len logging_output["c_errors"] = c_err logging_output["c_total"] = c_len return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) 
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) nsentences = utils.item( sum(log.get("nsentences", 0) for log in logging_outputs) ) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar("ntokens", ntokens) metrics.log_scalar("nsentences", nsentences) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) c_errors = sum(log.get("c_errors", 0) for log in logging_outputs) metrics.log_scalar("_c_errors", c_errors) c_total = sum(log.get("c_total", 0) for log in logging_outputs) metrics.log_scalar("_c_total", c_total) w_errors = sum(log.get("w_errors", 0) for log in logging_outputs) metrics.log_scalar("_w_errors", w_errors) wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs) metrics.log_scalar("_wv_errors", wv_errors) w_total = sum(log.get("w_total", 0) for log in logging_outputs) metrics.log_scalar("_w_total", w_total) if c_total > 0: metrics.log_derived( "uer", lambda meters: safe_round( meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3 ) if meters["_c_total"].sum > 0 else float("nan"), ) if w_total > 0: metrics.log_derived( "wer", lambda meters: safe_round( meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3 ) if meters["_w_total"].sum > 0 else float("nan"), ) metrics.log_derived( "raw_wer", lambda meters: safe_round( meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3 ) if meters["_w_total"].sum > 0 else float("nan"), ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
{ "content_hash": "ab7175139db6db460ffc2bfd61bd9b55", "timestamp": "", "source": "github", "line_count": 289, "max_line_length": 104, "avg_line_length": 37.55709342560554, "alnum_prop": 0.5231251151649161, "repo_name": "pytorch/fairseq", "id": "e966e47cf2680c6d47a75aa6bdbe5bef05113aa8", "size": "11095", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "fairseq/criterions/ctc.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "21106" }, { "name": "Cuda", "bytes": "38166" }, { "name": "Cython", "bytes": "13294" }, { "name": "Lua", "bytes": "4210" }, { "name": "Python", "bytes": "3699357" }, { "name": "Shell", "bytes": "2182" } ], "symlink_target": "" }
"""Set a LORD MicroStrain inertial node to idle.

Opens a serial connection to the device on COM_PORT and issues the
setToIdle command, stopping any active sampling/streaming.  Requires the
MSCL library bundled in the repository's dependencies directory.
"""
import sys

# Make the bundled MSCL build importable before the import below.
sys.path.append("../../dependencies/Python")
import mscl

#TODO: change these constants to match your setup
COM_PORT = "COM4"

try:
    #create a Serial Connection with the specified COM Port, default baud rate of 921600
    connection = mscl.Connection.Serial(COM_PORT)

    #create an InertialNode with the connection
    node = mscl.InertialNode(connection)

    # Stop the node from sampling/streaming data.
    node.setToIdle()

    #Note: you can also disable the datastream for each class/category
    # separately if desired, by using the enableDataStream command shown in
    # the startSampling example, but passing a second parameter of 'false'

# FIX: "except mscl.Error, e:" and the bare print statement are Python 2-only
# syntax and fail to parse on Python 3; this form works on Python 3 (and 2.6+).
except mscl.Error as e:
    print("Error:", e)
{ "content_hash": "50b232d8052063922a96527ca34da985", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 88, "avg_line_length": 30.545454545454547, "alnum_prop": 0.7232142857142857, "repo_name": "LORD-MicroStrain/MSCL", "id": "f6916ed0db35814172a64df5a5cb892016531086", "size": "697", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "MSCL_Examples/Inertial/Python/setToIdle.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "15074" }, { "name": "C", "bytes": "19995" }, { "name": "C#", "bytes": "2883968" }, { "name": "C++", "bytes": "6340918" }, { "name": "CSS", "bytes": "45608" }, { "name": "MATLAB", "bytes": "4449" }, { "name": "Python", "bytes": "45086" }, { "name": "SWIG", "bytes": "136945" } ], "symlink_target": "" }
""" WSGI config for postero project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application from .settings import ENV os.environ.setdefault("DJANGO_SETTINGS_MODULE", "postero.settings") application = get_wsgi_application() if ENV == 'Production': from whitenoise.django import DjangoWhiteNoise application = DjangoWhiteNoise(application)
{ "content_hash": "9c93c7ace67b80b2dd1f64ea848b768a", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 78, "avg_line_length": 25.761904761904763, "alnum_prop": 0.7707948243992606, "repo_name": "PosteroCompany/postero.com.br", "id": "a5f3de204ed64e958f336be0c6e115738e1ef8a6", "size": "541", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "postero/postero/wsgi.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "31652" }, { "name": "HTML", "bytes": "15069" }, { "name": "JavaScript", "bytes": "2217" }, { "name": "Python", "bytes": "6285" } ], "symlink_target": "" }
from userpass import Userpass, UserpassError
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_raises
from nose.tools import raises


class Test_Userpass(object):
    """Unit tests for the Userpass user/password store."""

    def test_userpass_001(self):
        """Check userpass vs. 2 user adds, default/first user"""
        userpass = Userpass()
        userpass.add_user_passwd("user1", "passwd1")
        userpass.add_user_passwd("user2", "passwd2")
        # The first user added becomes the default/current user.
        assert_equal( userpass.user, "user1" )
        assert_equal( userpass.passwd, "passwd1" )
        # passwd_for() with no argument resolves against the current user.
        assert_equal( userpass.passwd_for(), "passwd1" )
        assert_equal( userpass.passwd_for("user2"), "passwd2")

    def test_userpass_002(self):
        """Check userpass vs. 2 user adds, switch users"""
        userpass = Userpass()
        userpass.add_user_passwd("user1", "passwd1")
        userpass.add_user_passwd("user2", "passwd2")
        # Switching the current user must also switch the current password.
        userpass.user = "user2"
        assert_equal( userpass.user, "user2" )
        assert_equal( userpass.passwd, "passwd2" )

    def test_userpass_003(self):
        """Check userpass vs. 2 user adds, keys and has_key"""
        userpass = Userpass()
        userpass.add_user_passwd("user1", "passwd1")
        userpass.add_user_passwd("user2", "passwd2")
        assert_equal( userpass.has_key("user1"), True )
        assert_equal( userpass.has_key("user2"), True )
        assert_equal( userpass.has_key("user3"), False )
        assert_equal( sorted(userpass.keys()), ["user1", "user2"] )
        # keys() and users() are expected to return the same names.
        assert_equal( sorted(userpass.keys()), sorted(userpass.users()) )

    def test_userpass_004(self):
        """Check userpass file load works and sets file permissions to 600"""
        #write yaml file
        filepath = "/var/tmp/test.userpass.004.yml"
        f = open( filepath, "w")
        f.write("""
users:
    user1: passwd1
    user2: passwd2
defaultuser: user1
""")
        f.close()
        #set group readable
        import os,stat
        os.chmod( filepath, stat.S_IRUSR|stat.S_IWUSR|stat.S_IRGRP)
        beforestat = os.stat(filepath)
        assert_equal( bool(beforestat.st_mode & stat.S_IRGRP), True )
        #read in yaml pwdb; loading is expected to tighten permissions to 600
        userpass = Userpass( filepath )
        #verify file is not group readable after load
        afterstat = os.stat(filepath)
        assert_equal( bool(afterstat.st_mode & stat.S_IRGRP), False )
        #perform checks per testcases above
        assert_equal( userpass.user, "user1" )
        assert_equal( userpass.passwd, "passwd1" )
        userpass.user = "user2"
        assert_equal( userpass.user, "user2" )
        assert_equal( userpass.passwd, "passwd2" )
        assert_equal( userpass.has_key("user1"), True )
        assert_equal( userpass.has_key("user2"), True )
        assert_equal( userpass.has_key("user3"), False )

    @raises(UserpassError)
    def test_userpass_005(self):
        """Switch to invalid user"""
        userpass = Userpass()
        userpass.add_user_passwd("user1", "passwd1")
        userpass.add_user_passwd("user2", "passwd2")
        # Assigning an unknown user should raise UserpassError; the final
        # assertion is unreachable when the expected exception fires.
        userpass.user = "user3"
        assert_equal( userpass.passwd, "passwd2" )
{ "content_hash": "3c0a6f0f0cf64995985466bd2f955fc9", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 77, "avg_line_length": 40.48717948717949, "alnum_prop": 0.6155794806839772, "repo_name": "francisluong/py-auth-userpass", "id": "82d14e389b058bb371a6054695fbe9c1473bb28b", "size": "3167", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/test_userpass.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "6792" } ], "symlink_target": "" }
"""OWL entity classes used by owllib: classes, individuals, and properties."""
import rdflib
from rdflib import RDF, RDFS, OWL, Literal


class Entity:
    """
    base class for all owllib entities, e.g. classes, individuals, object properties
    """

    def __init__(self, uri=None, ontology=None, labels=None, comments=None, definitions=None):
        # FIX: the original assigned the BNode *class* (``uri = rdflib.BNode``)
        # instead of constructing a fresh blank node, so anonymous entities
        # never got a valid RDF node as their uri.
        if not uri:
            uri = rdflib.BNode()

        self.uri = uri
        self.ontology = ontology
        self.annotations = set()

        # Fresh sets per instance (mutable defaults must not be shared).
        if labels:
            self.labels = labels
        else:
            self.labels = set()

        if comments:
            self.comments = comments
        else:
            self.comments = set()

        if definitions:
            self.definitions = definitions
        else:
            self.definitions = set()

        self.triples = set()
        self.parents = set()
        self.children = set()

    def sync_from_ontology(self):
        """
        makes the entity match the representation in the ontology
        :return:
        """
        if not self.ontology:
            raise ValueError("No associated ontology.")

        self.annotations = self.ontology.get_annotations(self)
        self.labels = self.ontology.get_labels(self)
        self.comments = self.ontology.get_comments(self)
        self.definitions = self.ontology.get_definitions(self)
        self.triples = self.ontology.get_triples(self)
        self.parents = self._get_parents()
        self.children = self._get_children()

    def sync_to_ontology(self):
        """
        makes the ontology representation match the entity
        :return:
        """
        self.ontology.sync_entity_to_graph(self)

    def _get_parents(self):
        # Subclasses define what "parent" means for their entity kind.
        raise NotImplementedError

    def _get_children(self):
        # Subclasses define what "child" means for their entity kind.
        raise NotImplementedError

    def is_named(self):
        """
        returns true if the uri of the entity is not a bnode
        :return:
        """
        return isinstance(self.uri, rdflib.URIRef)


class Class(Entity):
    """
    Represents an OWL2 Class
    """

    def __init__(self, uri=None, ontology=None, labels=None, comments=None):
        super(Class, self).__init__(uri, ontology, labels, comments)

    def _get_parents(self):
        """
        raises an error if there is no ontology associated; otherwise, returns all classes that this is a
        'rdfs:subClassOf'
        :return:
        """
        if not self.ontology:
            raise ValueError("No associated ontology.")

        return self.ontology.get_super_classes(self)

    def _get_children(self):
        """
        raises an error if there is no ontology associated; otherwise, returns all classes that are a
        'rdfs:subClassOf' this class
        :return:
        """
        if not self.ontology:
            raise ValueError("No associated ontology.")

        return self.ontology.get_sub_classes(self)


class Individual(Entity):
    """
    represents an OWL2 individual
    """

    def __init__(self, uri=None, ontology=None, labels=None, comments=None):
        super(Individual, self).__init__(uri, ontology, labels, comments)

    def _get_parents(self):
        """
        raises an error if there is no ontology associated; otherwise, returns all classes that this is a
        'rdf:type' of
        :return:
        """
        if not self.ontology:
            raise ValueError("No associated ontology.")

        return self.ontology.get_individual_type(self)

    def _get_children(self):
        """
        individuals cannot have children, so this returns an empty set
        :return:
        """
        return set()


class Property(Entity):
    """
    base class for the three property types in OWL
    """

    def __init__(self, uri=None, ontology=None, labels=None, comments=None):
        super(Property, self).__init__(uri, ontology, labels, comments)

    def _get_parents(self):
        """
        raises an error if there is no ontology associated; otherwise, returns all properties that this is a
        'rdfs:subPropertyOf'
        :return:
        """
        if not self.ontology:
            raise ValueError("No associated ontology.")

        return self.ontology.get_super_properties(self)

    def _get_children(self):
        """
        raises an error if there is no ontology associated; otherwise, returns all properties that are a
        'rdfs:subPropertyOf' this property
        :return:
        """
        if not self.ontology:
            raise ValueError("No associated ontology.")

        return self.ontology.get_sub_properties(self)


class ObjectProperty(Property):
    """
    represents a OWL ObjectProperty
    """

    def __init__(self, uri=None, ontology=None, labels=None, comments=None):
        super(ObjectProperty, self).__init__(uri, ontology, labels, comments)


class DataProperty(Property):
    """
    represents a OWL DataProperty
    """

    def __init__(self, uri=None, ontology=None, labels=None, comments=None):
        super(DataProperty, self).__init__(uri, ontology, labels, comments)


class AnnotationProperty(Property):
    """
    represents a OWL AnnotationProperty
    """

    def __init__(self, uri=None, ontology=None, labels=None, comments=None):
        super(AnnotationProperty, self).__init__(uri, ontology, labels, comments)
{ "content_hash": "1f6dd1d5c9937a60277a9233bc98a177", "timestamp": "", "source": "github", "line_count": 180, "max_line_length": 119, "avg_line_length": 29.011111111111113, "alnum_prop": 0.607621600919188, "repo_name": "joshhanna/owllib", "id": "6834102081af7a510e92dc58f309a00d80584679", "size": "5222", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "owllib/entities.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "26908" } ], "symlink_target": "" }
"""Tests for mopidy-pandora's PandoraPlaybackProvider (track change, skip
limits, URI translation, resume handling, and bookmark helpers)."""
from __future__ import absolute_import, division, print_function, unicode_literals

import mock

from mopidy import audio, models

from pandora import APITransport
from pandora.models.pandora import PlaylistItem

import pytest

from mopidy_pandora import playback
from mopidy_pandora.backend import MopidyAPIClient
from mopidy_pandora.library import PandoraLibraryProvider, TrackCacheItem
from mopidy_pandora.playback import PandoraPlaybackProvider
from mopidy_pandora.uri import PandoraUri

from . import conftest


@pytest.fixture
def audio_mock():
    # Mock of mopidy's Audio actor; spec keeps attribute access honest.
    audio_mock = mock.Mock(spec=audio.Audio)
    return audio_mock


@pytest.fixture
def provider(audio_mock, config):
    # Playback provider under test, wired to a conftest-provided backend.
    return playback.PandoraPlaybackProvider(
        audio=audio_mock, backend=conftest.get_backend(config)
    )


@pytest.fixture(scope="session")
def client_mock():
    client_mock = mock.Mock(spec=MopidyAPIClient)
    return client_mock


def test_change_track_enforces_skip_limit_if_no_track_available(
    provider, playlist_item_mock, caplog
):
    with mock.patch.object(
        PandoraLibraryProvider, "lookup_pandora_track", return_value=None
    ):
        track = PandoraUri.factory(playlist_item_mock)

        provider._trigger_track_unplayable = mock.PropertyMock()
        provider._trigger_skip_limit_exceeded = mock.PropertyMock(0)

        # Below the limit each failure triggers 'unplayable'; on the final
        # iteration only the skip-limit-exceeded event fires.
        for i in range(PandoraPlaybackProvider.SKIP_LIMIT + 1):
            assert provider.change_track(track) is False
            if i < PandoraPlaybackProvider.SKIP_LIMIT - 1:
                assert provider._trigger_track_unplayable.called
                provider._trigger_track_unplayable.reset_mock()
                assert not provider._trigger_skip_limit_exceeded.called
            else:
                assert not provider._trigger_track_unplayable.called
                assert provider._trigger_skip_limit_exceeded.called
                assert (
                    "Maximum track skip limit ({:d}) exceeded.".format(
                        PandoraPlaybackProvider.SKIP_LIMIT
                    )
                    in caplog.text
                )


def test_change_track_enforces_skip_limit_if_no_audio_url(
    provider, playlist_item_mock, caplog
):
    with mock.patch.object(
        PandoraLibraryProvider, "lookup_pandora_track", return_value=playlist_item_mock
    ):
        track = PandoraUri.factory(playlist_item_mock)

        provider._trigger_track_unplayable = mock.PropertyMock()
        provider._trigger_skip_limit_exceeded = mock.PropertyMock(0)

        # Track resolves, but without an audio URL it still counts as a skip.
        playlist_item_mock.audio_url = None

        for i in range(PandoraPlaybackProvider.SKIP_LIMIT + 1):
            assert provider.change_track(track) is False
            if i < PandoraPlaybackProvider.SKIP_LIMIT - 1:
                assert provider._trigger_track_unplayable.called
                provider._trigger_track_unplayable.reset_mock()
                assert not provider._trigger_skip_limit_exceeded.called
            else:
                assert not provider._trigger_track_unplayable.called
                assert provider._trigger_skip_limit_exceeded.called
                assert (
                    "Maximum track skip limit ({:d}) exceeded.".format(
                        PandoraPlaybackProvider.SKIP_LIMIT
                    )
                    in caplog.text
                )


def test_change_track_enforces_skip_limit_on_request_exceptions(
    provider, playlist_item_mock, caplog
):
    with mock.patch.object(
        PandoraLibraryProvider, "lookup_pandora_track", return_value=playlist_item_mock
    ):
        # API transport raises on every call, simulating network failure.
        with mock.patch.object(
            APITransport, "__call__", side_effect=conftest.request_exception_mock
        ):
            track = PandoraUri.factory(playlist_item_mock)

            provider._trigger_track_unplayable = mock.PropertyMock()
            provider._trigger_skip_limit_exceeded = mock.PropertyMock(0)

            playlist_item_mock.audio_url = "pandora:track:mock_id:mock_token"

            for i in range(PandoraPlaybackProvider.SKIP_LIMIT + 1):
                assert provider.change_track(track) is False
                if i < PandoraPlaybackProvider.SKIP_LIMIT - 1:
                    assert provider._trigger_track_unplayable.called
                    provider._trigger_track_unplayable.reset_mock()
                    assert not provider._trigger_skip_limit_exceeded.called
                else:
                    assert not provider._trigger_track_unplayable.called
                    assert provider._trigger_skip_limit_exceeded.called
                    assert (
                        "Maximum track skip limit ({:d}) exceeded.".format(
                            PandoraPlaybackProvider.SKIP_LIMIT
                        )
                        in caplog.text
                    )


def test_change_track_fetches_next_track_if_unplayable(
    provider, playlist_item_mock, caplog
):
    with mock.patch.object(
        PandoraLibraryProvider, "lookup_pandora_track", return_value=None
    ):
        track = PandoraUri.factory(playlist_item_mock)

        provider._trigger_track_unplayable = mock.PropertyMock()

        assert provider.change_track(track) is False
        assert provider._trigger_track_unplayable.called
        assert "Error changing Pandora track" in caplog.text


def test_change_track_fetches_next_track_if_station_uri(
    provider, get_station_mock_return_value, caplog
):
    # Station URIs cannot be played directly; a track fetch is triggered.
    station = PandoraUri.factory(get_station_mock_return_value)

    provider.backend._trigger_next_track_available = mock.PropertyMock()
    assert provider.change_track(station) is False
    assert (
        "Cannot play Pandora stations directly. Retrieving tracks for station with ID: {}...".format(
            station.station_id
        )
        in caplog.text
    )
    assert provider.backend._trigger_next_track_available.called


def test_change_track_skips_if_no_track_uri(provider):
    track = models.Track(uri=None)

    provider.change_pandora_track = mock.PropertyMock()
    assert provider.change_track(track) is False
    assert not provider.change_pandora_track.called


def test_change_track_skips_if_track_not_available_in_buffer(
    provider, playlist_item_mock, caplog
):
    track = PandoraUri.factory(playlist_item_mock)

    provider.backend.prepare_next_track = mock.PropertyMock()
    assert provider.change_track(track) is False
    assert (
        "Error changing Pandora track: failed to lookup '{}'.".format(track.uri)
        in caplog.text
    )


def test_change_track_resets_skips_on_success(provider, playlist_item_mock):
    with mock.patch.object(
        PandoraLibraryProvider, "lookup_pandora_track", return_value=playlist_item_mock
    ):
        with mock.patch.object(PlaylistItem, "get_is_playable", return_value=True):
            track = PandoraUri.factory(playlist_item_mock)

            # A successful change must reset the consecutive-skip counter.
            provider._consecutive_track_skips = 1

            assert provider.change_track(track) is True
            assert provider._consecutive_track_skips == 0


def test_change_track_triggers_event_on_success(provider, playlist_item_mock):
    with mock.patch.object(
        PandoraLibraryProvider, "lookup_pandora_track", return_value=playlist_item_mock
    ):
        with mock.patch.object(PlaylistItem, "get_is_playable", return_value=True):
            track = PandoraUri.factory(playlist_item_mock)

            provider._trigger_track_changing = mock.PropertyMock()

            assert provider.change_track(track) is True
            assert provider._trigger_track_changing.called


def test_translate_uri_returns_audio_url(provider, playlist_item_mock):
    # Pre-seed the library's track cache so translate_uri finds the item.
    test_uri = "pandora:track:test_station_id:test_token"
    provider.backend.library.pandora_track_cache[test_uri] = TrackCacheItem(
        mock.Mock(spec=models.Ref.track), playlist_item_mock
    )

    assert provider.translate_uri(test_uri) == conftest.MOCK_TRACK_AUDIO_HIGH


def test_resume_click_ignored_if_start_of_track(provider):
    # At position 0 a resume must not be interpreted as a click event.
    with mock.patch.object(
        PandoraPlaybackProvider, "get_time_position", return_value=0
    ):
        process_click_mock = mock.PropertyMock()
        provider.process_click = process_click_mock

        provider.resume()

        provider.process_click.assert_not_called()


# NOTE(review): the two helpers below lack the 'test_' prefix, so pytest/nose
# will not collect them as tests — confirm whether that is intentional.
def add_artist_bookmark(provider):
    provider.add_artist_bookmark(conftest.MOCK_TRACK_TOKEN)
    provider.client.add_artist_bookmark.assert_called_once_with(
        conftest.MOCK_TRACK_TOKEN
    )


def add_song_bookmark(provider):
    provider.add_song_bookmark(conftest.MOCK_TRACK_TOKEN)
    provider.client.add_song_bookmark.assert_called_once_with(conftest.MOCK_TRACK_TOKEN)
{ "content_hash": "3a31a6e5eadf14ff5d61da430341a82f", "timestamp": "", "source": "github", "line_count": 248, "max_line_length": 101, "avg_line_length": 34.193548387096776, "alnum_prop": 0.6757075471698113, "repo_name": "jcass77/mopidy-pandora", "id": "fc1886a8a6cf68d03927f6731bca9d8ea0c05502", "size": "8480", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "tests/test_playback.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "185568" } ], "symlink_target": "" }
from gettext import gettext as _ import scene, game_objects, kbInput, pygame, sys isLinux = sys.platform.startswith("linux") if(isLinux): try: from gi.repository import Gtk import sugar3.activity.activity from sugar3.graphics.toolbarbox import ToolbarBox from sugar3.graphics.toolbarbox import ToolbarButton from sugar3.activity.widgets import ActivityToolbarButton from sugar3.graphics.toolbutton import ToolButton from sugar3.activity.widgets import StopButton except ImportError: isLinux = False class CodingScene(scene.Scene): def __init__(self, mainChar): self.char = mainChar self.font = pygame.font.SysFont("couriernew", 24) self.selIndex = 0 self.selBlock = None self.totalArrowCount = 0 self.totalBlockCount = 0 self.mode = 0 self.blockMenu = False self.menuIndex = 0 self.inputIndex = 0 self.toolbars = [None, None, None, None] self.activity = None def enter(self): self.mode = 0 self.blockMenu = False self.selIndex = 0 self.menuIndex = 0 self.inputIndex = 0 def render(self, surface): width, height = surface.get_size() surface.fill((0,0,0)) top = 16 arrIndex = self.selIndex for block in self.char.list_of_bots[0].queue_of_code_blocks: top += block.render(surface, 16, top, arrIndex, self.mode) arrIndex -= block.getArrowCount() if(self.mode == 0 and arrIndex == 0): pygame.draw.polygon(surface, (255, 255, 255), [(16, top + 1), (16 - 10, top + 6), (16 - 10, top), (16 + 512 + 26, top), (16 + 512 + 26, top + 6), (16 + 512 + 16, top + 1)]) if(self.blockMenu): if(self.activity != None and self.mode == 1): pygame.draw.rect(surface, (0, 230, 180), (width - 522, 10, 512, 128)) top = 25 if(isinstance(self.selBlock, game_objects.CommentBlock)): menuItem = self.font.render("Comment:", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 477, top)) top += 30 menuItem = self.font.render(self.selBlock.comment, 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 506, top)) inWidth, _ = self.font.size(self.selBlock.comment[:self.inputIndex]) pygame.draw.line(surface, (0, 
0, 0), (width - 506 + inWidth, top), (width - 506 + inWidth, top + 30), 2) elif(isinstance(self.selBlock, game_objects.SayBlock)): menuItem = self.font.render("Message:", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 477, top)) top += 30 menuItem = self.font.render(self.selBlock.message, 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 506, top)) inWidth, _ = self.font.size(self.selBlock.message[:self.inputIndex]) pygame.draw.line(surface, (0, 0, 0), (width - 506 + inWidth, top), (width - 506 + inWidth, top + 30), 2) elif(isinstance(self.selBlock, game_objects.IfManaBlock)): menuItem = self.font.render("Req Mana:", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 477, top)) top += 30 menuItem = self.font.render(str(self.selBlock.mthresh), 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 506, top)) inWidth, _ = self.font.size(str(self.selBlock.mthresh)[:self.inputIndex]) pygame.draw.line(surface, (0, 0, 0), (width - 506 + inWidth, top), (width - 506 + inWidth, top + 30), 2) elif(isinstance(self.selBlock, game_objects.IfOwnHealthBlock)): menuItem = self.font.render("Req Health:", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 477, top)) top += 30 menuItem = self.font.render(str(self.selBlock.hthresh), 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 506, top)) inWidth, _ = self.font.size(str(self.selBlock.hthresh)[:self.inputIndex]) pygame.draw.line(surface, (0, 0, 0), (width - 506 + inWidth, top), (width - 506 + inWidth, top + 30), 2) else: pygame.draw.rect(surface, (0, 230, 180), (width - 266, 10, 256, 512)) menuItem = self.font.render("ADD BLOCK", 0, (0, 0, 0), (0, 230, 180)) top = 25 surface.blit(menuItem, (width - 200, top)) if(self.menuIndex == 0): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) menuItem = self.font.render("MODIFY BLOCK", 0, (0, 0, 0), (0, 230, 180)) top += 30 surface.blit(menuItem, (width - 221, top)) 
if(self.menuIndex == 1): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) menuItem = self.font.render("REMOVE BLOCK", 0, (0, 0, 0), (0, 230, 180)) top += 30 surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 2): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 30 pygame.draw.line(surface, (0, 0, 0), (width - 240, top), (width - 26, top), 3) top += 5 if(self.mode == 0): menuItem = self.font.render("ADD BLOCK", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 200, top)) top += 30 menuItem = self.font.render("Comment", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 3): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 30 menuItem = self.font.render("Say", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 4): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 30 menuItem = self.font.render("While", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 5): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 30 menuItem = self.font.render("If (Mana)", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 6): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 30 menuItem = self.font.render("If (Health)", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 7): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 40 menuItem = self.font.render("Heal", 0, (0, 0, 
0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 8): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 30 menuItem = self.font.render("Fireball", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 9): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 30 menuItem = self.font.render("Moss Leech", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 10): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 30 menuItem = self.font.render("Douse", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 11): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) top += 40 menuItem = self.font.render("End Turn", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 12): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) elif(self.mode == 1): menuItem = self.font.render("MODIFY BLOCK", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) top += 45 if(isinstance(self.selBlock, game_objects.CommentBlock)): menuItem = self.font.render("Comment:", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) top += 30 menuItem = self.font.render(self.selBlock.comment, 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 250, top)) if(self.menuIndex == 3): inWidth, _ = self.font.size(self.selBlock.comment[:self.inputIndex]) pygame.draw.line(surface, (0, 0, 0), (width - 250 + inWidth, top), (width - 250 + inWidth, top + 30), 2) elif(isinstance(self.selBlock, game_objects.SayBlock)): menuItem = self.font.render("Message:", 
0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) top += 30 menuItem = self.font.render(self.selBlock.message, 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 250, top)) if(self.menuIndex == 3): inWidth, _ = self.font.size(self.selBlock.message[:self.inputIndex]) pygame.draw.line(surface, (0, 0, 0), (width - 250 + inWidth, top), (width - 250 + inWidth, top + 30), 2) elif(isinstance(self.selBlock, game_objects.IfManaBlock)): menuItem = self.font.render("Req Mana:", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) top += 30 menuItem = self.font.render(str(self.selBlock.mthresh), 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 250, top)) if(self.menuIndex == 3): inWidth, _ = self.font.size(str(self.selBlock.mthresh)[:self.inputIndex]) pygame.draw.line(surface, (0, 0, 0), (width - 250 + inWidth, top), (width - 250 + inWidth, top + 30), 2) elif(isinstance(self.selBlock, game_objects.IfOwnHealthBlock)): menuItem = self.font.render("Req Health:", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) top += 30 menuItem = self.font.render(str(self.selBlock.hthresh), 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 250, top)) if(self.menuIndex == 3): inWidth, _ = self.font.size(str(self.selBlock.hthresh)[:self.inputIndex]) pygame.draw.line(surface, (0, 0, 0), (width - 250 + inWidth, top), (width - 250 + inWidth, top + 30), 2) elif(self.mode == 2): menuItem = self.font.render("REMOVE BLOCK", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) top += 30 if(self.menuIndex == 3): pygame.draw.polygon(surface, (0, 0, 0), [(width - 148, top), (width - 128, top), (width - 138, top + 12)], 4) else: pygame.draw.polygon(surface, (128, 128, 128), [(width - 148, top), (width - 128, top), (width - 138, top + 12)]) top += 20 if(self.menuIndex == 4): pygame.draw.polygon(surface, (0, 0, 0), [(width - 148, top), (width - 128, top), (width - 138, top + 12)], 4) else: 
pygame.draw.polygon(surface, (128, 128, 128), [(width - 148, top), (width - 128, top), (width - 138, top + 12)]) top += 20 if(self.menuIndex == 5): pygame.draw.polygon(surface, (0, 0, 0), [(width - 148, top), (width - 128, top), (width - 138, top + 12)], 4) else: pygame.draw.polygon(surface, (128, 128, 128), [(width - 148, top), (width - 128, top), (width - 138, top + 12)]) top += 20 menuItem = self.font.render("Yes, Remove!", 0, (0, 0, 0), (0, 230, 180)) surface.blit(menuItem, (width - 221, top)) if(self.menuIndex == 6): pygame.draw.polygon(surface, (0, 0, 0), [(width - 241, top), (width - 241, top + 24), (width - 235, top + 12)], 4) def doKeys(self, keys, keysLastFrame): if(self.blockMenu): # Menu is open if(self.activity != None): # Only do if we're on a Sugar-based System if kbInput.isOkayPressed(keys) and not kbInput.isOkayPressed(keysLastFrame): self.blockMenu = False self.inputIndex = 0 elif(isinstance(self.selBlock, game_objects.CommentBlock)): if kbInput.isLeftPressed(keys, False) and not kbInput.isLeftPressed(keysLastFrame, False): self.inputIndex = max(self.inputIndex - 1, 0) elif kbInput.isRightPressed(keys, False) and not kbInput.isRightPressed(keysLastFrame, False): self.inputIndex = min(self.inputIndex + 1, len(self.selBlock.comment)) newCmmnt, self.inputIndex = kbInput.kbTextInput(keys, keysLastFrame, self.selBlock.comment, self.inputIndex) self.selBlock.setComment(newCmmnt) elif(isinstance(self.selBlock, game_objects.SayBlock)): if kbInput.isLeftPressed(keys, False) and not kbInput.isLeftPressed(keysLastFrame, False): self.inputIndex = max(self.inputIndex - 1, 0) elif kbInput.isRightPressed(keys, False) and not kbInput.isRightPressed(keysLastFrame, False): self.inputIndex = min(self.inputIndex + 1, len(self.selBlock.message)) newMsg, self.inputIndex = kbInput.kbTextInput(keys, keysLastFrame, self.selBlock.message, self.inputIndex) self.selBlock.setMessage(newMsg) elif(isinstance(self.selBlock, game_objects.IfManaBlock)): if 
kbInput.isLeftPressed(keys, False) and not kbInput.isLeftPressed(keysLastFrame, False): self.inputIndex = max(self.inputIndex - 1, 0) elif kbInput.isRightPressed(keys, False) and not kbInput.isRightPressed(keysLastFrame, False): self.inputIndex = min(self.inputIndex + 1, len(str(self.selBlock.mthresh))) newThresh, self.inputIndex = kbInput.kbNumInput(keys, keysLastFrame, self.selBlock.mthresh, self.inputIndex) self.selBlock.setThresh(newThresh) elif(isinstance(self.selBlock, game_objects.IfOwnHealthBlock)): if kbInput.isLeftPressed(keys, False) and not kbInput.isLeftPressed(keysLastFrame, False): self.inputIndex = max(self.inputIndex - 1, 0) elif kbInput.isRightPressed(keys, False) and not kbInput.isRightPressed(keysLastFrame, False): self.inputIndex = min(self.inputIndex + 1, len(str(self.selBlock.hthresh))) newThresh, self.inputIndex = kbInput.kbNumInput(keys, keysLastFrame, self.selBlock.hthresh, self.inputIndex) self.selBlock.setThresh(newThresh) self.inputIndex = max(self.inputIndex, 0) else: # Do on a non-Sugar System if kbInput.isMenuPressed(keys) and not kbInput.isMenuPressed(keysLastFrame): self.blockMenu = False self.menuIndex = 0 if kbInput.isDownPressed(keys, False) and not kbInput.isDownPressed(keysLastFrame, False): self.inputIndex = 0 if(self.mode == 0): self.menuIndex = min(self.menuIndex + 1, 12) elif(self.mode == 1): self.menuIndex = min(self.menuIndex + 1, 3) elif(self.mode == 2): self.menuIndex = min(self.menuIndex + 1, 6) if kbInput.isUpPressed(keys, False) and not kbInput.isUpPressed(keysLastFrame, False): self.inputIndex = 0 self.menuIndex = max(self.menuIndex - 1, 0) if kbInput.isOkayPressed(keys) and not kbInput.isOkayPressed(keysLastFrame): if(self.menuIndex < 3): self.mode = self.menuIndex self.blockMenu = False self.menuIndex = 0 self.selIndex = 0 self.currentBlockIndex = 0 else: if(self.mode == 0): newBlock = None if(self.menuIndex == 3): newBlock = game_objects.CommentBlock() elif(self.menuIndex == 4): newBlock = 
game_objects.SayBlock() elif(self.menuIndex == 5): newBlock = game_objects.WhileBlock() elif(self.menuIndex == 6): newBlock = game_objects.IfManaBlock() elif(self.menuIndex == 7): newBlock = game_objects.IfOwnHealthBlock() elif(self.menuIndex == 8): newBlock = game_objects.HealBlock(20, 15) elif(self.menuIndex == 9): newBlock = game_objects.FireballBlock(10, 15) elif(self.menuIndex == 10): newBlock = game_objects.MossLeechBlock(10, 15) elif(self.menuIndex == 11): newBlock = game_objects.DouseBlock(10, 15) elif(self.menuIndex == 12): newBlock = game_objects.EndTurnBlock() if(self.insert(newBlock)): self.selIndex += 1 elif(self.mode == 2): if(self.menuIndex == 6): self.remove() if(self.mode == 1 and self.menuIndex == 3): if(isinstance(self.selBlock, game_objects.CommentBlock)): if kbInput.isLeftPressed(keys, False) and not kbInput.isLeftPressed(keysLastFrame, False): self.inputIndex = max(self.inputIndex - 1, 0) elif kbInput.isRightPressed(keys, False) and not kbInput.isRightPressed(keysLastFrame, False): self.inputIndex = min(self.inputIndex + 1, len(self.selBlock.comment)) newCmmnt, self.inputIndex = kbInput.kbTextInput(keys, keysLastFrame, self.selBlock.comment, self.inputIndex) self.selBlock.setComment(newCmmnt) elif(isinstance(self.selBlock, game_objects.SayBlock)): if kbInput.isLeftPressed(keys, False) and not kbInput.isLeftPressed(keysLastFrame, False): self.inputIndex = max(self.inputIndex - 1, 0) elif kbInput.isRightPressed(keys, False) and not kbInput.isRightPressed(keysLastFrame, False): self.inputIndex = min(self.inputIndex + 1, len(self.selBlock.message)) newMsg, self.inputIndex = kbInput.kbTextInput(keys, keysLastFrame, self.selBlock.message, self.inputIndex) self.selBlock.setMessage(newMsg) elif(isinstance(self.selBlock, game_objects.IfManaBlock)): if kbInput.isLeftPressed(keys, False) and not kbInput.isLeftPressed(keysLastFrame, False): self.inputIndex = max(self.inputIndex - 1, 0) elif kbInput.isRightPressed(keys, False) and not 
kbInput.isRightPressed(keysLastFrame, False): self.inputIndex = min(self.inputIndex + 1, len(str(self.selBlock.mthresh))) newThresh, self.inputIndex = kbInput.kbNumInput(keys, keysLastFrame, self.selBlock.mthresh, self.inputIndex) self.selBlock.setThresh(newThresh) elif(isinstance(self.selBlock, game_objects.IfOwnHealthBlock)): if kbInput.isLeftPressed(keys, False) and not kbInput.isLeftPressed(keysLastFrame, False): self.inputIndex = max(self.inputIndex - 1, 0) elif kbInput.isRightPressed(keys, False) and not kbInput.isRightPressed(keysLastFrame, False): self.inputIndex = min(self.inputIndex + 1, len(str(self.selBlock.hthresh))) newThresh, self.inputIndex = kbInput.kbNumInput(keys, keysLastFrame, self.selBlock.hthresh, self.inputIndex) self.selBlock.setThresh(newThresh) self.inputIndex = max(self.inputIndex, 0) else: # Menu is closed if kbInput.isBackPressed(keys) and not kbInput.isBackPressed(keysLastFrame): self.manager.go_to(scene.Scenes.INTERACTIVE) self.selIndex = 0 if kbInput.isDownPressed(keys) and not kbInput.isDownPressed(keysLastFrame): if(self.mode == 0): self.selIndex = min(self.selIndex + 1, self.totalArrowCount) else: self.selIndex = min(self.selIndex + 1, self.totalBlockCount - 1) if kbInput.isUpPressed(keys) and not kbInput.isUpPressed(keysLastFrame): self.selIndex = max(self.selIndex - 1, 0) if self.activity == None and kbInput.isMenuPressed(keys) and not kbInput.isMenuPressed(keysLastFrame): self.blockMenu = True keysLastFrame = keys def insert(self, block): if(self.selIndex == 0): # Insert block at beginning of list self.char.list_of_bots[0].queue_of_code_blocks.insert(0, block) return True elif(self.selIndex == self.totalArrowCount): # Append block to end of list self.char.list_of_bots[0].queue_of_code_blocks.append(block) return True else: # Insert somewhere in list currArrowIndex = self.selIndex for i in range(0, len(self.char.list_of_bots[0].queue_of_code_blocks)): if(currArrowIndex == 0): 
self.char.list_of_bots[0].queue_of_code_blocks.insert(i, block) return True elif(self.char.list_of_bots[0].queue_of_code_blocks[i].insert(block, currArrowIndex)): return True else: currArrowIndex -= self.char.list_of_bots[0].queue_of_code_blocks[i].getArrowCount() print("Failed to insert a new block at insertion point " + str(self.selIndex)) return False def remove(self): currSel = self.selIndex for i in range(0, len(self.char.list_of_bots[0].queue_of_code_blocks)): if(currSel == 0): del self.char.list_of_bots[0].queue_of_code_blocks[i] return True elif(self.char.list_of_bots[0].queue_of_code_blocks[i].remove(currSel)): return True else: currSel -= self.char.list_of_bots[0].queue_of_code_blocks[i].getBlockCount() def fetch(self): currSel = self.selIndex for i in range(0, len(self.char.list_of_bots[0].queue_of_code_blocks)): if(currSel == 0): return self.char.list_of_bots[0].queue_of_code_blocks[i] rtn = self.char.list_of_bots[0].queue_of_code_blocks[i].fetch(currSel) if(rtn != None): return rtn currSel -= self.char.list_of_bots[0].queue_of_code_blocks[i].getBlockCount() return None def update(self, keys, keysLastFrame): self.totalArrowCount = 0 self.totalBlockCount = 0 for block in self.char.list_of_bots[0].queue_of_code_blocks: self.totalArrowCount += block.getArrowCount() self.totalBlockCount += block.getBlockCount() if(self.mode == 1): self.selBlock = self.fetch() self.doKeys(keys, keysLastFrame) def handle_events(self, events): pass def makeToolbar(self, activity): self.activity = activity self.toolbars[0] = self.makeAddToolbar(activity) self.toolbars[1] = self.makeModToolbar(activity) self.toolbars[2] = self.makeDelToolbar(activity) return self.toolbars[0] def enterMode(self, modenum): self.mode = modenum self.selIndex = 0 self.currentBlockIndex = 0 self.activity.set_toolbar_box(self.toolbars[modenum]) self.toolbars[modenum].show() def enterModeAdd(self, button=None): self.enterMode(0) def enterModeModify(self, button=None): self.enterMode(1) def 
enterModeDelete(self, button=None): self.enterMode(2) def makeGenericToolbar(self, activity): toolbar = ToolbarBox() activity_button = ActivityToolbarButton(activity) toolbar.toolbar.insert(activity_button, -1) activity_button.show() addButton = ToolButton('mode-add') addButton.set_tooltip(_('Add Block Mode')) addButton.props.accelerator = '<Ctrl>1' toolbar.toolbar.insert(addButton, -1) addButton.connect('clicked', self.enterModeAdd) addButton.show() modButton = ToolButton('mode-mod') modButton.set_tooltip(_('Modify Block Mode')) modButton.props.accelerator = '<Ctrl>2' toolbar.toolbar.insert(modButton, -1) modButton.connect('clicked', self.enterModeModify) modButton.show() delButton = ToolButton('mode-del') delButton.set_tooltip(_('Delete Block Mode')) delButton.props.accelerator = '<Ctrl>3' toolbar.toolbar.insert(delButton, -1) delButton.connect('clicked', self.enterModeDelete) delButton.show() separator = Gtk.SeparatorToolItem() separator.props.draw = False separator.set_expand(True) toolbar.toolbar.insert(separator, -1) separator.show() stop_button = StopButton(activity) toolbar.toolbar.insert(stop_button, -1) stop_button.show() return toolbar def makeAddToolbar(self, activity): toolbar = self.makeGenericToolbar(activity) separator = Gtk.SeparatorToolItem() toolbar.toolbar.insert(separator, 4) separator.show() btn = ToolButton('block-comment') btn.set_tooltip(_('Add Comment Block')) btn.connect('clicked', self.insertBlockComment) toolbar.toolbar.insert(btn, 5) btn.show() btn = ToolButton('block-say') btn.set_tooltip(_('Add Say Block')) btn.connect('clicked', self.insertBlockSay) toolbar.toolbar.insert(btn, 6) btn.show() separator = Gtk.SeparatorToolItem() toolbar.toolbar.insert(separator, 7) separator.show() btn = ToolButton('block-while') btn.set_tooltip(_('Add While Block')) btn.connect('clicked', self.insertBlockWhile) toolbar.toolbar.insert(btn, 8) btn.show() btn = ToolButton('block-ifmana') btn.set_tooltip(_('Add If-Mana Block')) btn.connect('clicked', 
self.insertBlockIfMana) toolbar.toolbar.insert(btn, 9) btn.show() btn = ToolButton('block-ifhealth') btn.set_tooltip(_('Add If-Health Block')) btn.connect('clicked', self.insertBlockIfHealth) toolbar.toolbar.insert(btn, 10) btn.show() separator = Gtk.SeparatorToolItem() toolbar.toolbar.insert(separator, 11) separator.show() btn = ToolButton('block-heal') btn.set_tooltip(_('Add Heal Block')) btn.connect('clicked', self.insertBlockHeal) toolbar.toolbar.insert(btn, 12) btn.show() btn = ToolButton('block-fireball') btn.set_tooltip(_('Add Fireball Block')) btn.connect('clicked', self.insertBlockFireball) toolbar.toolbar.insert(btn, 13) btn.show() btn = ToolButton('block-mossleech') btn.set_tooltip(_('Add Moss Leech Block')) btn.connect('clicked', self.insertBlockMossLeech) toolbar.toolbar.insert(btn, 14) btn.show() btn = ToolButton('block-douse') btn.set_tooltip(_('Add Douse Block')) btn.connect('clicked', self.insertBlockDouse) toolbar.toolbar.insert(btn, 15) btn.show() separator = Gtk.SeparatorToolItem() toolbar.toolbar.insert(separator, 16) separator.show() btn = ToolButton('block-endturn') btn.set_tooltip(_('Add End Turn Block')) btn.connect('clicked', self.insertBlockEndTurn) toolbar.toolbar.insert(btn, 17) btn.show() return toolbar def insertMenu(self, block): if(self.insert(block)): self.selIndex += 1 def insertBlockComment(self, button=None): self.insertMenu(game_objects.CommentBlock()) def insertBlockSay(self, button=None): self.insertMenu(game_objects.SayBlock()) def insertBlockWhile(self, button=None): self.insertMenu(game_objects.WhileBlock()) def insertBlockIfMana(self, button=None): self.insertMenu(game_objects.IfManaBlock()) def insertBlockIfHealth(self, button=None): self.insertMenu(game_objects.IfOwnHealthBlock()) def insertBlockHeal(self, button=None): self.insertMenu(game_objects.HealBlock(20, 15)) def insertBlockFireball(self, button=None): self.insertMenu(game_objects.FireballBlock(10, 15)) def insertBlockMossLeech(self, button=None): 
self.insertMenu(game_objects.MossLeechBlock(10, 15)) def insertBlockDouse(self, button=None): self.insertMenu(game_objects.DouseBlock(10, 15)) def insertBlockEndTurn(self, button=None): self.insertMenu(game_objects.EndTurnBlock()) def makeModToolbar(self, activity): toolbar = self.makeGenericToolbar(activity) separator = Gtk.SeparatorToolItem() toolbar.toolbar.insert(separator, 5) separator.show() btn = ToolButton('block-edit') btn.set_tooltip(_('Edit Selected Block')) btn.props.accelerator = '<Ctrl>e' toolbar.toolbar.insert(btn, 6) btn.connect('clicked', self.menuEdit) btn.show() return toolbar def menuEdit(self, button=None): self.blockMenu = True def makeDelToolbar(self, activity): toolbar = self.makeGenericToolbar(activity) separator = Gtk.SeparatorToolItem() toolbar.toolbar.insert(separator, 5) separator.show() btn = ToolButton('block-delete') btn.set_tooltip(_('Delete Selected Block')) toolbar.toolbar.insert(btn, 6) btn.connect('clicked', self.menuRemove) btn.show() return toolbar def menuRemove(self, button=None): self.remove()
{ "content_hash": "3890324af066fc2cb1ac95acc97820fa", "timestamp": "", "source": "github", "line_count": 608, "max_line_length": 184, "avg_line_length": 55.64473684210526, "alnum_prop": 0.5296169307164814, "repo_name": "ColdSauce/golems", "id": "75058444ae32456cf45e4791ccbc9c2ef3a25f2a", "size": "33832", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/codingscene.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "142127" } ], "symlink_target": "" }
""" Module with some useful utilities for minimization of scalar functiom - a kind of replacement for scipy.minimize.minimize_scalar when scipy is not accessible - the actual code is copied from scipy.minimize 0.18.11 The main entry point is a function <code>minimize_scalar</code>. - a copy from scipy 0.18.11 """ # ============================================================================= __version__ = "$Revision:$" __author__ = "Vanya BELYAEV Ivan.Belyaev@cern.ch" __date__ = "2018-10-05" __all__ = ( 'scalar_minimize' , ## local copy of minimize_scalar from scipy 'minimize_scalar' , ## the main entry ## helper functions: 'sp_minimum_1D' , 'sp_maximum_1D' , 'sp_minimum_2D' , 'sp_maximum_2D' , 'sp_minimum_3D' , 'sp_maximum_3D' , ) # ============================================================================= # logging # ============================================================================= from ostap.logger.logger import getLogger if '__main__' == __name__ : logger = getLogger ( 'ostap.math.minimize' ) else : logger = getLogger ( __name__ ) # ============================================================================= import math, warnings from math import sqrt try : import numpy import numpy as np with warnings.catch_warnings(): warnings.simplefilter("ignore") _epsilon = math.sqrt(numpy.finfo(float).eps) except ImportError : class numpy(object) : @staticmethod def abs ( value ) : return abs ( value ) @staticmethod def size ( value ) : return 1 @staticmethod def sign ( value ) : return math.copysign ( 1.0 , value ) @staticmethod def max ( a , *kwargs ) : return max ( a ) import sys _epsilon = sys.float_info.epsilon*0.5 np = numpy # ============================================================================= class OptimizeWarning(UserWarning): pass def is_array_scalar(x): """Test whether `x` is either a scalar or an array scalar. 
""" return np.size(x) == 1 def _check_unknown_options(unknown_options): if unknown_options: msg = ", ".join(map(str, unknown_options.keys())) # Stack level 4: this is called from _minimize_*, which is # called from another function in Scipy. Level 4 is the first # level in user code. warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4) class OptimizeResult(dict): """ Represents the optimization result. Attributes ---------- x : ndarray The solution of the optimization. success : bool Whether or not the optimizer exited successfully. status : int Termination status of the optimizer. Its value depends on the underlying solver. Refer to `message` for details. message : str Description of the cause of the termination. fun, jac, hess: ndarray Values of objective function, its Jacobian and its Hessian (if available). The Hessians may be approximations, see the documentation of the function in question. hess_inv : object Inverse of the objective function's Hessian; may be an approximation. Not available for all solvers. The type of this attribute may be either np.ndarray or scipy.sparse.linalg.LinearOperator. nfev, njev, nhev : int Number of evaluations of the objective functions and of its Jacobian and Hessian. nit : int Number of iterations performed by the optimizer. maxcv : float The maximum constraint violation. Notes ----- There may be additional attributes not listed above depending of the specific solver. Since this class is essentially a subclass of dict with attribute accessors, one can see which attributes are available using the `keys()` method. 
""" def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ def __repr__(self): if self.keys(): m = max(map(len, list(self.keys()))) + 1 return '\n'.join([k.rjust(m) + ': ' + repr(v) for k, v in sorted(self.items())]) else: return self.__class__.__name__ + "()" def __dir__(self): return list(self.keys()) def scalar_minimize (fun, bracket=None, bounds=None, args=(), method='brent', tol=None, options=None): """Minimization of scalar function of one variable. Parameters ---------- fun : callable Objective function. Scalar function, must return a scalar. bracket : sequence, optional For methods 'brent' and 'golden', `bracket` defines the bracketing interval and can either have three items `(a, b, c)` so that `a < b < c` and `fun(b) < fun(a), fun(c)` or two items `a` and `c` which are assumed to be a starting interval for a downhill bracket search (see `bracket`); it doesn't always mean that the obtained solution will satisfy `a <= x <= c`. bounds : sequence, optional For method 'bounded', `bounds` is mandatory and must have two items corresponding to the optimization bounds. args : tuple, optional Extra arguments passed to the objective function. method : str or callable, optional Type of solver. Should be one of - 'Brent' :ref:`(see here) <optimize.minimize_scalar-brent>` - 'Bounded' :ref:`(see here) <optimize.minimize_scalar-bounded>` - 'Golden' :ref:`(see here) <optimize.minimize_scalar-golden>` - custom - a callable object (added in version 0.14.0), see below tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. options : dict, optional A dictionary of solver options. maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. See :func:`show_options()` for solver-specific options. 
Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- minimize : Interface to minimization algorithms for scalar multivariate functions show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *Brent*. Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's algorithm to find a local minimum. The algorithm uses inverse parabolic interpolation when possible to speed up convergence of the golden section method. Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the golden section search technique. It uses analog of the bisection method to decrease the bracketed interval. It is usually preferable to use the *Brent* method. Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can perform bounded minimization. It uses the Brent method to find a local minimum in the interval x1 < xopt < x2. **Custom minimizers** It may be useful to pass a custom minimization method, for example when using some library frontend to minimize_scalar. You can simply pass a callable as the ``method`` parameter. The callable is called as ``method(fun, args, **kwargs, **options)`` where ``kwargs`` corresponds to any other parameters passed to `minimize` (such as `bracket`, `tol`, etc.), except the `options` dict, which has its contents also passed as `method` parameters pair by pair. The method shall return an ``OptimizeResult`` object. 
The provided `method` callable must be able to accept (and possibly ignore) arbitrary parameters; the set of parameters accepted by `minimize` may expand in future versions and then these parameters will be passed to the method. You can find an example in the scipy.optimize tutorial. .. versionadded:: 0.11.0 Examples -------- Consider the problem of minimizing the following function. >>> def f(x): ... return (x - 2) * x * (x + 2)**2 Using the *Brent* method, we find the local minimum as: >>> from scipy.optimize import minimize_scalar >>> res = minimize_scalar(f) >>> res.x 1.28077640403 Using the *Bounded* method, we find a local minimum with specified bounds as: >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded') >>> res.x -2.0000002026 """ if not isinstance(args, tuple): args = (args,) if callable(method): meth = "_custom" else: meth = method.lower() if options is None: options = {} if tol is not None: options = dict(options) if meth == 'bounded' and 'xatol' not in options: warn("Method 'bounded' does not support relative tolerance in x; " "defaulting to absolute tolerance.", RuntimeWarning) options['xatol'] = tol elif meth == '_custom': options.setdefault('tol', tol) else: options.setdefault('xtol', tol) if meth == '_custom': return method(fun, args=args, bracket=bracket, bounds=bounds, **options) elif meth == 'brent': return _minimize_scalar_brent(fun, bracket, args, **options) elif meth == 'bounded': if bounds is None: raise ValueError('The `bounds` parameter is mandatory for ' 'method `bounded`.') return _minimize_scalar_bounded(fun, bounds, args, **options) elif meth == 'golden': return _minimize_scalar_golden(fun, bracket, args, **options) else: raise ValueError('Unknown solver %s' % method) def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8, maxiter=500, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. xtol : float Relative error in solution `xopt` acceptable for convergence. 
Notes ----- Uses inverse parabolic interpolation when possible to speed up convergence of golden section method. """ _check_unknown_options(unknown_options) tol = xtol if tol < 0: raise ValueError('tolerance should be >= 0, got %r' % tol) brent = Brent(func=func, args=args, tol=tol, full_output=True, maxiter=maxiter) brent.set_bracket(brack) brent.optimize() x, fval, nit, nfev = brent.get_result(full_output=True) return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, success=nit < maxiter) class Brent: #need to rethink design of __init__ def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, full_output=0): self.func = func self.args = args self.tol = tol self.maxiter = maxiter self._mintol = 1.0e-11 self._cg = 0.3819660 self.xmin = None self.fval = None self.iter = 0 self.funcalls = 0 # need to rethink design of set_bracket (new options, etc) def set_bracket(self, brack=None): self.brack = brack def get_bracket_info(self): #set up func = self.func args = self.args brack = self.brack ### BEGIN core bracket_info code ### ### carefully DOCUMENT any CHANGES in core ## if brack is None: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) elif len(brack) == 2: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa, xb, xc = brack if (xa > xc): # swap so xa < xc can be assumed xc, xa = xa, xc if not ((xa < xb) and (xb < xc)): raise ValueError("Not a bracketing interval.") fa = func(*((xa,) + args)) fb = func(*((xb,) + args)) fc = func(*((xc,) + args)) if not ((fb < fa) and (fb < fc)): raise ValueError("Not a bracketing interval.") funcalls = 3 else: raise ValueError("Bracketing interval must be " "length 2 or 3 sequence.") ### END core bracket_info code ### return xa, xb, xc, fa, fb, fc, funcalls def optimize(self): # set up for optimization func = self.func xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info() _mintol = self._mintol _cg = self._cg ################################# #BEGIN CORE 
ALGORITHM ################################# x = w = v = xb fw = fv = fx = func(*((x,) + self.args)) if (xa < xc): a = xa b = xc else: a = xc b = xa deltax = 0.0 funcalls = 1 iter = 0 while (iter < self.maxiter): tol1 = self.tol * numpy.abs(x) + _mintol tol2 = 2.0 * tol1 xmid = 0.5 * (a + b) # check for convergence if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)): break # XXX In the first iteration, rat is only bound in the true case # of this conditional. This used to cause an UnboundLocalError # (gh-4140). It should be set before the if (but to what?). if (numpy.abs(deltax) <= tol1): if (x >= xmid): deltax = a - x # do a golden section step else: deltax = b - x rat = _cg * deltax else: # do a parabolic step tmp1 = (x - w) * (fx - fv) tmp2 = (x - v) * (fx - fw) p = (x - v) * tmp2 - (x - w) * tmp1 tmp2 = 2.0 * (tmp2 - tmp1) if (tmp2 > 0.0): p = -p tmp2 = numpy.abs(tmp2) dx_temp = deltax deltax = rat # check parabolic fit if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))): rat = p * 1.0 / tmp2 # if parabolic step is useful. 
u = x + rat if ((u - a) < tol2 or (b - u) < tol2): if xmid - x >= 0: rat = tol1 else: rat = -tol1 else: if (x >= xmid): deltax = a - x # if it's not do a golden section step else: deltax = b - x rat = _cg * deltax if (numpy.abs(rat) < tol1): # update by at least tol1 if rat >= 0: u = x + tol1 else: u = x - tol1 else: u = x + rat fu = func(*((u,) + self.args)) # calculate new output value funcalls += 1 if (fu > fx): # if it's bigger than current if (u < x): a = u else: b = u if (fu <= fw) or (w == x): v = w w = u fv = fw fw = fu elif (fu <= fv) or (v == x) or (v == w): v = u fv = fu else: if (u >= x): a = x else: b = x v = w w = x x = u fv = fw fw = fx fx = fu iter += 1 ################################# #END CORE ALGORITHM ################################# self.xmin = x self.fval = fx self.iter = iter self.funcalls = funcalls def get_result(self, full_output=False): if full_output: return self.xmin, self.fval, self.iter, self.funcalls else: return self.xmin def _minimize_scalar_bounded(func, bounds, args=(), xatol=1e-5, maxiter=500, disp=0, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. xatol : float Absolute error in solution `xopt` acceptable for convergence. 
""" _check_unknown_options(unknown_options) maxfun = maxiter # Test bounds are of correct form if len(bounds) != 2: raise ValueError('bounds must have two elements.') x1, x2 = bounds if not (is_array_scalar(x1) and is_array_scalar(x2)): raise ValueError("Optimisation bounds must be scalars" " or array scalars.") if x1 > x2: raise ValueError("The lower bound exceeds the upper bound.") flag = 0 header = ' Func-count x f(x) Procedure' step = ' initial' sqrt_eps = sqrt(2.2e-16) golden_mean = 0.5 * (3.0 - sqrt(5.0)) a, b = x1, x2 fulc = a + golden_mean * (b - a) nfc, xf = fulc, fulc rat = e = 0.0 x = xf fx = func(x, *args) num = 1 fmin_data = (1, xf, fx) ffulc = fnfc = fx xm = 0.5 * (a + b) tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0 tol2 = 2.0 * tol1 if disp > 2: print(" ") print(header) print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))): golden = 1 # Check for parabolic fit if numpy.abs(e) > tol1: golden = 0 r = (xf - nfc) * (fx - ffulc) q = (xf - fulc) * (fx - fnfc) p = (xf - fulc) * q - (xf - nfc) * r q = 2.0 * (q - r) if q > 0.0: p = -p q = numpy.abs(q) r = e e = rat # Check for acceptability of parabola if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and (p < q * (b - xf))): rat = (p + 0.0) / q x = xf + rat step = ' parabolic' if ((x - a) < tol2) or ((b - x) < tol2): si = numpy.sign(xm - xf) + ((xm - xf) == 0) rat = tol1 * si else: # do a golden section step golden = 1 if golden: # Do a golden-section step if xf >= xm: e = a - xf else: e = b - xf rat = golden_mean*e step = ' golden' si = numpy.sign(rat) + (rat == 0) x = xf + si * numpy.max([numpy.abs(rat), tol1]) fu = func(x, *args) num += 1 fmin_data = (num, x, fu) if disp > 2: print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) if fu <= fx: if x >= xf: a = xf else: b = xf fulc, ffulc = nfc, fnfc nfc, fnfc = xf, fx xf, fx = x, fu else: if x < xf: a = x else: b = x if (fu <= fnfc) or (nfc == xf): fulc, ffulc = nfc, fnfc nfc, fnfc = x, fu 
elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): fulc, ffulc = x, fu xm = 0.5 * (a + b) tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0 tol2 = 2.0 * tol1 if num >= maxfun: flag = 1 break fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xatol, disp) result = OptimizeResult(fun=fval, status=flag, success=(flag == 0), message={0: 'Solution found.', 1: 'Maximum number of function calls ' 'reached.'}.get(flag, ''), x=xf, nfev=num) return result def _minimize_scalar_golden(func, brack=None, args=(), xtol=_epsilon, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. xtol : float Relative error in solution `xopt` acceptable for convergence. """ _check_unknown_options(unknown_options) tol = xtol if brack is None: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) elif len(brack) == 2: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa, xb, xc = brack if (xa > xc): # swap so xa < xc can be assumed xc, xa = xa, xc if not ((xa < xb) and (xb < xc)): raise ValueError("Not a bracketing interval.") fa = func(*((xa,) + args)) fb = func(*((xb,) + args)) fc = func(*((xc,) + args)) if not ((fb < fa) and (fb < fc)): raise ValueError("Not a bracketing interval.") funcalls = 3 else: raise ValueError("Bracketing interval must be length 2 or 3 sequence.") _gR = 0.61803399 _gC = 1.0 - _gR x3 = xc x0 = xa if (numpy.abs(xc - xb) > numpy.abs(xb - xa)): x1 = xb x2 = xb + _gC * (xc - xb) else: x2 = xb x1 = xb - _gC * (xb - xa) f1 = func(*((x1,) + args)) f2 = func(*((x2,) + args)) funcalls += 2 while (numpy.abs(x3 - x0) > tol * (numpy.abs(x1) + numpy.abs(x2))): if (f2 < f1): x0 = x1 x1 = x2 x2 = _gR * x1 + _gC * x3 f1 = f2 f2 = func(*((x2,) + args)) else: x3 = x2 x2 = x1 x1 = _gR * x2 + _gC * x0 f2 = f1 f1 = func(*((x1,) + args)) funcalls += 1 if (f1 < f2): xmin = x1 fval = f1 else: xmin = x2 fval = f2 return OptimizeResult(fun=fval, nfev=funcalls, x=xmin) def bracket(func, 
xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000): """ Bracket the minimum of the function. Given a function and distinct initial points, search in the downhill direction (as defined by the initital points) and return new points xa, xb, xc that bracket the minimum of the function f(xa) > f(xb) < f(xc). It doesn't always mean that obtained solution will satisfy xa<=x<=xb Parameters ---------- func : callable f(x,*args) Objective function to minimize. xa, xb : float, optional Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0. args : tuple, optional Additional arguments (if present), passed to `func`. grow_limit : float, optional Maximum grow limit. Defaults to 110.0 maxiter : int, optional Maximum number of iterations to perform. Defaults to 1000. Returns ------- xa, xb, xc : float Bracket. fa, fb, fc : float Objective function values in bracket. funcalls : int Number of function evaluations made. """ _gold = 1.618034 _verysmall_num = 1e-21 fa = func(*(xa,) + args) fb = func(*(xb,) + args) if (fa < fb): # Switch so fa > fb xa, xb = xb, xa fa, fb = fb, fa xc = xb + _gold * (xb - xa) fc = func(*((xc,) + args)) funcalls = 3 iter = 0 while (fc < fb): tmp1 = (xb - xa) * (fb - fc) tmp2 = (xb - xc) * (fb - fa) val = tmp2 - tmp1 if numpy.abs(val) < _verysmall_num: denom = 2.0 * _verysmall_num else: denom = 2.0 * val w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom wlim = xb + grow_limit * (xc - xb) if iter > maxiter: raise RuntimeError("Too many iterations.") iter += 1 if (w - xc) * (xb - w) > 0.0: fw = func(*((w,) + args)) funcalls += 1 if (fw < fc): xa = xb xb = w fa = fb fb = fw return xa, xb, xc, fa, fb, fc, funcalls elif (fw > fb): xc = w fc = fw return xa, xb, xc, fa, fb, fc, funcalls w = xc + _gold * (xc - xb) fw = func(*((w,) + args)) funcalls += 1 elif (w - wlim)*(wlim - xc) >= 0.0: w = wlim fw = func(*((w,) + args)) funcalls += 1 elif (w - wlim)*(xc - w) > 0.0: fw = func(*((w,) + args)) funcalls += 1 if (fw < fc): xb = xc xc = w w = xc + _gold 
* (xc - xb) fb = fc fc = fw fw = func(*((w,) + args)) funcalls += 1 else: w = xc + _gold * (xc - xb) fw = func(*((w,) + args)) funcalls += 1 xa = xb xb = xc xc = w fa = fb fb = fc fc = fw return xa, xb, xc, fa, fb, fc, funcalls try : with warnings.catch_warnings(): warnings.simplefilter("ignore") from scipy.optimize import minimize_scalar as ms minimize_scalar = ms scipy_available = True except ImportError : minimize_scalar = scalar_minimize scipy_available = False # ============================================================================= if not scipy_available : sp_minimum_1D = None sp_maximum_1D = None sp_minimum_2D = None sp_maximum_2D = None sp_minimum_3D = None sp_maximum_3D = None else : # ========================================================================= ## get a minimum for 1D-function # @code # model = ... # x = model.minimum() # @endcode def sp_minimum_1D ( fun , xmin , xmax , x0 = None , *args ) : """Get a minimum for 1D-function >>> model = ... >>> x = model.minimum () >>> """ if x0 == None : x0 = 0.5 * ( xmin + xmax ) import numpy as np x0 = np.array ( [ x0 ] ) bounds = [ ( xmin , xmax ) ] import scipy.optimize as spo res = spo.minimize ( fun , x0 = x0 , bounds = bounds ) if not res.success : logger.error ( "Can't minimize the function: %s" % res.message ) return res.x[0] # ========================================================================= ## get a maximum for 1D-function # @code # model = ... # x = model.maximum() # @endcode def sp_maximum_1D ( fun , xmin , xmax , x0 = None , *args ) : """Get a maximum for 1D-function >>> model = ... >>> x = model.maximum () >>> """ funmin = lambda x , *a : -1.0 * ( float ( fun ( x , *a ) ) ) return sp_minimum_1D ( funmin , xmin , xmax , x0 , *args ) # ========================================================================= ## get a minimum for 2D-function # @code # model2 = ... 
# x , y = model2.minimum () # @endcode def sp_minimum_2D ( fun , xmin , xmax , ymin , ymax , x0 = () , *args ) : """Get a maximum for 2D-function >>> model2 = ... >>> x , y = model2.maximum() >>> """ if not x0 : x0 = 0.5 * ( xmin + xmax ) , 0.5 * ( ymin + ymax ) import numpy as np x0 = np.array ( *x0 ) bounds = [ ( xmin , xmax ) , ( ymin , ymax ) ] import scipy.optimize as spo res = spo.minimize ( fun , x0 = x0 , bounds = bounds ) if not res.success : logger.error ( "Can't minimize the function: %s" % res.message ) return res.x[0] , res.x[1] # ========================================================================= ## get a maximum for 2D-function # @code # model2 = ... # x , y = model2.maximum() # @endcode def sp_maximum_2D ( fun , xmin , xmax , ymin , ymax , x0 = () , *args ) : """Get a maximum for 2D-function >>> model2 = ... >>> x , y = model2.maximum () >>> """ funmin = lambda x , y , *a : -1.0 * ( float ( fun ( x , y , *a ) ) ) return sp_minimum_2D ( funmin , xmin , xmax , ymin , ymax , x0 , *args ) # ========================================================================= ## get a minimum for 3D-function # @code # model3 = ... # x , y , z = model2.minimum () # @endcode def sp_minimum_3D ( fun , xmin , xmax , ymin , ymax , zmin , zmax , x0 = () , *args ) : """Get a minimum for 3D-function >>> model3 = ... >>> x , y , z = model3.minimum() >>> """ if not x0 : x0 = 0.5 * ( xmin + xmax ) , 0.5 * ( ymin + ymax ) , 0.5 * ( zmin + zmax ) import numpy as np x0 = np.array ( *x0 ) bounds = [ ( xmin , xmax ) , ( ymin , ymax ) , ( zmin , zmax ) ] import scipy.optimize as spo res = spo.minimize ( fun , x0 = x0 , bounds = bounds ) if not res.success : logger.error ( "Can't minimize the function: %s" % res.message ) return res.x[0] , res.x[1] , res.x[2] # ========================================================================= ## get a maximum for 3D-function # @code # model3 = ... 
# x , y , z = model3.maximum() # @endcode def sp_maximum_3D ( fun , xmin , xmax , ymin , ymax , zmin , zmax , x0 = () , *args ) : """Get a maximum for 3D-function >>> model3 = ... >>> x, y , z = model3.maximum () >>> """ funmin = lambda x , y , z , *a : -1.0 * ( float ( fun ( x , y , z , *a ) ) ) return sp_minimum_3D ( funmin , xmin , xmax , ymin , ymax , zmin , zmax , x0 , *args ) # ============================================================================= if '__main__' == __name__ : from ostap.utils.docme import docme docme ( __name__ , logger = logger ) # ============================================================================= ## The END # =============================================================================
{ "content_hash": "d87dac59858649f63a5f34db311c8a2d", "timestamp": "", "source": "github", "line_count": 944, "max_line_length": 96, "avg_line_length": 33.940677966101696, "alnum_prop": 0.4681647940074906, "repo_name": "OstapHEP/ostap", "id": "c434ff485b05e5dd4ac8d60f0b9dbc0c08aa184a", "size": "32603", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ostap/math/minimize.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "41595313" }, { "name": "C++", "bytes": "7480608" }, { "name": "CMake", "bytes": "43634" }, { "name": "Dockerfile", "bytes": "1028" }, { "name": "Python", "bytes": "6658186" }, { "name": "Shell", "bytes": "10365" } ], "symlink_target": "" }
import os

from collections import OrderedDict

from conans.client.conf import ConanClientConfigParser, default_client_conf, default_settings_yml
from conans.client.conf.detect import detect_defaults_settings
from conans.client.output import Color
from conans.client.profile_loader import read_profile
from conans.errors import ConanException
from conans.model.info import ConanInfo
from conans.model.manifest import FileTreeManifest
from conans.model.profile import Profile
from conans.model.ref import ConanFileReference
from conans.model.settings import Settings
from conans.paths import SimplePaths, CONANINFO, PUT_HEADERS
from conans.util.files import save, load, normalize
from conans.util.locks import SimpleLock, ReadLock, WriteLock, NoLock

# File/folder names that live directly under the ".conan" folder.
CONAN_CONF = 'conan.conf'
CONAN_SETTINGS = "settings.yml"
LOCALDB = ".conan.db"
REGISTRY = "registry.txt"
PROFILES_FOLDER = "profiles"


class ClientCache(SimplePaths):
    """ Class to represent/store/compute all the paths involved in the
    execution of conans commands. Accesses to real disk and reads/write
    things. (OLD client ConanPaths)
    """

    def __init__(self, base_folder, store_folder, output):
        self.conan_folder = os.path.join(base_folder, ".conan")
        self._conan_config = None       # lazily parsed conan.conf
        self._settings = None           # lazily loaded settings.yml
        self._output = output
        # Storage precedence: explicit argument > conan.conf > .conan folder.
        # NOTE: accessing self.conan_config here may create conan.conf on disk.
        self._store_folder = store_folder or self.conan_config.storage_path or self.conan_folder
        self._default_profile = None    # lazily detected/loaded default profile
        self._no_lock = None            # cached cache_no_locks config flag
        super(ClientCache, self).__init__(self._store_folder)

    def _no_locks(self):
        """Return (and cache) whether locking is disabled in conan.conf."""
        if self._no_lock is None:
            self._no_lock = self.conan_config.cache_no_locks
        return self._no_lock

    def conanfile_read_lock(self, conan_ref):
        """Shared lock over a recipe folder (no-op when locks are disabled)."""
        if self._no_locks():
            return NoLock()
        return ReadLock(self.conan(conan_ref), conan_ref, self._output)

    def conanfile_write_lock(self, conan_ref):
        """Exclusive lock over a recipe folder (no-op when locks are disabled)."""
        if self._no_locks():
            return NoLock()
        return WriteLock(self.conan(conan_ref), conan_ref, self._output)

    def conanfile_lock_files(self, conan_ref):
        """Return the lock file paths for a recipe (empty when disabled)."""
        if self._no_locks():
            return ()
        return WriteLock(self.conan(conan_ref), conan_ref, self._output).files

    def package_lock(self, package_ref):
        """Simple lock over a single binary package folder."""
        if self._no_locks():
            return NoLock()
        return SimpleLock(os.path.join(self.conan(package_ref.conan),
                                       "locks", package_ref.package_id))

    @property
    def put_headers_path(self):
        """Path of the artifacts.properties file with extra upload headers."""
        return os.path.join(self.conan_folder, PUT_HEADERS)

    def read_put_headers(self):
        """Parse put_headers_path into a {name: value} dict.

        Creates an empty file on first use. Lines are "name=value";
        blank lines and '#' comments are ignored.
        Raises ConanException on any malformed content.
        """
        ret = {}
        if not os.path.exists(self.put_headers_path):
            save(self.put_headers_path, "")
            return ret
        try:
            contents = load(self.put_headers_path)
            for line in contents.splitlines():
                if line and not line.strip().startswith("#"):
                    tmp = line.split("=", 1)
                    if len(tmp) != 2:
                        # Any parsing problem is converted into the single
                        # ConanException below.
                        raise Exception()
                    name = tmp[0].strip()
                    value = tmp[1].strip()
                    ret[str(name)] = str(value)
            return ret
        except Exception:
            raise ConanException("Invalid %s file!" % self.put_headers_path)

    @property
    def registry(self):
        return os.path.join(self.conan_folder, REGISTRY)

    @property
    def conan_config(self):
        """Parsed conan.conf; the file is created with defaults if missing."""
        if not self._conan_config:
            if not os.path.exists(self.conan_conf_path):
                save(self.conan_conf_path, normalize(default_client_conf))

            self._conan_config = ConanClientConfigParser(self.conan_conf_path)
        return self._conan_config

    @property
    def localdb(self):
        return os.path.join(self.conan_folder, LOCALDB)

    @property
    def conan_conf_path(self):
        return os.path.join(self.conan_folder, CONAN_CONF)

    @property
    def profiles_path(self):
        return os.path.join(self.conan_folder, PROFILES_FOLDER)

    @property
    def settings_path(self):
        return os.path.join(self.conan_folder, CONAN_SETTINGS)

    @property
    def default_profile_path(self):
        # conan.conf may hold either an absolute path or a name relative to
        # the profiles folder.
        if os.path.isabs(self.conan_config.default_profile):
            return self.conan_config.default_profile
        else:
            return os.path.expanduser(os.path.join(self.conan_folder,
                                                   PROFILES_FOLDER,
                                                   self.conan_config.default_profile))

    @property
    def default_profile(self):
        """The default profile, auto-detected and saved on first use.

        Settings are always mixed with CONAN_ENV_XXX environment overrides.
        """
        if self._default_profile is None:
            if not os.path.exists(self.default_profile_path):
                self._output.writeln("Auto detecting your dev setup to initialize the "
                                     "default profile (%s)" % self.default_profile_path,
                                     Color.BRIGHT_YELLOW)

                default_settings = detect_defaults_settings(self._output)
                self._output.writeln("Default settings", Color.BRIGHT_YELLOW)
                self._output.writeln("\n".join(["\t%s=%s" % (k, v)
                                                for (k, v) in default_settings]),
                                     Color.BRIGHT_YELLOW)
                self._output.writeln("*** You can change them in %s ***"
                                     % self.default_profile_path, Color.BRIGHT_MAGENTA)
                self._output.writeln("*** Or override with -s compiler='other' -s ...s***\n\n",
                                     Color.BRIGHT_MAGENTA)

                self._default_profile = Profile()
                tmp = OrderedDict(default_settings)
                self._default_profile.update_settings(tmp)
                save(self.default_profile_path, self._default_profile.dumps())
            else:
                self._default_profile, _ = read_profile(self.default_profile_path, None, None)

            # Mix profile settings with environment
            mixed_settings = _mix_settings_with_env(self._default_profile.settings)
            self._default_profile.settings = mixed_settings
        return self._default_profile

    @property
    def settings(self):
        """Returns {setting: [value, ...]} defining all the possible
           settings without values"""
        if not self._settings:
            # TODO: Read default environment settings
            if not os.path.exists(self.settings_path):
                save(self.settings_path, normalize(default_settings_yml))
                settings = Settings.loads(default_settings_yml)
            else:
                content = load(self.settings_path)
                settings = Settings.loads(content)

            self._settings = settings
        return self._settings

    def conan_packages(self, conan_reference):
        """ Returns a list of package_id from a local cache package folder """
        assert isinstance(conan_reference, ConanFileReference)
        packages_dir = self.packages(conan_reference)
        try:
            packages = [dirname for dirname in os.listdir(packages_dir)
                        if os.path.isdir(os.path.join(packages_dir, dirname))]
        except OSError:  # if there isn't any package folder
            packages = []
        return packages

    def conan_builds(self, conan_reference):
        """ Returns a list of package ids from a local cache build folder """
        assert isinstance(conan_reference, ConanFileReference)
        builds_dir = self.builds(conan_reference)
        try:
            builds = [dirname for dirname in os.listdir(builds_dir)
                      if os.path.isdir(os.path.join(builds_dir, dirname))]
        except OSError:  # if there isn't any build folder
            builds = []
        return builds

    def load_manifest(self, conan_reference):
        """conan_id = sha(zip file)"""
        filename = self.digestfile_conanfile(conan_reference)
        return FileTreeManifest.loads(load(filename))

    def load_package_manifest(self, package_reference):
        """conan_id = sha(zip file)"""
        filename = self.digestfile_package(package_reference, short_paths=None)
        return FileTreeManifest.loads(load(filename))

    @staticmethod
    def read_package_recipe_hash(package_folder):
        """Recipe hash recorded in the package's conaninfo.txt."""
        filename = os.path.join(package_folder, CONANINFO)
        info = ConanInfo.loads(load(filename))
        return info.recipe_hash

    def conan_manifests(self, conan_reference):
        """(stored, computed) manifests of a recipe, or (None, None)."""
        digest_path = self.digestfile_conanfile(conan_reference)
        if not os.path.exists(digest_path):
            return None, None
        export_sources_path = self.export_sources(conan_reference, short_paths=None)
        return self._digests(digest_path, export_sources_path)

    def package_manifests(self, package_reference):
        """(stored, computed) manifests of a package, or (None, None)."""
        digest_path = self.digestfile_package(package_reference, short_paths=None)
        if not os.path.exists(digest_path):
            return None, None
        return self._digests(digest_path)

    @staticmethod
    def _digests(digest_path, exports_sources_folder=None):
        # Compare what was recorded (readed_digest) against a fresh scan of
        # the folder (expected_digest).
        readed_digest = FileTreeManifest.loads(load(digest_path))
        expected_digest = FileTreeManifest.create(os.path.dirname(digest_path),
                                                  exports_sources_folder)
        return readed_digest, expected_digest

    def delete_empty_dirs(self, deleted_refs):
        """Remove now-empty parent folders (up to 4 levels) of deleted refs."""
        for ref in deleted_refs:
            ref_path = self.conan(ref)
            for _ in range(4):
                if os.path.exists(ref_path):
                    try:  # Take advantage that os.rmdir does not delete non-empty dirs
                        os.rmdir(ref_path)
                    except OSError:
                        break  # not empty
                ref_path = os.path.dirname(ref_path)


def _mix_settings_with_env(settings):
    """Reads CONAN_ENV_XXXX variables from environment and if it's defined
    uses these value instead of the default from conf file.
    If you specify a compiler with ENV variable you need to specify all the
    subsettings, the file defaulted will be ignored"""

    def get_env_value(name):
        env_name = "CONAN_ENV_%s" % name.upper().replace(".", "_")
        return os.getenv(env_name, None)

    def get_setting_name(env_name):
        return env_name[10:].lower().replace("_", ".")

    ret = OrderedDict()
    for name, value in settings.items():
        if get_env_value(name):
            ret[name] = get_env_value(name)
        else:
            # being a subsetting, if parent exist in env discard this, because
            # env doesn't define this setting. EX: env=>Visual Studio but
            # env doesn't define compiler.libcxx
            if "." not in name or not get_env_value(name.split(".")[0]):
                ret[name] = value
    # Now read if there are more env variables
    for env, value in sorted(os.environ.items()):
        if env.startswith("CONAN_ENV_") and get_setting_name(env) not in ret:
            ret[get_setting_name(env)] = value
    return ret
{ "content_hash": "99be5483ce7a58f597ac5aea068eb1af", "timestamp": "", "source": "github", "line_count": 267, "max_line_length": 123, "avg_line_length": 40.57677902621723, "alnum_prop": 0.6189772937050028, "repo_name": "tivek/conan", "id": "0ac003e7c55515ef21b1d9f330bee602223cb2e3", "size": "10834", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "conans/client/client_cache.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1100" }, { "name": "Groovy", "bytes": "6080" }, { "name": "Python", "bytes": "2456395" }, { "name": "Shell", "bytes": "1864" } ], "symlink_target": "" }
"""DMD (hardware device)."""
import struct
from kivy.graphics.instructions import Callback
from kivy.uix.effectwidget import EffectWidget
from kivy.clock import Clock
from kivy.graphics.fbo import Fbo
from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE
from kivy.graphics.texture import Texture

from mpfmc.effects.gain import GainEffect
from mpfmc.effects.flip_vertical import FlipVerticalEffect
from mpfmc.effects.gamma import GammaEffect

MYPY = False
if MYPY:   # pragma: no cover
    from mpfmc.core.mc import MpfMc     # pylint: disable-msg=cyclic-import,unused-import


class DmdBase:

    """Base class for DMD devices.

    Renders the configured source display into an offscreen FBO (with flip /
    gain / gamma effects applied) and ships the raw RGB bytes out via ``send``.
    """

    # Human-readable device name for logs; subclasses override it.
    dmd_name_string = 'DMD'

    def __init__(self, mc: "MpfMc", name: str, config: dict) -> None:
        """Initialise DMD."""
        self.mc = mc
        self.name = name

        # Use self.dmd_name_string (not the class attribute / a literal) so
        # subclasses such as RgbDmd log under their own device name.
        self.mc.log.info('Initializing %s', self.dmd_name_string)

        self.config = self._get_validated_config(config)

        self.source = self.mc.displays[self.config['source_display']]
        self.prev_data = None       # last frame sent (for only_send_changes)
        self._dirty = True          # a re-render is pending

        # put the widget canvas on a Fbo
        texture = Texture.create(size=self.source.size, colorfmt='rgb')
        self.fbo = Fbo(size=self.source.size, texture=texture)

        self.effect_widget = EffectWidget()

        effect_list = list()
        effect_list.append(FlipVerticalEffect())

        if self.config['brightness'] != 1.0:
            if not 0.0 <= self.config['brightness'] <= 1.0:
                raise ValueError("DMD brightness value should be between 0.0 "
                                 "and 1.0. Yours is {}".format(self.config['brightness']))

            effect_list.append(GainEffect(gain=self.config['brightness']))

        if self.config['gamma'] != 1.0:
            effect_list.append(GammaEffect(gamma=self.config['gamma']))

        self.effect_widget.effects = effect_list
        self.effect_widget.size = self.source.size

        self.fbo.add(self.effect_widget.canvas)

        with self.source.canvas:
            self.callback = Callback(self._trigger_rendering)

        self._set_dmd_fps()

    def _trigger_rendering(self, *args):
        """Canvas callback: mark the DMD as needing a re-render."""
        del args
        self._dirty = True

    def _get_validated_config(self, config: dict) -> dict:
        raise NotImplementedError

    def _set_dmd_fps(self) -> None:
        """Schedule ``tick`` at the lower of the configured and mc fps."""
        # fps is the rate that the connected client requested. We'll use the
        # lower of the two
        mc_fps = self.config['fps']

        if mc_fps == 0:
            # pylint: disable-msg=protected-access
            mc_fps = Clock._max_fps

        # pylint: disable-msg=protected-access
        if mc_fps > Clock._max_fps:
            self.mc.log.warning("%s fps is higher than mpf-mc fps. "
                                "Will use mpf-mc fps setting for the DMD.",
                                self.dmd_name_string)
            # pylint: disable-msg=protected-access
            fps = Clock._max_fps
            update = 0
        # pylint: disable-msg=protected-access
        elif Clock._max_fps > mc_fps > 0:
            fps = mc_fps
            update = 1 / fps
        else:
            # pylint: disable-msg=protected-access
            fps = Clock._max_fps
            update = 0

        Clock.schedule_interval(self.tick, update)
        self.mc.log.info("Setting %s to %sfps",
                         self.dmd_name_string, fps)

    def tick(self, *args) -> None:
        """Schedule a render at the end of the frame if anything changed."""
        del args

        # run this at the end of the tick to make sure all kivy bind callbacks
        # have executed
        if self._dirty:
            Clock.schedule_once(self._render, -1)

    def _render(self, dt):
        """Render the source display into the FBO and send the frame."""
        del dt

        self._dirty = False
        widget = self.source
        fbo = self.fbo

        # detach the widget from the parent
        parent = widget.parent
        if parent and hasattr(parent, "remove_display_source"):
            parent.remove_display_source(widget)

        # clear the fbo background
        fbo.bind()
        fbo.clear_buffer()
        fbo.release()

        self.effect_widget.add_widget(widget.container)

        fbo.draw()
        fbo.bind()
        data = glReadPixels(0, 0, widget.native_size[0], widget.native_size[1],
                            GL_RGB, GL_UNSIGNED_BYTE)
        fbo.release()

        self.effect_widget.remove_widget(widget.container)

        # reattach to the parent
        if parent and hasattr(parent, "add_display_source"):
            parent.add_display_source(widget)

        if not self.config['only_send_changes'] or self.prev_data != data:
            self.prev_data = data
            self.send(data)

    def send(self, data: bytes) -> None:
        """Send data to DMD via BCP."""
        raise NotImplementedError


class Dmd(DmdBase):

    """Monochrome DMD: frames are reduced to 4-bit luminosity values."""

    def _get_validated_config(self, config: dict) -> dict:
        return self.mc.config_validator.validate_config('dmds', config)

    @classmethod
    def _convert_to_single_bytes(cls, data, config: dict) -> bytes:
        """Collapse an RGB frame into one 0-15 luminosity byte per pixel."""
        new_data = bytearray()
        loops = 0
        # Default weights are the ITU-R BT.601 luma coefficients.
        config.setdefault('luminosity', (.299, .587, .114))
        luminosity = config['luminosity']

        for r, g, b in struct.iter_unpack('BBB', data):
            loops += 1
            try:
                pixel_weight = ((r * luminosity[0]) + (g * luminosity[1]) +
                                (b * luminosity[2])) / 255.
                new_data.append(int(round(pixel_weight * 15)))
            except ValueError:
                # Re-raise with the offending pixel index and channel values
                # for easier debugging.
                raise ValueError(loops, r, g, b)

        return bytes(new_data)

    def send(self, data: bytes) -> None:
        """Send data to DMD via BCP."""
        data = self._convert_to_single_bytes(data, self.config)
        self.mc.bcp_processor.send('dmd_frame', rawbytes=data, name=self.name)


class RgbDmd(DmdBase):

    """RGB DMD: frames are sent as-is, with optional channel reordering."""

    dmd_name_string = 'RGB DMD'

    def _get_validated_config(self, config: dict) -> dict:
        return self.mc.config_validator.validate_config('rgb_dmds', config)

    @staticmethod
    def _reorder_channels(data, order):
        """Rewrite an RGB byte stream in the channel order given (e.g. 'bgr')."""
        new_data = bytearray()
        for r, g, b in struct.iter_unpack('BBB', data):
            for channel in order:
                if channel == "r":
                    new_data.append(r)
                elif channel == "g":
                    new_data.append(g)
                elif channel == "b":
                    new_data.append(b)
                else:
                    raise ValueError("Unknown channel {}".format(channel))
        return bytes(new_data)

    def send(self, data: bytes) -> None:
        """Send data to RGB DMD via BCP."""
        if self.config['channel_order'] != 'rgb':
            data = self._reorder_channels(data, self.config['channel_order'])
        self.mc.bcp_processor.send('rgb_dmd_frame', rawbytes=data, name=self.name)
{ "content_hash": "7cff121a071ee618e24d9686490e3495", "timestamp": "", "source": "github", "line_count": 212, "max_line_length": 103, "avg_line_length": 32.25471698113208, "alnum_prop": 0.5798479087452472, "repo_name": "missionpinball/mpf-mc", "id": "92ed9631fd5b1cd59bdadf4f7ff31e92809dd623", "size": "6838", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "mpfmc/core/dmd.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "3434" }, { "name": "Cython", "bytes": "44814" }, { "name": "Dockerfile", "bytes": "1441" }, { "name": "Makefile", "bytes": "262" }, { "name": "Python", "bytes": "1198826" }, { "name": "Shell", "bytes": "829" } ], "symlink_target": "" }
from datetime import datetime

from ansible.plugins.callback import CallbackBase


class PlayLogger:
    """Store log output in a single object.
    We create a new object per Ansible run
    """

    def __init__(self):
        self.log = ''       # accumulated log text
        self.runtime = 0    # play runtime in whole seconds

    def append(self, log_line):
        """append to log"""
        self.log += log_line + "\n\n"

    def banner(self, msg):
        """Return *msg* padded with trailing stars to a 80-column banner."""
        width = 78 - len(msg)
        if width < 3:
            width = 3
        filler = "*" * width
        return "\n%s %s " % (msg, filler)


class CallbackModule(CallbackBase):
    """
    Reference: https://github.com/ansible/ansible/blob/v2.0.0.2-1/lib/ansible/plugins/callback/default.py

    Collects the play output into a PlayLogger instead of printing it, so the
    caller can persist it after the run.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stored'
    CALLBACK_NAME = 'database'

    def __init__(self):
        super(CallbackModule, self).__init__()
        self.logger = PlayLogger()
        self.start_time = datetime.now()

    def _log_exception(self, result):
        """Log the last line of a result's traceback, then drop it.

        This may never be called because default handler deletes
        the exception, since Ansible thinks it knows better.
        """
        if 'exception' in result._result:
            # Extract the error message and log it
            error = result._result['exception'].strip().split('\n')[-1]
            self.logger.append(error)

            # Remove the exception from the result so it's not shown every time
            del result._result['exception']

    def _status_msg(self, result, with_item=False):
        """Build the "ok:/changed: [host]" line shared by the ok handlers."""
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        verb = "changed" if result._result.get('changed', False) else "ok"

        if delegated_vars:
            msg = "%s: [%s -> %s]" % (verb, result._host.get_name(),
                                      delegated_vars['ansible_host'])
        else:
            msg = "%s: [%s]" % (verb, result._host.get_name())

        if with_item:
            msg += " => (item=%s)" % (result._result['item'])
        return msg

    def _append_fatal(self, result, status):
        """Log a "fatal: ... STATUS! => details" line (FAILED/UNREACHABLE)."""
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        if delegated_vars:
            self.logger.append("fatal: [%s -> %s]: %s! => %s"
                               % (result._host.get_name(),
                                  delegated_vars['ansible_host'], status,
                                  self._dump_results(result._result)))
        else:
            self.logger.append("fatal: [%s]: %s! => %s"
                               % (result._host.get_name(), status,
                                  self._dump_results(result._result)))

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self._log_exception(result)

        # Else log the reason for the failure
        if result._task.loop and 'results' in result._result:
            self._process_items(result)  # item_on_failed, item_on_skipped, item_on_ok
        else:
            self._append_fatal(result, "FAILED")

    def v2_runner_on_ok(self, result):
        self._clean_results(result._result, result._task.action)

        if result._task.action == 'include':
            return

        if result._task.loop and 'results' in result._result:
            self._process_items(result)  # item_on_failed, item_on_skipped, item_on_ok
        else:
            self.logger.append(self._status_msg(result))

    def v2_runner_on_skipped(self, result):
        if result._task.loop and 'results' in result._result:
            self._process_items(result)  # item_on_failed, item_on_skipped, item_on_ok
        else:
            msg = "skipping: [%s]" % result._host.get_name()
            self.logger.append(msg)

    def v2_runner_on_unreachable(self, result):
        self._append_fatal(result, "UNREACHABLE")

    def v2_runner_on_no_hosts(self, task):
        self.logger.append("skipping: no hosts matched")

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.logger.append("TASK [%s]" % task.get_name().strip())

    def v2_playbook_on_play_start(self, play):
        name = play.get_name().strip()
        if not name:
            msg = "PLAY"
        else:
            msg = "PLAY [%s]" % name
        self.logger.append(msg)

    def v2_playbook_item_on_ok(self, result):
        if result._task.action == 'include':
            return
        self.logger.append(self._status_msg(result, with_item=True))

    def v2_playbook_item_on_failed(self, result):
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        self._log_exception(result)

        if delegated_vars:
            self.logger.append("failed: [%s -> %s] => (item=%s) => %s"
                               % (result._host.get_name(),
                                  delegated_vars['ansible_host'],
                                  result._result['item'],
                                  self._dump_results(result._result)))
        else:
            self.logger.append("failed: [%s] => (item=%s) => %s"
                               % (result._host.get_name(),
                                  result._result['item'],
                                  self._dump_results(result._result)))

    def v2_playbook_item_on_skipped(self, result):
        msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(),
                                                result._result['item'])
        self.logger.append(msg)

    def v2_playbook_on_stats(self, stats):
        run_time = datetime.now() - self.start_time
        self.logger.runtime = run_time.seconds  # returns an int, unlike run_time.total_seconds()
        hosts = sorted(stats.processed.keys())
        for h in hosts:
            t = stats.summarize(h)
            msg = "PLAY RECAP [%s] : %s %s %s %s %s" % (
                h,
                "ok: %s" % (t['ok']),
                "changed: %s" % (t['changed']),
                "unreachable: %s" % (t['unreachable']),
                "skipped: %s" % (t['skipped']),
                "failed: %s" % (t['failures']),
            )
            self.logger.append(msg)

    def record_logs(self, success=False):
        """
        Special callback added to this callback plugin; not part of the
        Ansible callback API. Called by the Runner object after the play
        finishes to emit the collected log, the runtime (seconds) and the
        success flag.
        :param success: whether the run is considered successful
        :return:
        """
        print(self.logger.log)
        print(self.logger.runtime)
        print(success)
{ "content_hash": "720dcf7e9ad2e6b0d9bdf6f4c31c6c92", "timestamp": "", "source": "github", "line_count": 180, "max_line_length": 191, "avg_line_length": 39.166666666666664, "alnum_prop": 0.5573049645390071, "repo_name": "aivaturi/aws-terraform-ansible", "id": "cf70bc1dbdd461fb8024396777519dc77ce6b41f", "size": "7050", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ansible/plugins/callback/logger.py", "mode": "33188", "license": "mit", "language": [ { "name": "HCL", "bytes": "22000" }, { "name": "Python", "bytes": "65039" }, { "name": "Shell", "bytes": "20064" }, { "name": "Smarty", "bytes": "438" } ], "symlink_target": "" }
def get_conflict_for_node(n, graph, colors):
    """Return how many neighbours of node *n* share its colour.

    :param n: node index
    :param graph: object exposing ``nodes[n].edges`` (neighbour indices)
    :param colors: list mapping node index -> colour index
    """
    cur_color = colors[n]
    return sum(1 for e in graph.nodes[n].edges if colors[e] == cur_color)


def get_all_conflicts(graph, colors):
    """Return the per-node conflict counts for the whole graph."""
    return [get_conflict_for_node(n, graph, colors)
            for n in range(graph.get_num_nodes())]


def solve_min_conflict(node_count, edges):
    """Colour a graph with a greedy min-conflicts heuristic.

    Starts with a single colour, repeatedly recolours the most conflicted
    node with the best already-existing colour, and introduces a brand-new
    colour whenever no existing colour strictly reduces that node's
    conflicts (which guarantees eventual convergence).

    :param node_count: number of nodes
    :param edges: edge list accepted by ``Graph``
    :return: tuple ``(num_colors, colors)``
    """
    # Imported lazily so the pure helper functions above remain usable
    # (and testable) without the package context.
    from . import Graph

    g = Graph(node_count, edges)

    # Start with every node the same colour. This will cause a lot of
    # conflicts.
    colors = [0] * node_count
    conflicts = get_all_conflicts(g, colors)
    num_colors = 1

    while sum(conflicts) > 0:
        # Get the most conflicting node (first one on ties).
        max_conflicts = max(conflicts)
        node_to_change = conflicts.index(max_conflicts)

        # Decide which existing colour minimises this node's conflicts.
        best_new_conflicts = max_conflicts
        best_new_color = -1
        trial_colors = list(colors)
        for new_color in range(num_colors):
            trial_colors[node_to_change] = new_color
            new_conflicts = get_conflict_for_node(node_to_change, g, trial_colors)
            if new_conflicts < best_new_conflicts:
                best_new_conflicts = new_conflicts
                best_new_color = new_color

        if best_new_conflicts < max_conflicts:
            # Recolouring strictly reduces conflicts - take it.
            colors[node_to_change] = best_new_color
        else:
            # Couldn't reduce the conflicts: add a new colour so that the
            # loop eventually converges.
            colors[node_to_change] = num_colors
            num_colors += 1

        # Recompute conflicts.
        # NOTE(review): only the changed node and its neighbours can differ,
        # so an incremental update would be cheaper than this full O(V+E)
        # recomputation - kept as in the original.
        conflicts = get_all_conflicts(g, colors)

    return num_colors, colors
{ "content_hash": "e42f9436009551145b731b940f6e08a8", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 81, "avg_line_length": 32.736111111111114, "alnum_prop": 0.5867628341111583, "repo_name": "chriselion/discreteopt", "id": "a2531a3adcf0ad868b2053a7d8666e49b83d6d1b", "size": "2357", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "coloring/min_conflict.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "60085" } ], "symlink_target": "" }
"""
homeassistant.util.template
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Template utility methods for rendering strings with HA data.
"""
# pylint: disable=too-few-public-methods
import json
import logging
import jinja2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from homeassistant.const import STATE_UNKNOWN
from homeassistant.exceptions import TemplateError

_LOGGER = logging.getLogger(__name__)
# Private marker distinguishing "no error_value given" from an explicit None.
_SENTINEL = object()


def render_with_possible_json_value(hass, template, value,
                                    error_value=_SENTINEL):
    """ Renders template with value exposed.
        If valid JSON will expose value_json too. """
    variables = {
        'value': value
    }
    try:
        variables['value_json'] = json.loads(value)
    except ValueError:
        # Not JSON - only plain 'value' is exposed.
        pass

    try:
        return render(hass, template, variables)
    except TemplateError:
        _LOGGER.exception('Error parsing value')
        # Fall back to the raw value unless the caller supplied an
        # explicit error_value.
        return value if error_value is _SENTINEL else error_value


def render(hass, template, variables=None, **kwargs):
    """ Render given template.

    Exposes 'states', 'is_state' and 'is_state_attr' to the template,
    plus any extra variables/kwargs. Raises TemplateError on bad input. """
    if variables is not None:
        kwargs.update(variables)

    try:
        return ENV.from_string(template, {
            'states': AllStates(hass),
            'is_state': hass.states.is_state,
            'is_state_attr': hass.states.is_state_attr
        }).render(kwargs).strip()
    except jinja2.TemplateError as err:
        raise TemplateError(err)


class AllStates(object):
    """ Class to expose all HA states as attributes. """
    def __init__(self, hass):
        self._hass = hass

    def __getattr__(self, name):
        # states.light -> all states of the 'light' domain
        return DomainStates(self._hass, name)

    def __iter__(self):
        return iter(sorted(self._hass.states.all(),
                           key=lambda state: state.entity_id))

    def __call__(self, entity_id):
        # states('light.kitchen') -> state string (or 'unknown')
        state = self._hass.states.get(entity_id)
        return STATE_UNKNOWN if state is None else state.state


class DomainStates(object):
    """ Class to expose a specific HA domain as attributes. """

    def __init__(self, hass, domain):
        self._hass = hass
        self._domain = domain

    def __getattr__(self, name):
        # states.light.kitchen -> State object (or None)
        return self._hass.states.get('{}.{}'.format(self._domain, name))

    def __iter__(self):
        return iter(sorted(
            (state for state in self._hass.states.all()
             if state.domain == self._domain),
            key=lambda state: state.entity_id))


def forgiving_round(value, precision=0):
    """ Rounding method that accepts strings (and leaves bad input as-is). """
    try:
        value = round(float(value), precision)
        return int(value) if precision == 0 else value
    except (ValueError, TypeError):
        # If value can't be converted to float (e.g. None or a non-numeric
        # string), return it unchanged instead of blowing up the template.
        return value


def multiply(value, amount):
    """ Converts to float and multiplies value (bad input returned as-is). """
    try:
        return float(value) * amount
    except (ValueError, TypeError):
        # If value can't be converted to float, keep the template forgiving.
        return value


class TemplateEnvironment(ImmutableSandboxedEnvironment):
    """ Home Assistant template environment. """

    def is_safe_callable(self, obj):
        # Allow calling states('entity.id') despite the sandbox.
        return isinstance(obj, AllStates) or super().is_safe_callable(obj)

ENV = TemplateEnvironment()
ENV.filters['round'] = forgiving_round
ENV.filters['multiply'] = multiply
{ "content_hash": "817b2593eee64e2977629c1123e1c9c3", "timestamp": "", "source": "github", "line_count": 115, "max_line_length": 74, "avg_line_length": 28.6, "alnum_prop": 0.6299787169352387, "repo_name": "nnic/home-assistant", "id": "d9b1990a252d1708cfcc7bb16206c8116c1088b7", "size": "3289", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/util/template.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1482064" }, { "name": "Python", "bytes": "1790232" }, { "name": "Shell", "bytes": "3570" } ], "symlink_target": "" }
"""Diofant is a Python library for symbolic mathematics."""

import os

# Debug mode is enabled by setting DIOFANT_DEBUG to anything other than
# the literal string 'False'.
DIOFANT_DEBUG = os.getenv('DIOFANT_DEBUG', 'False') != 'False'
del os  # keep the package namespace clean

import pkg_resources
# Single source of truth for the version: the installed distribution metadata.
__version__ = pkg_resources.get_distribution(__name__).version
del pkg_resources  # keep the package namespace clean

# Re-export the public API of all subpackages into the top-level namespace.
# NOTE: the order is significant - names star-imported later may override
# identically-named objects from earlier modules.
from .core import *
from .logic import *
from .polys import *
from .domains import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .calculus import *
from .combinatorics import *
from .plotting import *
from .printing import *
from .interactive import *
{ "content_hash": "222bd41c893808f5e09cea1bf3b1eebb", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 62, "avg_line_length": 23.838709677419356, "alnum_prop": 0.7388362652232747, "repo_name": "skirpichev/omg", "id": "78c221bac0d85fd94f124b56a2a6148661a57e49", "size": "739", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "diofant/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "10305079" } ], "symlink_target": "" }
from django.conf.urls import url from django.views.generic import RedirectView from .views import Index, MissionDetail, MissionCreate, MissionUpdate, MissionDelete, VesselCreate, AllVessels app_name = 'missionlogger' urlpatterns = [ url(r'^$', RedirectView.as_view(url='missions')), url(r'^missions$', Index.as_view(), name='index'), url(r'^missions/(?P<mission_status>\d)$', Index.as_view(), name='index-by-status'), url(r'^missions/(?P<vessel_name>.+)$', Index.as_view(), name='index-by-name'), url(r'^mission/(?P<mission_id>\d+)$', MissionDetail.as_view(), name='mission-detail'), url(r'^mission/add$', MissionCreate.as_view(), name='mission-add'), url(r'^mission/(?P<pk>\d+)/update$', MissionUpdate.as_view(), name='mission-update'), url(r'^mission/(?P<pk>\d+)/delete$', MissionDelete.as_view(), name='mission-delete'), url(r'^vessels$', AllVessels.as_view(), name='vessels'), url(r'^vessel/add$', VesselCreate.as_view(), name='vessel-add'), ]
{ "content_hash": "7e4ba568a874f7d588802da2fad57b8c", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 110, "avg_line_length": 55, "alnum_prop": 0.6686868686868687, "repo_name": "DeepSpace2/ksp-mission-logger", "id": "d1066f383f719b2def2dd1642eb43941f5160c8b", "size": "990", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "missionlogger/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "44" }, { "name": "HTML", "bytes": "7397" }, { "name": "Python", "bytes": "15590" } ], "symlink_target": "" }
"""SCons.Tool.nasm Tool-specific initialization for nasm, the famous Netwide Assembler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001 - 2019 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# __revision__ = "src/engine/SCons/Tool/nasm.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan" import SCons.Defaults import SCons.Tool import SCons.Util ASSuffixes = ['.s', '.asm', '.ASM'] ASPPSuffixes = ['.spp', '.SPP', '.sx'] if SCons.Util.case_sensitive_suffixes('.s', '.S'): ASPPSuffixes.extend(['.S']) else: ASSuffixes.extend(['.S']) def generate(env): """Add Builders and construction variables for nasm to an Environment.""" static_obj, shared_obj = SCons.Tool.createObjBuilders(env) for suffix in ASSuffixes: static_obj.add_action(suffix, SCons.Defaults.ASAction) static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter) for suffix in ASPPSuffixes: static_obj.add_action(suffix, SCons.Defaults.ASPPAction) static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter) env['AS'] = 'nasm' env['ASFLAGS'] = SCons.Util.CLVar('') env['ASPPFLAGS'] = '$ASFLAGS' env['ASCOM'] = '$AS $ASFLAGS -o $TARGET $SOURCES' env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES' def exists(env): return env.Detect('nasm') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
{ "content_hash": "49afd939fd148c55b9d923a6457ecd4e", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 115, "avg_line_length": 36.94444444444444, "alnum_prop": 0.7274436090225563, "repo_name": "kayhayen/Nuitka", "id": "f08930e48e702d350027a5573019908d7374a01f", "size": "2660", "binary": false, "copies": "4", "ref": "refs/heads/develop", "path": "nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/nasm.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1868" }, { "name": "C", "bytes": "617681" }, { "name": "C++", "bytes": "149777" }, { "name": "Python", "bytes": "6603718" }, { "name": "Shell", "bytes": "1088" } ], "symlink_target": "" }
import csv import codecs import os import json # this code will be used to read data from each of the problem based on json file and store it as csv file def open_text(path): opened_file = codecs.open(path, 'r') text = opened_file.read() return text def file_info(path): info_path = open(path) data = json.load(info_path) info_path.close() dict_file = {} for i in xrange(len(data)): dict_file[data[i][str("folder")]] = data[i][str("language")] return dict_file def convert_to_csv(all_problem_path, csv_path, dict_info): for k, v in dict_info.iteritems(): problem_path = all_problem_path + "/" + k all_data = [["id", "article"]] for subdir, dirs, files in os.walk(problem_path): all_data = [["id", "article"]] for file_ in files: each_file = [] file_path = subdir + os.path.sep + file_ file_id = file_.split(".")[0] file_text = open_text(file_path) each_file.append(file_id) each_file.append(file_text) all_data.append(each_file) pro_path = problem_path.split("/") file_name = csv_path + "/" + pro_path[-1] + "." + dict_info[k] + ".csv" with codecs.open(file_name, 'wb') as csvfile: writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL, dialect='excel') writer.writerows(all_data)
{ "content_hash": "0bf7cfa211fc9a57f75861fcce01011a", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 106, "avg_line_length": 32.86363636363637, "alnum_prop": 0.5643153526970954, "repo_name": "yunitata/PAN16", "id": "feba6bef0f38fb506f2ab6f605b436ea3b1faa58", "size": "1446", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "read_data.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "13266" } ], "symlink_target": "" }
''' OpTestPNOR ---------- This testcase will deal with testing access to the host pnor from petitboot through the pflash program ''' import time import subprocess import subprocess import re import sys import os import os.path import unittest import OpTestConfiguration from common.OpTestSystem import OpSystemState from common.OpTestConstants import OpTestConstants as BMC_CONST from common.Exceptions import CommandFailed import logging import OpTestLogger log = OpTestLogger.optest_logger_glob.get_logger(__name__) class OpTestPNOR(): def setUp(self): conf = OpTestConfiguration.conf self.cv_HOST = conf.host() self.cv_IPMI = conf.ipmi() self.cv_SYSTEM = conf.system() def pflashErase(self, offset, length): self.c.run_command("pflash -e -f -a %d -s %d" % (offset, length)) def pflashErasePartition(self, partition): self.c.run_command("pflash -e -f -P %s" % (partition)) def pflashRead(self, filename, offset, length): self.c.run_command("pflash -r %s -a %d -s %d" % (filename, offset, length)) def pflashReadPartition(self, filename, partition): self.c.run_command("pflash -r %s -P %s" % (filename, partition)) def pflashWrite(self, filename, offset, length): self.c.run_command("pflash -f -p %s -a %d -s %d" % (filename, offset, length)) def pflashWritePartition(self, filename, partition): self.c.run_command("pflash -f -p %s -P %s" % (filename, partition)) def pflashGetPartition(self, partition): d = self.c.run_command("pflash --info") for line in d: s = re.search(partition, line) if s: m = re.match( r'ID=\d+\s+\S+\s+((0[xX])?[0-9a-fA-F]+)..(0[xX])?[0-9a-fA-F]+\s+\(actual=((0[xX])?[0-9a-fA-F]+)\)\s(\[)?([A-Za-z-]+)?(\])?.*', line) if not m: continue offset = int(m.group(1), 16) length = int(m.group(4), 16) ret = {'offset': offset, 'length': length } flags = m.group(7) if flags: ret['flags'] = [x for x in list(flags) if x != '-'] return ret def comparePartitionFile(self, filename, partition): self.c.run_command("pflash -r /tmp/tmp -P %s" % (partition)) try: 
self.c.run_command("diff /tmp/tmp %s" % (filename)) except CommandFailed as cf: self.assertEqual(cf.output, "0") def runTestReadEraseWriteNVRAM(self): # Read NVRAM to file /tmp/nvram self.pflashReadPartition("/tmp/nvram", "NVRAM") nvramInfo = self.pflashGetPartition("NVRAM") # Erase the NVRAM partition self.pflashErase(nvramInfo['offset'], nvramInfo['length']) # Read the (hopefully) erased NVRAM self.pflashReadPartition("/tmp/null", "NVRAM") # Write back to the NVRAM partition self.pflashWrite( "/tmp/nvram", nvramInfo['offset'], nvramInfo['length']) # Compare /tmp/nvram to rewritten nvram contents self.comparePartitionFile("/tmp/nvram", "NVRAM") # Check /tmp/null all "erased" (377 is 0xFF in octal) d = self.c.run_command("cat /tmp/null | tr -d '\\377' | wc -c") self.assertEqual(d[0], "0") def runTestReadWritePAYLOAD(self): payloadInfo = self.pflashGetPartition("PAYLOAD") log.debug(repr(payloadInfo)) # Read PAYLOAD to file /tmp/payload self.pflashReadPartition("/tmp/payload", "PAYLOAD") # Write /tmp/payload to PAYLOAD try: self.pflashWrite( "/tmp/payload", payloadInfo['offset'], payloadInfo['length']) except CommandFailed as cf: log.debug("pflashWrite Flags={} cf={} cf.output={} cf.exitcode={}" .format(payloadInfo.get('flags'), cf, cf.output, cf.exitcode)) if payloadInfo.get('flags'): # we have an iterable, so check it if not ('R' in payloadInfo.get('flags') and cf.exitcode in [8]): raise cf else: # we have no flags if cf.exitcode in [8]: raise cf # Check the same self.comparePartitionFile("/tmp/payload", "PAYLOAD") # Try using the pflash -P option as well try: self.pflashWritePartition("/tmp/payload", "PAYLOAD") except CommandFailed as cf: log.debug("pflashWritePartition Flags={} cf={} cf.output={} cf.exitcode={}" .format(payloadInfo.get('flags'), cf, cf.output, cf.exitcode)) if payloadInfo.get('flags'): # we have an iterable, so check it if not ('R' in payloadInfo.get('flags') and cf.exitcode in [8]): raise cf else: # we have no flags if cf.exitcode in [8]: raise 
cf # Check the same self.comparePartitionFile("/tmp/payload", "PAYLOAD") def runTestWriteTOC(self): tocInfo = self.pflashGetPartition("part") # Read the toc so we can write it back later self.pflashRead("/tmp/toc", tocInfo['offset'], tocInfo['length']) # Write all zeros to the toc (Because why not :D) self.c.run_command( "dd if=/dev/zero of=/tmp/zeros bs=1 count=%s" % (tocInfo['length'])) try: self.pflashWrite( "/tmp/zeros", tocInfo['offset'], tocInfo['length']) except CommandFailed as cf: self.assertEqual(cf.exitcode, 8, "pflash did not exit with correct exit code for " "a Read Only TOC. Expeceted 8, got {}.".format( cf.exitcode)) # Read and compare self.pflashRead("/tmp/tmp", tocInfo['offset'], tocInfo['length']) try: self.c.run_command("diff /tmp/tmp /tmp/zeros") except CommandFailed as cf: # This is not an error -> expected for vPNOR (prior to RO-TOC) log.debug("Failed to zero TOC") # Better write the toc back now try: self.pflashWrite("/tmp/toc", tocInfo['offset'], tocInfo['length']) except CommandFailed as cf: self.assertEqual(cf.exitcode, 8, "pflash did not exit with correct exit code for " "a Read Only TOC. Expeceted 8, got {}.".format( cf.exitcode)) def runTest(self): self.setup_test() if not self.cv_SYSTEM.has_mtd_pnor_access(): self.skipTest("Host doesn't have MTD PNOR access") # retry in case this comes after a hung console recovery self.c.run_command("uname -a", retry=5) self.c.run_command("cat /etc/os-release", retry=5) # Read Erase Write NVRAM self.runTestReadEraseWriteNVRAM() # Read and then reWrite PAYLOAD self.runTestReadWritePAYLOAD() # Try write to the TOC self.runTestWriteTOC() class Skiroot(OpTestPNOR, unittest.TestCase): def setup_test(self): self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL) self.c = self.cv_SYSTEM.console class Host(OpTestPNOR, unittest.TestCase): def setup_test(self): self.cv_SYSTEM.goto_state(OpSystemState.OS) self.c = self.cv_SYSTEM.cv_HOST.get_ssh_connection()
{ "content_hash": "d379db49dfae896343d01e26bedafb43", "timestamp": "", "source": "github", "line_count": 194, "max_line_length": 152, "avg_line_length": 39.170103092783506, "alnum_prop": 0.5649427556257403, "repo_name": "open-power/op-test-framework", "id": "6b99b898fa3843fb8ec8e62c4457f7310e098dbd", "size": "8448", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "testcases/OpTestPNOR.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "2080" }, { "name": "Python", "bytes": "1311268" }, { "name": "Shell", "bytes": "71724" }, { "name": "Tcl", "bytes": "18813" } ], "symlink_target": "" }
COLOR_LIST = [ "white", "black", "brown", "gray", "chrome", "stainless steel", "whites", "red", "browns / tans", "bronze", "silver", "blacks", "beige", "stainless", "blue", "nickel", "metallics", "clear", "grays", "green", "multi", "beige / cream", "tan", "greens", "yellow", "wood", "blues", "reds / pinks", "brushed nickel", "orange", "metallic", "brass", "yellows / golds", "oil rubbed bronze", "polished chrome", "almond", "multi-colored", "dark brown wood", "primed white", "beige/bisque", "biscuit", "ivory", "oranges / peaches", "grey", "unfinished wood", "light brown wood", "wood grain", "silver metallic", "copper", "medium brown wood", "soft white", "gold", "satin nickel", "cherry", "bright white", "red/orange", "teal", "natural", "oak", "mahogany", "aluminum", "espresso", "unfinished", "purples / lavenders", "brown/tan", "steel", "venetian bronze", "slate", "warm white", "bone", "pink", "stainless look", "reddish brown wood", "solid colors", "off-white", "walnut", "chocolate", "light almond", "vibrant brushed nickel", "satin white", "polished brass", "linen", "white primer", "purple", "charcoal", "color", "oil-rubbed bronze", "melamine white", "turquoises / aquas", "blue/purple", "primed", "bisque", "browns/tans", "assorted colors", "java", "pewter", "chestnut", "yellow/gold", "taupe", "pacific white", "cedar", "monochromatic stainless steel", "other", "platinum", "mocha", "cream", "sand", "daylight", "brushed stainless steel", "powder-coat white", ]
{ "content_hash": "d4591b9c9c5d1483b6e38e68a512cc87", "timestamp": "", "source": "github", "line_count": 112, "max_line_length": 32, "avg_line_length": 13.196428571428571, "alnum_prop": 0.6502029769959404, "repo_name": "ChenglongChen/Kaggle_HomeDepot", "id": "d5522ac8b8cf344e50d9edef09a3c75eaa6a499c", "size": "1574", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Data/dict/color_data.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2074919" }, { "name": "R", "bytes": "1179" }, { "name": "TeX", "bytes": "92772" } ], "symlink_target": "" }
import os import sys import string import xml.etree.ElementTree as etree from xml.etree.ElementTree import SubElement from utils import _make_path_relative from utils import xml_indent fs_encoding = sys.getfilesystemencoding() def _get_filetype(fn): if fn.rfind('.cpp') != -1 or fn.rfind('.cxx') != -1: return 8 if fn.rfind('.c') != -1 or fn.rfind('.C') != -1: return 1 # assemble file type if fn.rfind('.s') != -1 or fn.rfind('.S') != -1: return 2 # header type if fn.rfind('.h') != -1: return 5 if fn.rfind('.lib') != -1: return 4 # other filetype return 5 def MDK4AddGroupForFN(ProjectFiles, parent, name, filename, project_path): group = SubElement(parent, 'Group') group_name = SubElement(group, 'GroupName') group_name.text = name name = os.path.basename(filename) path = os.path.dirname (filename) basename = os.path.basename(path) path = _make_path_relative(project_path, path) path = os.path.join(path, name) files = SubElement(group, 'Files') file = SubElement(files, 'File') file_name = SubElement(file, 'FileName') name = os.path.basename(path) if name.find('.cpp') != -1: obj_name = name.replace('.cpp', '.o') elif name.find('.c') != -1: obj_name = name.replace('.c', '.o') elif name.find('.s') != -1: obj_name = name.replace('.s', '.o') elif name.find('.S') != -1: obj_name = name.replace('.s', '.o') else: obj_name = name if ProjectFiles.count(obj_name): name = basename + '_' + name ProjectFiles.append(obj_name) file_name.text = name.decode(fs_encoding) file_type = SubElement(file, 'FileType') file_type.text = '%d' % _get_filetype(name) file_path = SubElement(file, 'FilePath') file_path.text = path.decode(fs_encoding) return group def MDK4AddLibToGroup(ProjectFiles, group, name, filename, project_path): name = os.path.basename(filename) path = os.path.dirname (filename) basename = os.path.basename(path) path = _make_path_relative(project_path, path) path = os.path.join(path, name) files = SubElement(group, 'Files') file = SubElement(files, 'File') file_name = 
SubElement(file, 'FileName') name = os.path.basename(path) if name.find('.cpp') != -1: obj_name = name.replace('.cpp', '.o') elif name.find('.c') != -1: obj_name = name.replace('.c', '.o') elif name.find('.s') != -1: obj_name = name.replace('.s', '.o') elif name.find('.S') != -1: obj_name = name.replace('.s', '.o') else: obj_name = name if ProjectFiles.count(obj_name): name = basename + '_' + name ProjectFiles.append(obj_name) file_name.text = name.decode(fs_encoding) file_type = SubElement(file, 'FileType') file_type.text = '%d' % _get_filetype(name) file_path = SubElement(file, 'FilePath') file_path.text = path.decode(fs_encoding) return group def MDK4AddGroup(ProjectFiles, parent, name, files, project_path): # don't add an empty group if len(files) == 0: return group = SubElement(parent, 'Group') group_name = SubElement(group, 'GroupName') group_name.text = name for f in files: fn = f.rfile() name = fn.name path = os.path.dirname(fn.abspath) basename = os.path.basename(path) path = _make_path_relative(project_path, path) path = os.path.join(path, name) files = SubElement(group, 'Files') file = SubElement(files, 'File') file_name = SubElement(file, 'FileName') name = os.path.basename(path) if name.find('.cpp') != -1: obj_name = name.replace('.cpp', '.o') elif name.find('.c') != -1: obj_name = name.replace('.c', '.o') elif name.find('.s') != -1: obj_name = name.replace('.s', '.o') elif name.find('.S') != -1: obj_name = name.replace('.s', '.o') if ProjectFiles.count(obj_name): name = basename + '_' + name ProjectFiles.append(obj_name) file_name.text = name # name.decode(fs_encoding) file_type = SubElement(file, 'FileType') file_type.text = '%d' % _get_filetype(name) file_path = SubElement(file, 'FilePath') file_path.text = path # path.decode(fs_encoding) return group # The common part of making MDK4/5 project def MDK45Project(tree, target, script): project_path = os.path.dirname(os.path.abspath(target)) root = tree.getroot() out = open(target, 'w') out.write('<?xml 
version="1.0" encoding="UTF-8" standalone="no" ?>\n') CPPPATH = [] CPPDEFINES = [] LINKFLAGS = '' CCFLAGS = '' ProjectFiles = [] # add group groups = tree.find('Targets/Target/Groups') if groups is None: groups = SubElement(tree.find('Targets/Target'), 'Groups') groups.clear() # clean old groups for group in script: group_tree = MDK4AddGroup(ProjectFiles, groups, group['name'], group['src'], project_path) # for local CPPPATH/CPPDEFINES if (group_tree != None) and ('LOCAL_CPPPATH' in group or 'LOCAL_CCFLAGS' in group or 'LOCAL_CPPDEFINES' in group): GroupOption = SubElement(group_tree, 'GroupOption') GroupArmAds = SubElement(GroupOption, 'GroupArmAds') Cads = SubElement(GroupArmAds, 'Cads') VariousControls = SubElement(Cads, 'VariousControls') MiscControls = SubElement(VariousControls, 'MiscControls') if 'LOCAL_CCFLAGS' in group: MiscControls.text = group['LOCAL_CCFLAGS'] else: MiscControls.text = ' ' Define = SubElement(VariousControls, 'Define') if 'LOCAL_CPPDEFINES' in group: Define.text = ', '.join(set(group['LOCAL_CPPDEFINES'])) else: Define.text = ' ' Undefine = SubElement(VariousControls, 'Undefine') Undefine.text = ' ' IncludePath = SubElement(VariousControls, 'IncludePath') if 'LOCAL_CPPPATH' in group: IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in group['LOCAL_CPPPATH']]) else: IncludePath.text = ' ' # get each include path if 'CPPPATH' in group and group['CPPPATH']: if CPPPATH: CPPPATH += group['CPPPATH'] else: CPPPATH += group['CPPPATH'] # get each group's definitions if 'CPPDEFINES' in group and group['CPPDEFINES']: if CPPDEFINES: CPPDEFINES += group['CPPDEFINES'] else: CPPDEFINES = group['CPPDEFINES'] # get each group's link flags if 'LINKFLAGS' in group and group['LINKFLAGS']: if LINKFLAGS: LINKFLAGS += ' ' + group['LINKFLAGS'] else: LINKFLAGS += group['LINKFLAGS'] if 'LIBS' in group and group['LIBS']: for item in group['LIBS']: lib_path = '' for path_item in group['LIBPATH']: full_path = 
os.path.join(path_item, item + '.lib') if os.path.isfile(full_path): # has this library lib_path = full_path break if lib_path != '': if group_tree != None: MDK4AddLibToGroup(ProjectFiles, group_tree, group['name'], lib_path, project_path) else: group_tree = MDK4AddGroupForFN(ProjectFiles, groups, group['name'], lib_path, project_path) # write include path, definitions and link flags IncludePath = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/IncludePath') IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in CPPPATH]) Define = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/Define') Define.text = ', '.join(set(CPPDEFINES)) Misc = tree.find('Targets/Target/TargetOption/TargetArmAds/LDads/Misc') Misc.text = LINKFLAGS xml_indent(root) out.write(etree.tostring(root, encoding='utf-8').decode()) out.close() def MDK4Project(target, script): template_tree = etree.parse('template.uvproj') MDK45Project(template_tree, target, script) # remove project.uvopt file project_uvopt = os.path.abspath(target).replace('uvproj', 'uvopt') if os.path.isfile(project_uvopt): os.unlink(project_uvopt) # copy uvopt file if os.path.exists('template.uvopt'): import shutil shutil.copy2('template.uvopt', 'project.uvopt') def MDK5Project(target, script): template_tree = etree.parse('template.uvprojx') MDK45Project(template_tree, target, script) # remove project.uvopt file project_uvopt = os.path.abspath(target).replace('uvprojx', 'uvoptx') if os.path.isfile(project_uvopt): os.unlink(project_uvopt) # copy uvopt file if os.path.exists('template.uvoptx'): import shutil shutil.copy2('template.uvoptx', 'project.uvoptx') def MDKProject(target, script): template = open('template.Uv2', "r") lines = template.readlines() project = open(target, "w") project_path = os.path.dirname(os.path.abspath(target)) line_index = 5 # write group for group in script: lines.insert(line_index, 'Group (%s)\r\n' % group['name']) 
line_index += 1 lines.insert(line_index, '\r\n') line_index += 1 # write file ProjectFiles = [] CPPPATH = [] CPPDEFINES = [] LINKFLAGS = '' CCFLAGS = '' # number of groups group_index = 1 for group in script: # print group['name'] # get each include path if 'CPPPATH' in group and group['CPPPATH']: if CPPPATH: CPPPATH += group['CPPPATH'] else: CPPPATH += group['CPPPATH'] # get each group's definitions if 'CPPDEFINES' in group and group['CPPDEFINES']: if CPPDEFINES: CPPDEFINES += group['CPPDEFINES'] else: CPPDEFINES = group['CPPDEFINES'] # get each group's link flags if 'LINKFLAGS' in group and group['LINKFLAGS']: if LINKFLAGS: LINKFLAGS += ' ' + group['LINKFLAGS'] else: LINKFLAGS += group['LINKFLAGS'] # generate file items for node in group['src']: fn = node.rfile() name = fn.name path = os.path.dirname(fn.abspath) basename = os.path.basename(path) path = _make_path_relative(project_path, path) path = os.path.join(path, name) if ProjectFiles.count(name): name = basename + '_' + name ProjectFiles.append(name) lines.insert(line_index, 'File %d,%d,<%s><%s>\r\n' % (group_index, _get_filetype(name), path, name)) line_index += 1 group_index = group_index + 1 lines.insert(line_index, '\r\n') line_index += 1 # remove repeat path paths = set() for path in CPPPATH: inc = _make_path_relative(project_path, os.path.normpath(path)) paths.add(inc) #.replace('\\', '/') paths = [i for i in paths] CPPPATH = string.join(paths, ';') definitions = [i for i in set(CPPDEFINES)] CPPDEFINES = string.join(definitions, ', ') while line_index < len(lines): if lines[line_index].startswith(' ADSCINCD '): lines[line_index] = ' ADSCINCD (' + CPPPATH + ')\r\n' if lines[line_index].startswith(' ADSLDMC ('): lines[line_index] = ' ADSLDMC (' + LINKFLAGS + ')\r\n' if lines[line_index].startswith(' ADSCDEFN ('): lines[line_index] = ' ADSCDEFN (' + CPPDEFINES + ')\r\n' line_index += 1 # write project for line in lines: project.write(line) project.close() def ARMCC_Version(): import rtconfig import 
subprocess import re path = rtconfig.EXEC_PATH path = os.path.join(path, 'armcc.exe') if os.path.exists(path): cmd = path else: print('Error: get armcc version failed. Please update the KEIL MDK installation path in rtconfig.py!') return "0.0" child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = child.communicate() ''' example stdout: Product: MDK Plus 5.24 Component: ARM Compiler 5.06 update 5 (build 528) Tool: armcc [4d3621] return version: MDK Plus 5.24/ARM Compiler 5.06 update 5 (build 528)/armcc [4d3621] ''' version_Product = re.search(r'Product: (.+)', stdout).group(1) version_Product = version_Product[:-1] version_Component = re.search(r'Component: (.*)', stdout).group(1) version_Component = version_Component[:-1] version_Tool = re.search(r'Tool: (.*)', stdout).group(1) version_Tool = version_Tool[:-1] version_str_format = '%s/%s/%s' version_str = version_str_format % (version_Product, version_Component, version_Tool) #print('version_str:' + version_str) return version_str
{ "content_hash": "7e92d9b69d4819a272f02bacb4285b1f", "timestamp": "", "source": "github", "line_count": 412, "max_line_length": 133, "avg_line_length": 32.74514563106796, "alnum_prop": 0.5791268252909347, "repo_name": "FlyLu/rt-thread", "id": "7d4913dcf80359754971b451e0c9aabc1b18cc6a", "size": "14438", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tools/keil.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "10167873" }, { "name": "Batchfile", "bytes": "187896" }, { "name": "C", "bytes": "560912490" }, { "name": "C++", "bytes": "7528549" }, { "name": "CMake", "bytes": "148026" }, { "name": "CSS", "bytes": "9978" }, { "name": "DIGITAL Command Language", "bytes": "13234" }, { "name": "GDB", "bytes": "11796" }, { "name": "HTML", "bytes": "2631222" }, { "name": "Lex", "bytes": "7026" }, { "name": "Logos", "bytes": "7078" }, { "name": "M4", "bytes": "17515" }, { "name": "Makefile", "bytes": "271627" }, { "name": "Module Management System", "bytes": "1548" }, { "name": "Objective-C", "bytes": "4110192" }, { "name": "Pawn", "bytes": "1427" }, { "name": "Perl", "bytes": "9520" }, { "name": "PowerShell", "bytes": "1628" }, { "name": "Python", "bytes": "1479327" }, { "name": "RPC", "bytes": "14162" }, { "name": "Rich Text Format", "bytes": "355402" }, { "name": "Roff", "bytes": "4486" }, { "name": "Ruby", "bytes": "869" }, { "name": "Shell", "bytes": "407900" }, { "name": "TeX", "bytes": "3113" }, { "name": "Yacc", "bytes": "16084" } ], "symlink_target": "" }
from pipeline.param.base_param import BaseParam
from pipeline.param import consts


class ScaleParam(BaseParam):
    """Define the feature scale parameters.

    Parameters
    ----------
    method : {"standard_scale", "min_max_scale"}
        Like scale in sklearn; currently supports "min_max_scale" and
        "standard_scale", other scale methods may be supported later.
        Default "standard_scale".
    mode : {"normal", "cap"}
        For mode "normal", feat_upper and feat_lower are plain values like
        "10" or "3.1"; for "cap", feat_upper and feat_lower must be between
        0 and 1, meaning the percentile of the column. Default "normal".
    feat_upper : int or float or list of int or float
        The upper limit in the column. If a list is used, mode must be
        "normal" and the list length should equal the number of features to
        scale. Scaled values larger than feat_upper are set to feat_upper.
    feat_lower : int or float or list of int or float
        The lower limit in the column. If a list is used, mode must be
        "normal" and the list length should equal the number of features to
        scale. Scaled values smaller than feat_lower are set to feat_lower.
    scale_col_indexes : list
        Only columns whose index is in scale_col_indexes are scaled;
        -1 means all columns.
    scale_names : list of string
        Specify which columns need to be scaled; each element is a column
        name in the header. Default: [].
    with_mean : bool
        Used for "standard_scale". Default True.
    with_std : bool
        Used for "standard_scale". Default True.
        The standard scale of column x is calculated as: z = (x - u) / s,
        where u is the mean of the column and s is the standard deviation of
        the column. If with_mean is False, u will be 0; if with_std is
        False, s will be 1.
    need_run : bool
        Indicate if this module needs to be run. Default True.
    """

    def __init__(self, method="standard_scale", mode="normal", scale_col_indexes=-1,
                 scale_names=None, feat_upper=None,
                 feat_lower=None, with_mean=True, with_std=True, need_run=True):
        super().__init__()
        self.method = method
        self.mode = mode
        self.feat_upper = feat_upper
        self.feat_lower = feat_lower
        self.scale_col_indexes = scale_col_indexes
        # Bug fix: the original assigned the None-guarded value and then
        # immediately overwrote it with the raw argument
        # (self.scale_names = scale_names), putting None back.  Keep only
        # the guarded assignment so scale_names is always a list.
        self.scale_names = [] if scale_names is None else scale_names
        self.with_mean = with_mean
        self.with_std = with_std
        self.need_run = need_run

    def check(self):
        """Validate and normalize all parameters; raises ValueError on bad input."""
        if self.method is not None:
            descr = "scale param's method"
            self.method = self.check_and_change_lower(self.method,
                                                      [consts.MINMAXSCALE, consts.STANDARDSCALE],
                                                      descr)

        descr = "scale param's mode"
        self.mode = self.check_and_change_lower(self.mode,
                                                [consts.NORMAL, consts.CAP],
                                                descr)

        # scale_col_indexes is either the sentinel -1 (all columns) or an
        # explicit list of column indexes.
        if self.scale_col_indexes != -1 and not isinstance(self.scale_col_indexes, list):
            raise ValueError("scale_col_indexes should be -1 or a list")

        # Kept defensively even though __init__ guarantees a list; callers may
        # have mutated the attribute after construction.
        if self.scale_names is None:
            self.scale_names = []
        if not isinstance(self.scale_names, list):
            raise ValueError("scale_names should be a list of string")
        else:
            for e in self.scale_names:
                if not isinstance(e, str):
                    raise ValueError("scale_names should be a list of string")

        self.check_boolean(self.with_mean, "scale_param with_mean")
        self.check_boolean(self.with_std, "scale_param with_std")
        self.check_boolean(self.need_run, "scale_param need_run")

        return True
{ "content_hash": "9937d514bef552dbe15c0b93f2ad1b40", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 162, "avg_line_length": 44.98979591836735, "alnum_prop": 0.6089816284871853, "repo_name": "FederatedAI/FATE", "id": "c4ec58fcd5ef36965dae707df1f206d2acbee51b", "size": "5074", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/fate_client/pipeline/param/scale_param.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Lua", "bytes": "19716" }, { "name": "Python", "bytes": "5121767" }, { "name": "Rust", "bytes": "3971" }, { "name": "Shell", "bytes": "19676" } ], "symlink_target": "" }
"""xgboost init!""" import json from pathlib import Path from typing import cast, TYPE_CHECKING import warnings import wandb from wandb.sdk.lib import telemetry as wb_telemetry import xgboost as xgb # type: ignore from xgboost import Booster MINIMIZE_METRICS = [ "rmse", "rmsle", "mae", "mape", "mphe", "logloss", "error", "error@t", "merror", ] MAXIMIZE_METRICS = ["auc", "aucpr", "ndcg", "map", "ndcg@n", "map@n"] if TYPE_CHECKING: from typing import List, NamedTuple, Callable class CallbackEnv(NamedTuple): evaluation_result_list: List def wandb_callback() -> "Callable": """Old style callback that will be deprecated in favor of WandbCallback. Please try the new logger for more features.""" warnings.warn( "wandb_callback will be deprecated in favor of WandbCallback. Please use WandbCallback for more features.", UserWarning, stacklevel=2, ) with wb_telemetry.context() as tel: tel.feature.xgboost_old_wandb_callback = True def callback(env: "CallbackEnv") -> None: for k, v in env.evaluation_result_list: wandb.log({k: v}, commit=False) wandb.log({}) return callback class WandbCallback(xgb.callback.TrainingCallback): """`WandbCallback` automatically integrates XGBoost with wandb. Arguments: log_model: (boolean) if True save and upload the model to Weights & Biases Artifacts log_feature_importance: (boolean) if True log a feature importance bar plot importance_type: (str) one of {weight, gain, cover, total_gain, total_cover} for tree model. weight for linear model. define_metric: (boolean) if True (default) capture model performance at the best step, instead of the last step, of training in your `wandb.summary`. 
Passing `WandbCallback` to XGBoost will: - log the booster model configuration to Weights & Biases - log evaluation metrics collected by XGBoost, such as rmse, accuracy etc to Weights & Biases - log training metric collected by XGBoost (if you provide training data to eval_set) - log the best score and the best iteration - save and upload your trained model to to Weights & Biases Artifacts (when `log_model = True`) - log feature importance plot when `log_feature_importance=True` (default). - Capture the best eval metric in `wandb.summary` when `define_metric=True` (default). Example: ```python bst_params = dict( objective ='reg:squarederror', colsample_bytree = 0.3, learning_rate = 0.1, max_depth = 5, alpha = 10, n_estimators = 10, tree_method = 'hist' ) xg_reg = xgb.XGBRegressor(**bst_params) xg_reg.fit(X_train, y_train, eval_set=[(X_test, y_test)], callbacks=[WandbCallback()]) ) ``` """ def __init__( self, log_model: bool = False, log_feature_importance: bool = True, importance_type: str = "gain", define_metric: bool = True, ): self.log_model: bool = log_model self.log_feature_importance: bool = log_feature_importance self.importance_type: str = importance_type self.define_metric: bool = define_metric if wandb.run is None: raise wandb.Error("You must call wandb.init() before WandbCallback()") with wb_telemetry.context() as tel: tel.feature.xgboost_wandb_callback = True def before_training(self, model: Booster) -> Booster: """Run before training is finished.""" # Update W&B config config = model.save_config() wandb.config.update(json.loads(config)) return model def after_training(self, model: Booster) -> Booster: """Run after training is finished.""" # Log the booster model as artifacts if self.log_model: self._log_model_as_artifact(model) # Plot feature importance if self.log_feature_importance: self._log_feature_importance(model) # Log the best score and best iteration if model.attr("best_score") is not None: wandb.log( { "best_score": float(cast(str, 
model.attr("best_score"))), "best_iteration": int(cast(str, model.attr("best_iteration"))), } ) return model def after_iteration(self, model: Booster, epoch: int, evals_log: dict) -> bool: """Run after each iteration. Return True when training should stop.""" # Log metrics for data, metric in evals_log.items(): for metric_name, log in metric.items(): if self.define_metric: self._define_metric(data, metric_name) wandb.log({f"{data}-{metric_name}": log[-1]}, commit=False) else: wandb.log({f"{data}-{metric_name}": log[-1]}, commit=False) wandb.log({"epoch": epoch}) self.define_metric = False return False def _log_model_as_artifact(self, model: Booster) -> None: model_name = f"{wandb.run.id}_model.json" # type: ignore model_path = Path(wandb.run.dir) / model_name # type: ignore model.save_model(str(model_path)) model_artifact = wandb.Artifact(name=model_name, type="model") model_artifact.add_file(model_path) wandb.log_artifact(model_artifact) def _log_feature_importance(self, model: Booster) -> None: fi = model.get_score(importance_type=self.importance_type) fi_data = [[k, fi[k]] for k in fi] table = wandb.Table(data=fi_data, columns=["Feature", "Importance"]) wandb.log( { "Feature Importance": wandb.plot.bar( table, "Feature", "Importance", title="Feature Importance" ) } ) def _define_metric(self, data: str, metric_name: str) -> None: if "loss" in str.lower(metric_name): wandb.define_metric(f"{data}-{metric_name}", summary="min") elif str.lower(metric_name) in MINIMIZE_METRICS: wandb.define_metric(f"{data}-{metric_name}", summary="min") elif str.lower(metric_name) in MAXIMIZE_METRICS: wandb.define_metric(f"{data}-{metric_name}", summary="max") else: pass
{ "content_hash": "95dd18859a8455d60e8779853960eed7", "timestamp": "", "source": "github", "line_count": 189, "max_line_length": 157, "avg_line_length": 34.37566137566137, "alnum_prop": 0.6024318916422965, "repo_name": "wandb/client", "id": "8baa9c30239c65e00a561ef4c111365f43b091fb", "size": "6497", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wandb/integration/xgboost/xgboost.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "4902" }, { "name": "Dockerfile", "bytes": "3491" }, { "name": "Jupyter Notebook", "bytes": "7751" }, { "name": "Makefile", "bytes": "1863" }, { "name": "Objective-C", "bytes": "80764" }, { "name": "Python", "bytes": "3634228" }, { "name": "Shell", "bytes": "4662" } ], "symlink_target": "" }
"""Routines for character coverage of fonts.""" __author__ = "roozbeh@google.com (Roozbeh Pournader)" import argparse import codecs from os import path import re from fontTools import ttLib from nototools import unicode_data from nototools import lint_config from nototools.py23 import unichr def character_set(font): """Returns the character coverage of a font. Args: font: The input font's file name, or a TTFont. Returns: A frozenset listing the characters supported in the font. """ if isinstance(font, str): font = ttLib.TTFont(font, fontNumber=0) cmap_table = font["cmap"] cmaps = {} for table in cmap_table.tables: if (table.format, table.platformID, table.platEncID) in [ (4, 3, 1), (12, 3, 10), ]: cmaps[table.format] = table.cmap if 12 in cmaps: cmap = cmaps[12] elif 4 in cmaps: cmap = cmaps[4] else: cmap = {} return frozenset(cmap.keys()) def convert_set_to_ranges(charset): """Converts a set of characters to a list of ranges.""" working_set = set(charset) output_list = [] while working_set: start = min(working_set) end = start + 1 while end in working_set: end += 1 output_list.append((start, end - 1)) working_set.difference_update(range(start, end)) return output_list def _print_char_info(chars): for char in chars: try: name = unicode_data.name(char) except ValueError: name = "<Unassigned>" print("U+%04X %s" % (char, name)) def _write_char_text(chars, filepath, chars_per_line, sep): def accept_cp(cp): cat = unicode_data.category(cp) return cat[0] not in ["M", "C", "Z"] or cat == "Co" text = [unichr(cp) for cp in chars if accept_cp(cp)] filename, _ = path.splitext(path.basename(filepath)) m = re.match(r"(.*)-(?:Regular|Bold|Italic|BoldItalic)", filename) if m: filename = m.group(1) filename += "_chars.txt" print("writing file: %s" % filename) print("%d characters (of %d)" % (len(text), len(chars))) if chars_per_line > 0: lines = [] for n in range(0, len(text), chars_per_line): substr = text[n : n + chars_per_line] lines.append(sep.join(cp for cp in substr)) text = 
"\n".join(lines) with codecs.open(filename, "w", "utf-8") as f: f.write(text) def _process_font(filepath, args): char_set = character_set(filepath) if args.limit_set: char_set = char_set & args.limit_set if not char_set: print("limit excludes all chars in %s" % filepath) return sorted_chars = sorted(char_set) if args.info: _print_char_info(sorted_chars) if args.text: _write_char_text(sorted_chars, filepath, args.chars_per_line, args.sep) if args.ranges: print("ranges:\n " + lint_config.write_int_ranges(sorted_chars, True)) def main(): parser = argparse.ArgumentParser() parser.add_argument("files", help="Files to dump", metavar="file", nargs="+") parser.add_argument("--ranges", help="Dump cmap as hex ranges", action="store_true") parser.add_argument("--text", help="Dump cmap as sample text", action="store_true") parser.add_argument( "--sep", help="Separator between chars in text, default space", default=" " ) parser.add_argument( "--info", help="Dump cmap as cp and unicode name, one per line", action="store_true", ) parser.add_argument( "--chars_per_line", help="Format text in lines of at most this " "many codepoints, 0 to format as a single line", type=int, metavar="N", default=32, ) parser.add_argument( "--limit", help="string of hex codepoint ranges limiting cmap " "to output", metavar="ranges", ) args = parser.parse_args() if not (args.ranges or args.text or args.info): args.info = True if args.limit: args.limit_set = lint_config.parse_int_ranges(args.limit) print("limit to: " + lint_config.write_int_ranges(args.limit_set)) else: # make sure it exists so checks don't have to care args.limit_set = None for fontpath in args.files: print("Font: " + path.normpath(fontpath)) _process_font(fontpath, args) if __name__ == "__main__": main()
{ "content_hash": "71fc6a691365cbc6b99e87a56959d464", "timestamp": "", "source": "github", "line_count": 151, "max_line_length": 88, "avg_line_length": 29.794701986754966, "alnum_prop": 0.5943543009557679, "repo_name": "googlefonts/nototools", "id": "4d6e47dd2da98216addaf432940563dc93a52372", "size": "5119", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "nototools/coverage.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "4871" }, { "name": "CSS", "bytes": "895" }, { "name": "Dockerfile", "bytes": "665" }, { "name": "HTML", "bytes": "86714" }, { "name": "JavaScript", "bytes": "6032" }, { "name": "Makefile", "bytes": "3745" }, { "name": "Python", "bytes": "1277966" }, { "name": "Shell", "bytes": "15353" } ], "symlink_target": "" }
class KeywordValueParse: def parseline(self,line): import pyre.units uparser = pyre.units.parser() # print "Hello from KeywordValueParse.parseline!" self.keyvals[3] = False comment = line.find('#') if comment == 0: return self.keyvals stest = line[:comment].split('=') if len(stest) != 2: return self.keyvals key = stest[0].strip() rawvalue = stest[1].strip() try: uvalue = uparser.parse(rawvalue) value =uvalue.value except (NameError, AttributeError): try: value = eval(rawvalue) except (NameError): value = rawvalue except: return self.keyvals uvalue = value except: return self.keyvals self.keyvals[0] = key self.keyvals[1] = value self.keyvals[2] = uvalue self.keyvals[3] = True return self.keyvals def __init__(self): print "" print "Hello from KeywordValueParse.__init__!" self.keyvals = [None, None, None,None] return # version # $Id: KeywordValueParse.py,v 1.4 2005/01/06 01:45:13 willic3 Exp $ # End of file
{ "content_hash": "63f7ae8b0b694ba9516353337bd3da14", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 67, "avg_line_length": 24.1, "alnum_prop": 0.5560165975103735, "repo_name": "geodynamics/lithomop", "id": "f6cbb6189ed68c57d510243fd4a6bd5f29b018e9", "size": "2599", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lithomop3d/lithomop3d/KeywordValueParse.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "8186" }, { "name": "C++", "bytes": "262543" }, { "name": "FORTRAN", "bytes": "1551771" }, { "name": "Objective-C", "bytes": "48611" }, { "name": "Python", "bytes": "323578" }, { "name": "Shell", "bytes": "2302" } ], "symlink_target": "" }
from MafiaBot.MafiaRole import MafiaRole
from MafiaBot.MafiaAction import MafiaAction


class Inventor(MafiaRole):
    """Mafia role that may hand one-shot items (gun, vest, background check,
    syringe) to other players at night.  Each item can be given out once."""

    def __init__(self, settings=None):
        # Bug fix: the original signature was `settings=dict()`, a mutable
        # default shared across every Inventor constructed without an explicit
        # settings dict.  Use None and build a fresh dict per instance.
        super(Inventor, self).__init__({} if settings is None else settings)
        # 1 = still available, 0 = already handed out.
        self.items = {'gun': 1, 'vest': 1, 'check': 1, 'syringe': 1}

    def GetRolePM(self):
        # Private message describing the role, including remaining uses when
        # the role is use-limited (limiteduses >= 0).
        ret = 'You are an Inventor. You may give another player a item of your choice during the night. You only have one copy of each item. Possible items are: Background Check, Bulletproof Vest, Gun, Syringe'
        if self.limiteduses > -1:
            ret += ' You may only use this ability '+str(self.limiteduses)+' times.'
        return ret

    @staticmethod
    def GetRoleName():
        return 'Inventor'

    @staticmethod
    def GetRoleDescription():
        return 'Inventors hand out items to other players at night. They only have one copy of each item. The possible items are: Background Check, Bulletproof Vest, Gun, Syringe'

    def HandleCommand(self, command, param, mb, player):
        # Handle the night-time "!send <target> <item>" command.  Returns a
        # feedback string for the player, or None when the command does not
        # apply (wrong command, no pending action, or no uses left).
        if self.requiredaction:
            if command == 'send':
                if not self.limiteduses == 0:
                    splits = param.split(' ', 1)
                    if len(splits) == 2:
                        target = mb.GetPlayer(splits[0])
                        if target is not None:
                            if not target.IsDead():
                                if target is player:
                                    return 'You cannot give an item to yourself!'
                                else:
                                    itemstr = splits[1].lower()
                                    if not (itemstr == 'gun' or itemstr == 'vest' or itemstr == 'check' or itemstr == 'syringe'):
                                        return 'I do not know the item '+splits[1]
                                    if self.items[itemstr] == 0:
                                        return 'You have already handed out your copy of '+splits[1]
                                    # Queue the delivery and consume this item.
                                    mb.actionlist.append(MafiaAction(MafiaAction.SENDITEM, player, target, True, {'item': itemstr}))
                                    self.items[itemstr] = 0
                                    self.requiredaction = False
                                    player.UpdateActions()
                                    ret = 'You send a ' + splits[1] + ' to '+str(target)+' tonight.'
                                    self.limiteduses -= 1
                                    if self.limiteduses > -1:
                                        ret += ' You have '+str(self.limiteduses)+' uses remaining.'
                                    return ret
                        # Unknown or dead target falls through to here.
                        return 'Cannot find player '+splits[0]
                    return 'The command syntax is wrong. Use !send <target> <check/gun/syringe/vest>.'
        return None

    def BeginNightPhase(self, mb, player):
        # Prompt the player at night-fall, listing the items still available.
        # Returns '' when no uses remain (no prompt, no required action).
        if not self.limiteduses == 0:
            self.requiredaction = True
            ret = 'Inventor: You may send another player an item tonight. Use !send <player> <check/gun/syringe/vest> to give that item to that player.'
            if self.limiteduses > -1:
                ret += ' You have '+str(self.limiteduses)+' uses remaining.'
            ret += ' Your remaining items are: '
            items = []
            if self.items['check'] == 1:
                items.append('Background Check')
            if self.items['vest'] == 1:
                items.append('Bulletproof Vest')
            if self.items['gun'] == 1:
                items.append('Gun')
            if self.items['syringe'] == 1:
                items.append('Syringe')
            ret += ', '.join(items)
            return ret.rstrip()
        else:
            return ''
{ "content_hash": "e43d0bbe7ff18f173193f9f99dbf97c0", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 210, "avg_line_length": 50.04, "alnum_prop": 0.49480415667466027, "repo_name": "LLCoolDave/MafiaBot", "id": "9c890790581b8816faad42585d532edea2a2c052", "size": "3753", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "MafiaBot/Roles/Inventor.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "158720" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('campaign', '0013_merge'), ] operations = [ migrations.AlterField( model_name='campaign', name='created', field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0, max_digits=6), ), ]
{ "content_hash": "2304e95e5ca47816248bfc494c71c323", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 81, "avg_line_length": 25.458333333333332, "alnum_prop": 0.602291325695581, "repo_name": "toast38coza/FlashGiving", "id": "a72bfdde335d4d784318b623090afeb67a767aff", "size": "683", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "campaign/migrations/0014_auto_20160717_2119.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1956" }, { "name": "HTML", "bytes": "16964" }, { "name": "JavaScript", "bytes": "273886" }, { "name": "Python", "bytes": "34975" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('digest', '0030_item_additionally'), ] operations = [ migrations.AlterModelOptions( name='parsingrules', options={'ordering': ['-weight'], 'verbose_name': 'Правило обработки', 'verbose_name_plural': 'Правила обработки'}, ), ]
{ "content_hash": "c45594193f28e35f6d153ff055a87602", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 65, "avg_line_length": 25.88888888888889, "alnum_prop": 0.5665236051502146, "repo_name": "pythondigest/pythondigest", "id": "8181a92df4178066550075d2140b518fcf5ef97c", "size": "522", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "digest/migrations/0031_auto_20150903_0550.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "91851" }, { "name": "HTML", "bytes": "81009" }, { "name": "JavaScript", "bytes": "4941" }, { "name": "Makefile", "bytes": "240" }, { "name": "Python", "bytes": "326333" } ], "symlink_target": "" }
import heapq
import time

import config
import logbot
from gridspace import GridSpace
from axisbox import AABB


log = logbot.getlogger("ASTAR")


class BaseEx(Exception):
    """Base for the control-flow exceptions used by the A* driver."""

    def __init__(self, value=None):
        self.value = value

    def __repr__(self):
        if self.value is None:
            return self.__class__.__name__
        else:
            return "%s %s" % (self.__class__.__name__, self.value)

    @property
    def message(self):
        return str(self)


class PathNotFound(BaseEx):
    pass


class PathFound(BaseEx):
    pass


class PathOverLimit(BaseEx):
    pass


class PathNode(object):
    """A* search node wrapping a grid coordinate with g/h/f scores.

    Equality and hashing are by coords only, so nodes with the same
    coordinates compare equal regardless of their scores."""

    def __init__(self, coords=None, cost=1):
        self.coords = coords
        self.cost = cost
        self.g = 0
        self.h = 0
        self.f = 0
        self.step = 0
        self._parent = None
        # Cache the hash; coords are expected to be immutable.
        self.hash = hash(self.coords)

    def __repr__(self):
        #return "%s:st%s:cost:%s:g%s:h%s:f%s" % (str(self.coords), self.step, self.cost, self.g, self.h, self.f)
        return str(self.coords)

    def __lt__(self, other):
        # Heap ordering is by total score f = g + h.
        return self.f < other.f

    def __eq__(self, other):
        return self.coords == other.coords

    def __hash__(self):
        return self.hash

    @property
    def parent(self):
        return self._parent

    @parent.setter
    def parent(self, p):
        # Setting the parent also tracks path length (steps from start).
        self._parent = p
        self.step = p.step + 1

    def set_score(self, g, h):
        self.g = g
        self.h = h
        self.f = g + h


class AStarCoords(object):
    """Incremental A* driver towards a single goal coordinate.

    Call next() repeatedly; it runs the search in batches and raises
    StopIteration when finished (self.path holds the result on success)."""

    def __init__(self, dimension, start_coords, goal_coords):
        self.goal_coords = goal_coords
        self.goal_node = PathNode(goal_coords)
        self.start_node = PathNode(start_coords)
        self.astar = AStarAlgo(graph=GridSpace(dimension.grid),
                               start_node=self.start_node,
                               goal_node=self.goal_node,
                               is_goal=self.is_goal,
                               heuristics=self.heuristics)
        self.t_start = time.time()
        self.path = None

    def heuristics(self, start, goal):
        # Diagonal-distance heuristic on the horizontal (x, z) plane:
        # diagonal moves cost COST_DIAGONAL, straight moves COST_DIRECT.
        adx = abs(start.coords.x - goal.coords.x)
        adz = abs(start.coords.z - goal.coords.z)
        h_diagonal = min(adx, adz)
        h_straight = adx + adz
        h = config.COST_DIAGONAL * h_diagonal + config.COST_DIRECT * (h_straight - 2 * h_diagonal)
        return h

    def is_goal(self, current):
        return current == self.goal_node

    def time_sice_start(self):
        # Seconds since the search was created (name kept for callers).
        return time.time() - self.t_start

    def next(self):
        # Run up to 1000 A* iterations per call so the caller (presumably
        # the Twisted reactor loop) stays responsive; completion is signaled
        # by StopIteration.
        count = 0
        try:
            while count < 1000:
                self.astar.next()
                # Bug fix: count was never incremented, so the batching
                # limit had no effect and one call ran the whole search.
                count += 1
        except PathNotFound:
            log.err("did not find path between %s and %s" % (self.start_node.coords, self.goal_node.coords))
            log.msg('time consumed %s sec, made %d iterations' % (self.time_sice_start(), self.astar.iter_count))
            raise StopIteration()
        except PathFound:
            log.msg('found path %d steps long' % len(self.astar.path))
            log.msg('time consumed %s sec, made %d iterations' % (self.time_sice_start(), self.astar.iter_count))
            self.path = self.astar.path
            raise StopIteration()
        except PathOverLimit:
            log.err("finding path over limit between %s and %s" % (self.start_node.coords, self.goal_node.coords))
            log.msg('time consumed %s sec, made %d iterations' % (self.time_sice_start(), self.astar.iter_count))
            raise StopIteration()
        except:
            raise


class AStarMultiCoords(AStarCoords):
    """A* variant that accepts any of several goal coordinates."""

    def __init__(self, multiple_goals=None, **kwargs):
        self.multiple_goals = [PathNode(g) for g in multiple_goals]
        super(AStarMultiCoords, self).__init__(**kwargs)

    def is_goal(self, current):
        # Node equality is by coords, so this matches any goal coordinate.
        return any(current == g for g in self.multiple_goals)


class AStarBBCol(AStarCoords):
    """A* variant whose goal is any position colliding with a bounding box."""

    def __init__(self, bb=None, **kwargs):
        self.bb = bb
        super(AStarBBCol, self).__init__(goal_coords=bb.bottom_center, **kwargs)

    def is_goal(self, current):
        # Goal reached when the player's AABB at this position intersects bb.
        x = current.coords.x
        y = current.coords.y
        z = current.coords.z
        return self.bb.collides(AABB(x, y, z, x + 1, y + config.PLAYER_HEIGHT, z + 1))


class AStarAlgo(object):
    """Core A* search.  One call to next() expands one node; termination is
    signaled by raising PathFound / PathNotFound / PathOverLimit."""

    def __init__(self, graph=None, start_node=None, goal_node=None, heuristics=None, is_goal=None, max_cost=None):
        self.graph = graph
        self.start_node = start_node
        self.goal_node = goal_node
        self.heuristics = heuristics
        self.is_goal = is_goal
        if max_cost is None:
            # Derive a step budget from the straight-line distance, clamped
            # between 32 and the configured global limit.
            vdist = start_node.coords - goal_node.coords
            self.max_cost = int(max(32, min(vdist.manhatan_size * 2, config.PATHFIND_LIMIT)))
        else:
            self.max_cost = int(max_cost)
        log.msg("limit for astar is %s" % self.max_cost)
        self.path = None
        self.closed_set = set()
        self.open_heap = [self.start_node]
        self.open_set = set([self.start_node])
        self.start_node.set_score(0, self.heuristics(self.start_node, self.goal_node))
        self.iter_count = 0

    def reconstruct_path(self, current):
        # Walk parents back to the start; result is goal-first.
        nodes = []
        nodes.append(current)
        while current.parent is not None:
            nodes.append(current.parent)
            current = current.parent
        return nodes

    def get_edge_cost(self, node_from, node_to):
        return config.COST_DIRECT

    def neighbours(self, node):
        # Yield fresh PathNodes for walkable neighbours not yet closed.
        for state in self.graph.neighbours_of(node.coords):
            if state.coords not in self.closed_set:
                yield PathNode(state.coords)

    def next(self):
        self.iter_count += 1
        if not self.open_set:
            raise PathNotFound()
        x = heapq.heappop(self.open_heap)
        if self.is_goal(x):
            self.path = self.reconstruct_path(x)
            # Drop the graph reference so the grid can be collected.
            self.graph = None
            raise PathFound()
        self.open_set.remove(x)
        self.closed_set.add(x.coords)
        for y in self.neighbours(x):
            tentative_g_core = x.g + self.get_edge_cost(x, y)
            if y not in self.open_set:
                y.set_score(tentative_g_core, self.heuristics(y, self.goal_node))
                y.parent = x
                heapq.heappush(self.open_heap, y)
                self.open_set.add(y)
                if y.step > self.max_cost:
                    raise PathOverLimit()
            elif tentative_g_core < y.g:
                # NOTE(review): y here is a freshly-created node (g == 0),
                # not the instance stored in the heap, so this update never
                # reaches the heap entry — a textbook A* re-open subtlety.
                # Left as-is to preserve behavior; verify before changing.
                y.set_score(tentative_g_core, self.heuristics(y, self.goal_node))
                y.parent = x
                if y.step > self.max_cost:
                    raise PathOverLimit()
{ "content_hash": "703dcc8b15ea42a4fc37b7ad91840606", "timestamp": "", "source": "github", "line_count": 210, "max_line_length": 167, "avg_line_length": 31.32857142857143, "alnum_prop": 0.573187414500684, "repo_name": "lukleh/TwistedBot", "id": "97d75221b8da3a78e8b09d2e063d952d72152695", "size": "6580", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "twistedbot/pathfinding.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1944295" } ], "symlink_target": "" }
from django.http import HttpResponse from django.template import RequestContext from django.shortcuts import render_to_response, get_object_or_404, Http404, redirect from django.views.decorators.csrf import csrf_exempt def home(request): return render_to_response('home.html', {}, context_instance=RequestContext(request))
{ "content_hash": "877f50e966d260cc207a70b7de24e524", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 88, "avg_line_length": 41, "alnum_prop": 0.8079268292682927, "repo_name": "antofik/Wartech", "id": "fca78c2181b993bd711411b6d28f8f32234a2763", "size": "343", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "WartechWeb/main/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "2920" }, { "name": "CoffeeScript", "bytes": "2141" }, { "name": "JavaScript", "bytes": "217988" }, { "name": "PHP", "bytes": "144" }, { "name": "Python", "bytes": "58357" }, { "name": "Shell", "bytes": "178" } ], "symlink_target": "" }
""" [2017-11-08] Challenge #339 [Intermediate] A car renting problem https://www.reddit.com/r/dailyprogrammer/comments/7btzrw/20171108_challenge_339_intermediate_a_car_renting/ # Description A carriage company is renting cars and there is a particular car for which the interest is the highest so the company decides to book the requests one year in advance. We represent a request with a tuple (x, y) where x is the first day of the renting and y is the last. **Your goal** is to come up with an optimum strategy where you serve the most number of requests. # Input Description The first line of the input will be *n* the number of requests. The following two lines will consist of n numbers for the starting day of the renting, followed by another n numbers for the last day of the renting corresponding. For all lines 0 < ^x i < ^y i <= 365 inequality holds, where i=1, 2, ..., n. 10 1 12 5 12 13 40 30 22 70 19 23 10 10 29 25 66 35 33 100 65 # Output Description The output should be the maximum number of the feasable requests and the list of these requests. One possible result may look like this: 4 (1,23) (30,35) (40,66) (70,100) But we can do better: 5 (5,10) (13,25) (30,35) (40,66) (70,100) Remember your goal is to find the scenario where you serve the most number of costumers. # Credit This challenge was suggested by user /u/bessaai, many thanks. If you have a challenge idea, please share it in /r/dailyprogrammer_ideas and there's a good chance we'll use it. """ def main(): pass if __name__ == "__main__": main()
{ "content_hash": "f88c6784fa1e3ccfd483162d962f4d0f", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 119, "avg_line_length": 41.36842105263158, "alnum_prop": 0.7251908396946565, "repo_name": "DayGitH/Python-Challenges", "id": "7929ec72c9a5733923dadba411acf0cddb735310", "size": "1572", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "DailyProgrammer/DP20171108B.py", "mode": "33188", "license": "mit", "language": [ { "name": "OpenEdge ABL", "bytes": "5002" }, { "name": "Python", "bytes": "2471582" } ], "symlink_target": "" }
"""Django migration 0011 for the ``exercise`` app.

Adds a boolean ``enabled`` flag (default ``True``) to Exercise,
ExerciseRecord, Plan and PlanExercise, and sets the default ordering of
ExerciseRecord to ``(start, end)``.
"""
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated schema migration; do not edit operations by hand."""

    dependencies = [
        ('exercise', '0010_exercise_steps'),
    ]

    operations = [
        # Order exercise records chronologically by default.
        migrations.AlterModelOptions(
            name='exerciserecord',
            options={'ordering': ('start', 'end')},
        ),
        # Soft enable/disable flags; default True keeps existing rows active.
        migrations.AddField(
            model_name='exercise',
            name='enabled',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='exerciserecord',
            name='enabled',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='plan',
            name='enabled',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='planexercise',
            name='enabled',
            field=models.BooleanField(default=True),
        ),
    ]
{ "content_hash": "b7c7aaea0ea0d9acfb776d7ed2ffd03e", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 52, "avg_line_length": 26.7027027027027, "alnum_prop": 0.5455465587044535, "repo_name": "DADco/convalesense-web", "id": "7bc510446fbbc81b7c00fd16e52cfd5f7606366c", "size": "1061", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "convalesense/exercise/migrations/0011_auto_20170127_0925.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "5264" }, { "name": "HTML", "bytes": "30851" }, { "name": "JavaScript", "bytes": "3917" }, { "name": "Python", "bytes": "80988" }, { "name": "Shell", "bytes": "4200" } ], "symlink_target": "" }
""" ANACONDA DISTRIBUTION IDE : SPYDER Created on Sun Jun 4 18:44:29 2017 @author: LALIT ARORA """ from PyQt5 import QtCore, QtGui, QtWidgets from pycricbuzz import Cricbuzz class Ui_PyCricket(object): def setupUi(self, PyCricket): c = Cricbuzz() matches = c.matches() livematches=[] affected=[] for match in matches: if(match['mchstate']=='inprogress'): livematches.append(match['mchdesc']) elif ('rain' in match['mchstate']): affected.append(match['mchdesc']) if len(affected)==0: ui.totalaffected=ui.totalaffected+"No Match Affected.." else: for i in range(len(affected)): ui.totalaffected=ui.totalaffected+affected[i]+" " PyCricket.setObjectName("PyCricket") PyCricket.resize(544,570) PyCricket.setMaximumSize(QtCore.QSize(544, 16777215)) self.raina = QtWidgets.QLabel(PyCricket) self.raina.setGeometry(QtCore.QRect(20,550, 501, 16)) font = QtGui.QFont() font.setPointSize(9) self.raina.setFont(font) self.raina.setObjectName("rainaffected") self.raina.setText(ui.totalaffected) self.label = QtWidgets.QLabel(PyCricket) self.label.setGeometry(QtCore.QRect(80, 30, 121, 20)) font = QtGui.QFont() font.setPointSize(12) self.label.setFont(font) self.label.setObjectName("label") self.livematches = QtWidgets.QComboBox(PyCricket) self.livematches.setGeometry(QtCore.QRect(230, 30, 231, 22)) self.livematches.setObjectName("livematches") self.livematches.addItems(livematches) self.pushButton = QtWidgets.QPushButton(PyCricket) self.pushButton.setGeometry(QtCore.QRect(210, 80,130, 23)) self.pushButton.setObjectName("pushButton") self.pushButton.clicked.connect(self.score) self.label_2 = QtWidgets.QLabel(PyCricket) self.label_2.setGeometry(QtCore.QRect(20,530, 151, 16)) font = QtGui.QFont() font.setPointSize(10) self.label_2.setFont(font) self.label_2.setObjectName("label_2") self.label_3 = QtWidgets.QLabel(PyCricket) self.label_3.setGeometry(QtCore.QRect(80, 130, 401, 301)) font = QtGui.QFont() font.setPointSize(8) self.label_3.setFont(font) self.label_3.setObjectName("label_3") 
self.label_3.hide()==True self.retranslateUi(PyCricket) QtCore.QMetaObject.connectSlotsByName(PyCricket) def retranslateUi(self, PyCricket): _translate = QtCore.QCoreApplication.translate PyCricket.setWindowTitle(_translate("PyCricket", "PYCRICKET v1.0")) self.label.setText(_translate("PyCricket", "LIVE MATCHES")) self.pushButton.setText(_translate("PyCricket", "GET SCORE AND UPDATE")) self.label_2.setText(_translate("PyCricket", "Matches Affected By Rain")) self.raina.setText(_translate("PyCricket", " ")) self.label_3.setText(_translate("PyCricket",ui.totalaffected)) def score(self): self.raina.setText(ui.totalaffected) c=Cricbuzz() matches=c.matches() finalscorecard="" ui.selectedmatch=self.livematches.currentText() if ui.selectedmatch!="": for match in matches: if ui.selectedmatch in match['mchdesc']: identity=match['id'] finalscorecard=finalscorecard+str(match['srs']) finalscorecard=finalscorecard+"\n"+str(match['mchdesc']) finalscorecard=finalscorecard+"\n"+str(match['type'])+str(match['mnum']) finalscorecard=finalscorecard+"\n"+str(match['status']) scorecard=c.scorecard(identity) seperator="-------------------------------------------------------------------------------" finalscorecard=finalscorecard+"\n"+seperator finalscorecard=finalscorecard+"\n"+"BATTING TEAM :"+str(scorecard['scorecard'][0]['batteam']) for i in range(len(scorecard['scorecard'][0]['batcard'])): temp="" temp=str(scorecard['scorecard'][0]['batcard'][i]['name'])+" "+str(scorecard['scorecard'][0]['batcard'][i]['runs'])+"of"+str(scorecard['scorecard'][0]['batcard'][i]['balls'])+", Fours :"+str(scorecard['scorecard'][0]['batcard'][i]['fours'])+", Sixes :"+str(scorecard['scorecard'][0]['batcard'][i]['six'])+", Dismissal :"+str(scorecard['scorecard'][0]['batcard'][i]['dismissal']) finalscorecard=finalscorecard+"\n"+temp finalscorecard=finalscorecard+"\n"+"Score :"+str(scorecard['scorecard'][0]['runs'])+"/"+str(scorecard['scorecard'][0]['wickets']) finalscorecard=finalscorecard+"\n"+"Runrate 
:"+str(scorecard['scorecard'][0]['runrate']) finalscorecard=finalscorecard+"\n"+seperator finalscorecard=finalscorecard+"\n"+"BOWLING TEAM :"+str(scorecard['scorecard'][0]['bowlteam']) for i in range(len(scorecard['scorecard'][0]['bowlcard'])): temp="" temp=str(scorecard['scorecard'][0]['bowlcard'][i]['name'])+"Overs :"+str(scorecard['scorecard'][0]['bowlcard'][i]['overs'])+", Runs :"+str(scorecard['scorecard'][0]['bowlcard'][i]['runs'])+", Wickets :"+str(scorecard['scorecard'][0]['bowlcard'][i]['wickets']) finalscorecard=finalscorecard+"\n"+temp self.label_3.setText(finalscorecard) self.label_3.show()==True else: self.label_3.setText("Select the Live Match ") self.label_3.show()==True if __name__ == '__main__': import sys app = QtWidgets.QApplication(sys.argv) Dialog=QtWidgets.QDialog() ui=Ui_PyCricket() ui.selectedmatch="" ui.totalaffected="" p = QtGui.QPalette() gradient = QtGui.QLinearGradient(0, 0, 0, 400) gradient.setColorAt(0.0, QtGui.QColor(240, 240, 240)) gradient.setColorAt(1.0, QtGui.QColor(240, 160, 160)) p.setBrush(QtGui.QPalette.Window,QtGui.QBrush(gradient)) Dialog.setPalette(p) ui.setupUi(Dialog) Dialog.show() sys.exit(app.exec_())
{ "content_hash": "586f45349c71080ff460a8af7ea6afcc", "timestamp": "", "source": "github", "line_count": 137, "max_line_length": 402, "avg_line_length": 47.91970802919708, "alnum_prop": 0.5771515613099771, "repo_name": "MCodez/PyQt", "id": "23ca09f0ca43a090b3d13f42c259aacc715efe40", "size": "6590", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "PyCricket v1.0/ui_pycricket.py", "mode": "33188", "license": "mit", "language": [ { "name": "Arduino", "bytes": "440" }, { "name": "Python", "bytes": "38772" } ], "symlink_target": "" }
from __future__ import print_function

import pyart

from rdkit.sping.pid import *
from rdkit.sping.PDF import pdfmetrics

import Fontmapping  # helps by mapping pid font classes to Pyart font names

# note for now I'm just going to do the standard PDF fonts & forget the rest


class PyartCanvas(Canvas):
    "note the default face is 'times' and is set in Fontmapping.py"

    def __init__(self, size=(300, 300), name='PyartCanvas.png'):
        self._pycan = pyart.Canvas(size[0], size[1], dpi=72)
        self.filename = name
        Canvas.__init__(self, size, name)
        # self.defaultFillColor = transparent

    def __setattr__(self, name, value):
        # Mirror the piddle default attributes into the underlying pyart
        # graphics state as they are assigned.  Falsy values are ignored
        # (not stored) for the color/width defaults; defaultFont maps a
        # falsy value to the 'times' face at the default size.
        if name == 'defaultLineColor':
            if value:
                # print('setting defaultLineColor to %s, 0x%x' % (value, value.toHexRGB()))
                if value != transparent:
                    self._pycan.gstate.stroke = value.toHexRGB()
                self.__dict__[name] = value
        elif name == 'defaultFillColor':
            if value:
                if value != transparent:
                    self._pycan.gstate.fill = value.toHexRGB()
                self.__dict__[name] = value
        elif name == 'defaultLineWidth':
            if value:
                self._pycan.gstate.stroke_width = value
                self.__dict__[name] = value
        elif name == 'defaultFont':
            if value:
                self.__dict__[name] = value
                self._setPyartFont(value)
            else:
                # received None so set to default font face & size=12
                self.__dict__[name] = Font(face='times')
                self._setPyartFont(self.__dict__[name])
        else:
            self.__dict__[name] = value

    ## Private methods ##

    def _protectArtState(self, flag):
        # Push the graphics state when a temporary change is needed.
        # (Parameter renamed from `bool`, which shadowed the builtin.)
        if flag:
            self._pycan.gsave()
        return flag

    def _restoreArtState(self, flag):
        # Pop the graphics state pushed by _protectArtState.
        if flag:
            self._pycan.grestore()

    def _setPyartFont(self, fontInstance):
        # accounts for "None" option
        # does not act on self.defaultFont at all
        fontsize = fontInstance.size
        self._pycan.gstate.font_size = fontsize
        # map pid name for font to Pyart name
        pyartname = Fontmapping.getPyartName(fontInstance)
        self._pycan.gstate.setfont(pyartname)

    ### public PID Canvas methods ##

    def clear(self):
        pass

    def flush(self):
        pass

    def save(self, file=None, format=None):
        """Write the canvas to `file` (a path string); defaults to self.filename."""
        if not file:
            file = self.filename
        # BUG FIX: `StringType` (a Python 2 `types` module name) was never
        # imported and raised NameError on every call; test against the
        # builtin `str` instead.  File-like objects are still unsupported.
        if isinstance(file, str):
            self._pycan.save(file)
        else:
            raise NotImplementedError

    def _findExternalFontName(self, font):  # copied from piddlePDF by cwl- hack away!
        """Attempts to return proper font name.
        PDF uses a standard 14 fonts referred to by name. Default to
        self.defaultFont('Helvetica'). The dictionary allows a layer of
        indirection to support a standard set of PIDDLE font names."""
        piddle_font_map = {
            'Times': 'Times',
            'times': 'Times',
            'Courier': 'Courier',
            'courier': 'Courier',
            'helvetica': 'Helvetica',
            'Helvetica': 'Helvetica',
            'symbol': 'Symbol',
            'Symbol': 'Symbol',
            'monospaced': 'Courier',
            'serif': 'Times',
            'sansserif': 'Helvetica',
            'ZapfDingbats': 'ZapfDingbats',
            'zapfdingbats': 'ZapfDingbats',
            'arial': 'Helvetica'
        }
        try:
            # BUG FIX: `string.lower(...)` used the removed Python 2
            # `string` module function (and `string` was never imported);
            # use the str method instead.
            face = piddle_font_map[font.face.lower()]
        except Exception:
            return 'Helvetica'
        name = face + '-'
        if font.bold and face in ['Courier', 'Helvetica', 'Times']:
            name = name + 'Bold'
        if font.italic and face in ['Courier', 'Helvetica']:
            name = name + 'Oblique'
        elif font.italic and face == 'Times':
            name = name + 'Italic'
        if name == 'Times-':
            name = name + 'Roman'
        # symbol and ZapfDingbats cannot be modified!
        # trim and return
        if name[-1] == '-':
            name = name[0:-1]
        return name

    def stringWidth(self, s, font=None):
        if not font:
            font = self.defaultFont
        fontname = Fontmapping.getPdfName(font)
        # pdfmetrics widths are per-mille of the font size.
        return pdfmetrics.stringwidth(s, fontname) * font.size * 0.001

    def fontAscent(self, font=None):
        if not font:
            font = self.defaultFont
        fontname = Fontmapping.getPdfName(font)
        return pdfmetrics.ascent_descent[fontname][0] * 0.001 * font.size

    def fontDescent(self, font=None):
        if not font:
            font = self.defaultFont
        fontname = Fontmapping.getPdfName(font)
        # pdfmetrics stores descent as a negative number; return it positive.
        return -pdfmetrics.ascent_descent[fontname][1] * 0.001 * font.size

    def drawLine(self, x1, y1, x2, y2, color=None, width=None):
        ## standard code ##
        color = color or self.defaultLineColor
        width = width or self.defaultLineWidth
        if color != transparent:
            changed = self._protectArtState((color != self.defaultLineColor) or
                                            (width != self.defaultLineWidth))
            if color != self.defaultLineColor:
                self._pycan.gstate.stroke = color.toHexRGB()
                # print("color is %s <-> %s" % (color, color.toHexStr()))
            if width != self.defaultLineWidth:
                self._pycan.gstate.stroke_width = width
            ###################
            # actual drawing
            p = pyart.VectorPath(3)
            p.moveto_open(x1, y1)
            p.lineto(x2, y2)
            self._pycan.stroke(p)
            ## standard code ##
            if changed:
                self._pycan.grestore()
            ###################

    # def drawLines(self, lineList, color=None, width=None):
    #     pass

    def drawString(self, s, x, y, font=None, color=None, angle=0):
        # start w/ the basics; font/color/angle are currently ignored
        self._pycan.drawString(x, y, s)

    def drawPolygon(self, pointlist, edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
        eColor = edgeColor or self.defaultLineColor
        fColor = fillColor or self.defaultFillColor
        eWidth = edgeWidth or self.defaultLineWidth
        changed = self._protectArtState((eColor != self.defaultLineColor) or
                                        (eWidth != self.defaultLineWidth) or
                                        (fColor != self.defaultFillColor))
        if eColor != self.defaultLineColor:
            self._pycan.gstate.stroke = eColor.toHexRGB()
        if fColor != self.defaultFillColor:
            self._pycan.gstate.fill = fColor.toHexRGB()
        if eWidth != self.defaultLineWidth:
            self._pycan.gstate.stroke_width = eWidth
        path = pyart.VectorPath(len(pointlist) + 1)
        if closed:
            path.moveto_closed(pointlist[0][0], pointlist[0][1])
        else:
            path.moveto_open(pointlist[0][0], pointlist[0][1])
        for pt in pointlist[1:]:
            path.lineto(pt[0], pt[1])
        if closed:
            path.close()
        # Fill only makes sense for a closed outline.
        if fColor != transparent and closed:
            self._pycan.fill(path)
        if eColor != transparent:
            self._pycan.stroke(path)
        self._restoreArtState(changed)

    # def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4,
    #               edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
    #     pass

    # def drawRoundRect(self, x1,y1, x2,y2, rx=8, ry=8,
    #                   edgeColor=None, edgeWidth=None, fillColor=None):
    #     pass

    # def drawEllipse(self, x1,y1, x2,y2, edgeColor=None, edgeWidth=None,
    #                 fillColor=None):
    #     pass

    # def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, edgeColor=None,
    #             edgeWidth=None, fillColor=None):
    #     pass

    # def drawFigure(self, partList,
    #                edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
    #     pass

    # def drawImage(self, image, x1, y1, x2=None,y2=None):
    #     pass


## basic tests ##
if __name__ == '__main__':
    import rdkit.sping.tests.pidtest
    can = PyartCanvas(size=(300, 300), name='basictest.png')
    # can.defaultLineColor = Color(0.7, 0.7, 1.0)
    # can.drawLine(10,10, 290,290)
    # can.drawLine(10,10, 50, 10, color=green, width = 4.5)
    rdkit.sping.tests.pidtest.drawBasics(can)
    can.save(file='basicTest.png')
    print('saving basicTest.png')

    can = PyartCanvas(size=(400, 400), name='test-strings.png')
    rdkit.sping.tests.pidtest.drawStrings(can)
    can.save()
{ "content_hash": "95ad97efedaf5c9db40d1540198244ce", "timestamp": "", "source": "github", "line_count": 294, "max_line_length": 91, "avg_line_length": 30.95578231292517, "alnum_prop": 0.5447752994176465, "repo_name": "adalke/rdkit", "id": "61f9ec466bc37d2d1f24cef054df0de147b3058d", "size": "9324", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rdkit/sping/Pyart/pidPyart.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "385" }, { "name": "C", "bytes": "226290" }, { "name": "C#", "bytes": "6745" }, { "name": "C++", "bytes": "7847294" }, { "name": "CMake", "bytes": "611343" }, { "name": "CSS", "bytes": "3231" }, { "name": "FORTRAN", "bytes": "7661" }, { "name": "HTML", "bytes": "63047" }, { "name": "Java", "bytes": "291444" }, { "name": "JavaScript", "bytes": "11595" }, { "name": "LLVM", "bytes": "29594" }, { "name": "Lex", "bytes": "4508" }, { "name": "Makefile", "bytes": "15435" }, { "name": "Objective-C", "bytes": "298" }, { "name": "Python", "bytes": "3138951" }, { "name": "QMake", "bytes": "389" }, { "name": "SMT", "bytes": "3010" }, { "name": "Shell", "bytes": "12651" }, { "name": "Smarty", "bytes": "5864" }, { "name": "Yacc", "bytes": "49429" } ], "symlink_target": "" }
import sys
from googletrans import Translator
import clipboard
import keyboard
import time

"""
Requirements:
pip install googletrans
pip install clipboard
pip install keyboard
"""


def abrirArquivo():
    """Return the contents of the file given as the program's first CLI argument.

    Prints a message and exits if the argument is missing or the file
    cannot be opened.
    """
    try:
        caminho = sys.argv[1]
    except IndexError:  # was a bare except; only a missing argv[1] can fail here
        print("Passe o arquivo como arguemento na chamada do programa!")
        exit()
    try:
        # BUG FIX: the original called `f.close` without parentheses, which
        # never closed the file; a `with` block guarantees it is closed.
        with open(caminho, 'r', encoding='utf-8') as f:
            return f.read()
    except OSError:  # was a bare except; file-open failures are OSError
        print("Arquivo não encontrado!!")
        exit()


def tratarString(string):
    """Clean the text by removing unwanted line breaks and hyphenation."""
    string = string.replace("-\n", "")
    string = string.replace("\n", " ")
    string = string.replace("\r\n", " ")
    string = string.replace("\r", "")
    return string


def gravaArquivo(string):
    """Write the given text to the file passed as a CLI argument."""
    caminho = sys.argv[1]
    # BUG FIX: `f.close` without parentheses never flushed/closed the file.
    with open(caminho, 'w', encoding='utf-8') as f:
        f.write(string)


def traduzir(string):
    """Translate the text to Portuguese using Google Translate."""
    translator = Translator(service_urls=['translate.google.com.br', ])
    string = translator.translate(string, dest='pt').text
    return string


def areaTransferencia():
    """Return the current clipboard contents."""
    string = clipboard.paste()
    return string


def colarAreaTransf(string):
    """Put the translated text back on the clipboard."""
    clipboard.copy(string)


def listener(string):
    """Block until the clipboard contents change from `string`."""
    while clipboard.paste() == string:
        time.sleep(1)


if __name__ == "__main__":
    while True:
        """Uncomment to trigger on a hotkey instead"""
        # keyboard.wait('ctrl+x')
        string = areaTransferencia()
        """Uncomment to read from a file instead"""
        # string = abrirArquivo()
        string = tratarString(string)
        string = traduzir(string)
        colarAreaTransf(string)
        """Uncomment to also save to a file"""
        # gravaArquivo(string)
        print("Pronto!")
        """Comment out if using the hotkey trigger"""
        listener(string)
{ "content_hash": "b4685273b3ceff82411c57dd645ec250", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 105, "avg_line_length": 25.13953488372093, "alnum_prop": 0.6401480111008325, "repo_name": "HoussemCharf/FunUtils", "id": "9c4b0f7277e34fc18ef8381c9f2d310327f9d51d", "size": "2170", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Clipboard/tradutor.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "431" }, { "name": "HTML", "bytes": "1777" }, { "name": "JavaScript", "bytes": "475" }, { "name": "Python", "bytes": "150133" }, { "name": "Shell", "bytes": "341" } ], "symlink_target": "" }
import logging
import os

try:
    from alerta.plugins import app  # alerta >= 5.0
except ImportError:
    from alerta.app import app  # alerta < 5.0
from alerta.plugins import PluginBase

LOG = logging.getLogger('alerta.plugins.timeout')

# Timeout (in seconds) applied to every incoming alert.  The environment
# variable takes precedence over the app config.
# BUG FIX: both sources yield strings (including the '2600' fallback), so
# alert.timeout was being set to a string; coerce to int here, once.
TIMEOUT = int(os.environ.get('ALERT_TIMEOUT') or app.config.get('ALERT_TIMEOUT', '2600'))


class Timeout(PluginBase):
    """Alerta plugin that overrides each alert's timeout with ALERT_TIMEOUT."""

    def pre_receive(self, alert):
        # Called before the alert is stored: force the configured timeout.
        LOG.debug("Setting timeout for alert to %s ", TIMEOUT)
        alert.timeout = TIMEOUT
        return alert

    def post_receive(self, alert):
        # No action needed after the alert is stored.
        return

    def status_change(self, alert, status, text):
        # No action needed on status changes.
        return
{ "content_hash": "aa4cbf944707346659a5b89695f8996a", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 84, "avg_line_length": 23, "alnum_prop": 0.677938808373591, "repo_name": "alerta/alerta-contrib", "id": "3218eacf6e3b6b8bb1905a938ab0834ff393e44d", "size": "622", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plugins/timeout/alerta_timeout.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jinja", "bytes": "2178" }, { "name": "PHP", "bytes": "7739" }, { "name": "Python", "bytes": "300134" }, { "name": "Shell", "bytes": "1278" } ], "symlink_target": "" }
from typing import Tuple, Set, Union, MutableMapping, Any, Mapping, Iterable, Generator, List from datapipelines import Query, PipelineContext, QueryValidationError from ..data import Region, Platform, Queue, Tier, Division, Position from ..dto.champion import ChampionRotationDto from ..dto.championmastery import ChampionMasteryDto, ChampionMasteryListDto, ChampionMasteryScoreDto from ..dto.league import LeagueEntriesDto, LeagueSummonerEntriesDto, LeagueEntryDto from ..dto.staticdata import ChampionDto, ChampionListDto, ItemDto, ItemListDto, LanguageStringsDto, LanguagesDto, ProfileIconDataDto, ProfileIconDetailsDto, RealmDto, RuneDto, RuneListDto, SummonerSpellDto, SummonerSpellListDto, MapDto, MapListDto, VersionListDto from ..dto.status import ShardStatusDto from ..dto.match import MatchDto, MatchReferenceDto, TimelineDto from ..dto.spectator import CurrentGameInfoDto, FeaturedGamesDto from ..dto.summoner import SummonerDto from ..core.championmastery import ChampionMastery, ChampionMasteries from ..core.league import LeagueSummonerEntries, ChallengerLeague, GrandmasterLeague, MasterLeague, League, LeagueEntry from ..core.staticdata import Champion, Rune, Item, SummonerSpell, Map, Locales, LanguageStrings, ProfileIcon, ProfileIcons, Realms, Versions, Items, Champions, Maps, SummonerSpells, Runes from ..core.status import ShardStatus from ..core.match import Match, MatchHistory, Timeline from ..core.summoner import Summoner from ..core.spectator import CurrentMatch, FeaturedMatches, CurrentGameParticipantData from ..core.champion import ChampionRotation, ChampionRotationData from ..core.staticdata.champion import ChampionData from ..core.staticdata.item import ItemData from ..core.staticdata.summonerspell import SummonerSpellData from ..core.staticdata.rune import RuneData from ..core.staticdata.map import MapData from ..core.summoner import SummonerData ############# # Utilities # ############# def _rgetattr(obj, key): """Recursive getattr for handling dots in 
keys.""" for k in key.split("."): obj = getattr(obj, k) return obj def _hash_included_data(included_data: Set[str]) -> int: return hash(tuple(included_data)) def _get_default_version(query: Mapping[str, Any], context: PipelineContext) -> str: try: pipeline = context[PipelineContext.Keys.PIPELINE] versions = pipeline.get(VersionListDto, {"platform": query["platform"]}) return versions["versions"][0] except TypeError as error: raise KeyError("`version` must be provided in query") from error def _get_default_locale(query: Mapping[str, Any], context: PipelineContext) -> str: return query["platform"].default_locale def _region_to_platform_generator(regions: Iterable[Region]) -> Generator[Platform, None, None]: for region in regions: try: yield Region(region).platform except ValueError as e: raise QueryValidationError from e def convert_region_to_platform(query: MutableMapping[str, Any]) -> None: if "region" in query and "platform" not in query: try: query["platform"] = Region(query["region"]).platform except ValueError as e: raise QueryValidationError from e if "regions" in query and "platforms" not in query: query["platforms"] = _region_to_platform_generator(query["regions"]) if "region" in query and not isinstance(query["region"], Region): query["region"] = Region(query["region"]) ####### # DTO # ####### ################ # Champion API # ################ validate_champion_rotation_dto_query = Query. \ has("platform").as_(Platform) validate_many_champion_rotation_dto_query = Query. 
\ has("platform").as_(Platform) def for_champion_rotation_dto(champion_rotation: ChampionRotationDto) -> str: return champion_rotation["platform"] def for_champion_rotation_dto_query(query: Query) -> str: return query["platform"].value def for_many_champion_rotation_dto_query(query: Query) -> Generator[str, None, None]: for platform in query["platforms"]: try: platform = Platform(platform) yield platform.value except ValueError as e: raise QueryValidationError from e ######################## # Champion Mastery API # ######################## validate_champion_mastery_dto_query = Query. \ has("platform").as_(Platform).also. \ has("playerId").as_(str).also. \ has("championId").as_(int) validate_many_champion_mastery_dto_query = Query. \ has("platform").as_(Platform).also. \ has("playerId").as_(str).also. \ has("championIds").as_(Iterable) def for_champion_mastery_dto(champion_mastery: ChampionMasteryDto) -> Tuple[str, str, int]: return champion_mastery["platform"], champion_mastery["playerId"], champion_mastery["championId"] def for_champion_mastery_dto_query(query: Query) -> Tuple[str, str, int]: return query["platform"].value, query["playerId"], query["championId"] def for_many_champion_mastery_dto_query(query: Query) -> Generator[Tuple[str, str, int], None, None]: for champion_id in query["championIds"]: try: champion_id = int(champion_id) yield query["platform"].value, query["playerId"], champion_id except ValueError as e: raise QueryValidationError from e validate_champion_mastery_list_dto_query = Query. \ has("platform").as_(Platform).also. \ has("playerId").as_(str) validate_many_champion_mastery_list_dto_query = Query. \ has("platform").as_(Platform).also. 
\ has("playerIds").as_(Iterable) def for_champion_mastery_list_dto(champion_mastery_list: ChampionMasteryListDto) -> Tuple[str, str]: return champion_mastery_list["platform"], champion_mastery_list["playerId"] def for_champion_mastery_list_dto_query(query: Query) -> Tuple[str, str]: return query["platform"].value, query["playerId"] def for_many_champion_mastery_list_dto_query(query: Query) -> Generator[Tuple[str, str], None, None]: for summoner_id in query["playerIds"]: try: yield query["platform"].value, summoner_id except ValueError as e: raise QueryValidationError from e validate_champion_mastery_score_dto_query = Query. \ has("platform").as_(Platform).also. \ has("playerId").as_(str) validate_many_champion_mastery_score_dto_query = Query. \ has("platform").as_(Platform).also. \ has("playerIds").as_(Iterable) def for_champion_mastery_score_dto(champion_mastery_score: ChampionMasteryScoreDto) -> Tuple[str, str]: return champion_mastery_score["platform"], champion_mastery_score["playerId"] def for_champion_mastery_score_dto_query(query: Query) -> Tuple[str, str]: return query["platform"].value, query["playerId"] def for_many_champion_mastery_score_dto_query(query: Query) -> Generator[Tuple[str, str], None, None]: for summoner_id in query["playerIds"]: try: yield query["platform"].value, summoner_id except ValueError as e: raise QueryValidationError from e ############## # League API # ############## validate_league_entries_dto_query = Query. \ has("platform").as_(Platform).also. \ has("queue").as_(Queue).also. \ has("tier").as_(Tier).also. \ has("page").as_(int).also. 
\ has("id").as_(int) # League ID def for_league_entries_dto(league_entries: LeagueEntriesDto) -> Tuple[str, str, str, int, int]: return league_entries["platform"], league_entries["queue"], league_entries["tier"], league_entries["id"], league_entries["page"] def for_league_entries_dto_query(query: Query) -> Tuple[str, str, str, int, int]: return query["platform"].value, query["queue"].value, query["tier"].value, query["id"], query["page"] validate_league_summoner_entries_dto_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(int) # Summoner ID def for_league_summoner_entries_dto(league_entries: LeagueEntriesDto) -> Tuple[str, int]: return league_entries["platform"], league_entries["id"] def for_league_summoner_entries_dto_query(query: Query) -> Tuple[str, int]: return query["platform"].value, query["id"] ################### # Static Data API # ################### # Champion validate_champion_dto_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}).also. \ has("id").as_(int).or_("name").as_(str) validate_many_champion_dto_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}).also. 
\ has("ids").as_(Iterable).or_("names").as_(Iterable) def for_champion_dto(champion: ChampionDto, identifier: str = "id") -> Tuple[str, str, str, int, Union[int, str]]: return champion["platform"], champion["version"], champion["locale"], _hash_included_data(champion["includedData"]), champion[identifier] def for_champion_dto_query(query: Query) -> Tuple[str, str, str, int, Union[int, str]]: identifier = "id" if "id" in query else "name" return query["platform"].value, query["version"], query["locale"], _hash_included_data(query["includedData"]), query[identifier] def for_many_champion_dto_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]: identifiers, identifier_type = (query["ids"], int) if "ids" in query else (query["names"], str) included_data_hash = _hash_included_data(query["includedData"]) for identifier in identifiers: try: identifier = identifier_type(identifier) yield query["platform"].value, query["version"], query["locale"], included_data_hash, identifier except ValueError as e: raise QueryValidationError from e validate_champion_list_dto_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}) validate_many_champion_list_dto_query = Query. \ has("platforms").as_(Iterable).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. 
\ can_have("includedData").with_default({"all"}) def for_champion_list_dto(champion_list: ChampionListDto) -> Tuple[str, str, str, int]: return champion_list["platform"], champion_list["version"], champion_list["locale"], _hash_included_data(champion_list["includedData"]) def for_champion_list_dto_query(query: Query) -> Tuple[str, str, str, int]: return query["platform"].value, query["version"], query["locale"], _hash_included_data(query["includedData"]) def for_many_champion_list_dto_query(query: Query) -> Generator[Tuple[str, str, str, int], None, None]: included_data_hash = _hash_included_data(query["includedData"]) for platform in query["platforms"]: try: platform = Platform(platform) yield platform.value, query["version"], query["locale"], included_data_hash except ValueError as e: raise QueryValidationError from e # Item validate_item_dto_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}).also. \ has("id").as_(int).or_("name").as_(str) validate_many_item_dto_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}).also. 
\ has("ids").as_(Iterable).or_("name").as_(Iterable) def for_item_dto(item: ItemDto, identifier: str = "id") -> Tuple[str, str, str, int, Union[int, str]]: return item["platform"], item["version"], item["locale"], _hash_included_data(item["includedData"]), item[identifier] def for_item_dto_query(query: Query) -> Tuple[str, str, str, int, Union[int, str]]: identifier = "id" if "id" in query else "name" return query["platform"].value, query["version"], query["locale"], _hash_included_data(query["includedData"]), query[identifier] def for_many_item_dto_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]: identifiers, identifier_type = (query["ids"], int) if "ids" in query else (query["names"], str) included_data_hash = _hash_included_data(query["includedData"]) for identifier in identifiers: try: identifier = identifier_type(identifier) yield query["platform"].value, query["version"], query["locale"], included_data_hash, identifier except ValueError as e: raise QueryValidationError from e validate_item_list_dto_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}) validate_many_item_list_dto_query = Query. \ has("platforms").as_(Iterable).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. 
\ can_have("includedData").with_default({"all"}) def for_item_list_dto(item_list: ItemListDto) -> Tuple[str, str, str, int]: return item_list["platform"], item_list["version"], item_list["locale"], _hash_included_data(item_list["includedData"]) def for_item_list_dto_query(query: Query) -> Tuple[str, str, str, int]: return query["platform"].value, query["version"], query["locale"], _hash_included_data(query["includedData"]) def for_many_item_list_dto_query(query: Query) -> Generator[Tuple[str, str, str, int], None, None]: included_data_hash = _hash_included_data(query["includedData"]) for platform in query["platforms"]: try: platform = Platform(platform) yield platform.value, query["version"], query["locale"], included_data_hash except ValueError as e: raise QueryValidationError from e # Language validate_language_strings_dto_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str) validate_many_language_strings_dto_query = Query. \ has("platforms").as_(Iterable).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str) def for_language_strings_dto(language_strings: LanguageStringsDto) -> Tuple[str, str, str]: return language_strings["platform"], language_strings["version"], language_strings["locale"] def for_language_strings_dto_query(query: Query) -> Tuple[str, str, str]: return query["platform"].value, query["version"], query["locale"] def for_many_language_strings_dto_query(query: Query) -> Generator[Tuple[str, str, str], None, None]: for platform in query["platforms"]: try: platform = Platform(platform) yield platform.value, query["version"], query["locale"] except ValueError as e: raise QueryValidationError from e validate_languages_dto_query = Query. 
# Completes "validate_languages_dto_query = Query." from the previous line.
validate_languages_dto_query = Query. \
    has("platform").as_(Platform)


validate_many_languages_dto_query = Query. \
    has("platforms").as_(Iterable)


def for_languages_dto(languages: LanguagesDto) -> str:
    """Cache key for a LanguagesDto: just the platform."""
    return languages["platform"]


def for_languages_dto_query(query: Query) -> str:
    """Cache key for a languages query."""
    return query["platform"].value


def for_many_languages_dto_query(query: Query) -> Generator[str, None, None]:
    """Yield one cache key per requested platform."""
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value
        except ValueError as error:
            raise QueryValidationError from error


# Map

validate_map_dto_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    has("mapId").as_(int).or_("mapName").as_(str)
)


validate_many_map_dto_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    has("mapIds").as_(Iterable).or_("mapNames").as_(Iterable)
)


def for_map_dto(map: MapDto, identifier: str = "mapId") -> Tuple[str, str, str, Union[int, str]]:
    """Cache key for a MapDto: (platform, version, locale, mapId-or-mapName)."""
    return map["platform"], map["version"], map["locale"], map[identifier]


def for_map_dto_query(query: Query) -> Tuple[str, str, str, Union[int, str]]:
    """Cache key for a single-map query; prefers "mapId" over "mapName"."""
    identifier = "mapId" if "mapId" in query else "mapName"
    return query["platform"].value, query["version"], query["locale"], query[identifier]


def for_many_map_dto_query(query: Query) -> Generator[Tuple[str, str, str, Union[int, str]], None, None]:
    """Yield one cache key per requested map id (or name)."""
    identifiers, identifier_type = (query["mapIds"], int) if "mapIds" in query else (query["mapNames"], str)
    for identifier in identifiers:
        try:
            identifier = identifier_type(identifier)
            yield query["platform"].value, query["version"], query["locale"], identifier
        except ValueError as error:
            raise QueryValidationError from error


validate_map_list_dto_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str)
)


validate_many_map_list_dto_query = (
    Query.
    has("platforms").as_(Iterable).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str)
)


def for_map_list_dto(map_list: MapListDto) -> Tuple[str, str, str]:
    """Cache key for a MapListDto."""
    return map_list["platform"], map_list["version"], map_list["locale"]


def for_map_list_dto_query(query: Query) -> Tuple[str, str, str]:
    """Cache key for a map-list query."""
    return query["platform"].value, query["version"], query["locale"]


def for_many_map_list_dto_query(query: Query) -> Generator[Tuple[str, str, str], None, None]:
    """Yield one cache key per requested platform."""
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value, query["version"], query["locale"]
        except ValueError as error:
            raise QueryValidationError from error


# Profile Icon

validate_profile_icon_data_dto_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str)
)


validate_many_profile_icon_data_dto_query = (
    Query.
    has("platforms").as_(Iterable).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str)
)
# Completes "validate_many_profile_icon_data_dto_query = Query." from the previous line.
validate_many_profile_icon_data_dto_query = Query. \
    has("platforms").as_(Iterable).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str)


def for_profile_icon_data_dto(profile_icon_data: ProfileIconDataDto) -> Tuple[str, str, str]:
    """Cache key for a ProfileIconDataDto."""
    return profile_icon_data["platform"], profile_icon_data["version"], profile_icon_data["locale"]


def for_profile_icon_data_dto_query(query: Query) -> Tuple[str, str, str]:
    """Cache key for a profile-icon-data query."""
    return query["platform"].value, query["version"], query["locale"]


def for_many_profile_icon_data_dto_query(query: Query) -> Generator[Tuple[str, str, str], None, None]:
    """Yield one cache key per requested platform."""
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value, query["version"], query["locale"]
        except ValueError as e:
            raise QueryValidationError from e


validate_profile_icon_dto_query = Query. \
    has("platform").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    has("id").as_(int)


validate_many_profile_icon_dto_query = Query. \
    has("platform").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    has("ids").as_(Iterable)


def for_profile_icon_dto(profile_icon: ProfileIconDetailsDto) -> Tuple[str, str, str, int]:
    """Cache key for a ProfileIconDetailsDto."""
    return profile_icon["platform"], profile_icon["version"], profile_icon["locale"], profile_icon["id"]


def for_profile_icon_dto_query(query: Query) -> Tuple[str, str, str, int]:
    """Cache key for a single profile-icon query."""
    return query["platform"].value, query["version"], query["locale"], query["id"]


def for_many_profile_icon_dto_query(query: Query) -> Generator[Tuple[str, str, str, int], None, None]:
    """Yield one cache key per requested profile-icon id."""
    # renamed loop variable so the builtin `id` is not shadowed
    for icon_id in query["ids"]:
        try:
            icon_id = int(icon_id)
            yield query["platform"].value, query["version"], query["locale"], icon_id
        except ValueError as e:
            raise QueryValidationError from e


# Realm

validate_realm_dto_query = Query. \
    has("platform").as_(Platform)


validate_many_realm_dto_query = Query. \
    has("platforms").as_(Iterable)


def for_realm_dto(realm: RealmDto) -> str:
    """Cache key for a RealmDto: just the platform."""
    return realm["platform"]


def for_realm_dto_query(query: Query) -> str:
    """Cache key for a realm query."""
    return query["platform"].value


def for_many_realm_dto_query(query: Query) -> Generator[str, None, None]:
    """Yield one cache key per requested platform.

    Fixed: the previous body was ``return query["platform"].value`` — it read the
    singular key (the validator requires "platforms") and returned instead of
    yielding, so the declared Generator contract was broken. Now mirrors the
    other for_many_*_dto_query generators.
    """
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value
        except ValueError as e:
            raise QueryValidationError from e


# Rune

validate_rune_dto_query = Query. \
    has("platform").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"}).also. \
    has("id").as_(int).or_("name").as_(str)


validate_many_rune_dto_query = Query. \
    has("platform").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"}).also. \
    has("ids").as_(Iterable).or_("names").as_(Iterable)


def for_rune_dto(rune: RuneDto, identifier: str = "id") -> Tuple[str, str, str, int, Union[int, str]]:
    """Cache key for a RuneDto: (platform, version, locale, included-data hash, id-or-name)."""
    return rune["platform"], rune["version"], rune["locale"], _hash_included_data(rune["includedData"]), rune[identifier]


def for_rune_dto_query(query: Query) -> Tuple[str, str, str, int, Union[int, str]]:
    """Cache key for a single-rune query; prefers "id" over "name"."""
    identifier = "id" if "id" in query else "name"
    return query["platform"].value, query["version"], query["locale"], _hash_included_data(query["includedData"]), query[identifier]


def for_many_rune_dto_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]:
    """Yield one cache key per requested rune id (or name)."""
    identifiers, identifier_type = (query["ids"], int) if "ids" in query else (query["names"], str)
    included_data_hash = _hash_included_data(query["includedData"])
    for identifier in identifiers:
        try:
            identifier = identifier_type(identifier)
            yield query["platform"].value, query["version"], query["locale"], included_data_hash, identifier
        except ValueError as e:
            raise QueryValidationError from e


validate_rune_list_dto_query = Query. \
    has("platform").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"})
# Completes "validate_rune_list_dto_query = Query." from the previous line.
validate_rune_list_dto_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"})
)


validate_many_rune_list_dto_query = (
    Query.
    has("platforms").as_(Iterable).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"})
)


def for_rune_list_dto(rune_list: RuneListDto) -> Tuple[str, str, str, int]:
    """Cache key for a RuneListDto."""
    return rune_list["platform"], rune_list["version"], rune_list["locale"], _hash_included_data(rune_list["includedData"])


def for_rune_list_dto_query(query: Query) -> Tuple[str, str, str, int]:
    """Cache key for a rune-list query."""
    return query["platform"].value, query["version"], query["locale"], _hash_included_data(query["includedData"])


def for_many_rune_list_dto_query(query: Query) -> Generator[Tuple[str, str, str, int], None, None]:
    """Yield one cache key per requested platform."""
    included_data_hash = _hash_included_data(query["includedData"])
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value, query["version"], query["locale"], included_data_hash
        except ValueError as error:
            raise QueryValidationError from error


# Summoner Spell

validate_summoner_spell_dto_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"}).also.
    has("id").as_(int).or_("name").as_(str)
)


validate_many_summoner_spell_dto_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"}).also.
    has("ids").as_(Iterable).or_("names").as_(Iterable)
)


def for_summoner_spell_dto(summoner_spell: SummonerSpellDto, identifier: str = "id") -> Tuple[str, str, str, int, Union[int, str]]:
    """Cache key for a SummonerSpellDto: (platform, version, locale, included-data hash, id-or-name)."""
    return summoner_spell["platform"], summoner_spell["version"], summoner_spell["locale"], _hash_included_data(summoner_spell["includedData"]), summoner_spell[identifier]


def for_summoner_spell_dto_query(query: Query) -> Tuple[str, str, str, int, Union[int, str]]:
    """Cache key for a single summoner-spell query; prefers "id" over "name"."""
    identifier = "id" if "id" in query else "name"
    return query["platform"].value, query["version"], query["locale"], _hash_included_data(query["includedData"]), query[identifier]


def for_many_summoner_spell_dto_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]:
    """Yield one cache key per requested summoner-spell id (or name)."""
    identifiers, identifier_type = (query["ids"], int) if "ids" in query else (query["names"], str)
    included_data_hash = _hash_included_data(query["includedData"])
    for identifier in identifiers:
        try:
            identifier = identifier_type(identifier)
            yield query["platform"].value, query["version"], query["locale"], included_data_hash, identifier
        except ValueError as error:
            raise QueryValidationError from error


validate_summoner_spell_list_dto_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"})
)


validate_many_summoner_spell_list_dto_query = (
    Query.
    has("platforms").as_(Iterable).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"})
)


def for_summoner_spell_list_dto(summoner_spell_list: SummonerSpellListDto) -> Tuple[str, str, str, int]:
    """Cache key for a SummonerSpellListDto."""
    return summoner_spell_list["platform"], summoner_spell_list["version"], summoner_spell_list["locale"], _hash_included_data(summoner_spell_list["includedData"])


def for_summoner_spell_list_dto_query(query: Query) -> Tuple[str, str, str, int]:
    """Cache key for a summoner-spell-list query."""
    return query["platform"].value, query["version"], query["locale"], _hash_included_data(query["includedData"])


def for_many_summoner_spell_list_dto_query(query: Query) -> Generator[Tuple[str, str, str, int], None, None]:
    """Yield one cache key per requested platform."""
    included_data_hash = _hash_included_data(query["includedData"])
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value, query["version"], query["locale"], included_data_hash
        except ValueError as error:
            raise QueryValidationError from error


# Version

validate_version_list_dto_query = Query. \
    has("platform").as_(Platform)


validate_many_version_list_dto_query = Query. \
    has("platforms").as_(Iterable)


def for_version_list_dto(version_list: VersionListDto) -> str:
    """Cache key for a VersionListDto: just the platform."""
    return version_list["platform"]


def for_version_list_dto_query(query: Query) -> str:
    """Cache key for a version-list query."""
    return query["platform"].value


def for_many_version_list_dto_query(query: Query) -> Generator[str, None, None]:
    """Yield one cache key per requested platform."""
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value
        except ValueError as error:
            raise QueryValidationError from error


##############
# Status API #
##############

validate_shard_status_dto_query = Query. \
    has("platform").as_(Platform)


validate_many_shard_status_dto_query = Query. \
    has("platforms").as_(Iterable)
# Completes "validate_many_shard_status_dto_query = Query." from the previous line.
validate_many_shard_status_dto_query = Query. \
    has("platforms").as_(Iterable)


def for_shard_status_dto(shard_status: ShardStatusDto) -> str:
    """Cache key for a ShardStatusDto: just the platform."""
    return shard_status["platform"]


def for_shard_status_dto_query(query: Query) -> str:
    """Cache key for a shard-status query."""
    return query["platform"].value


def for_many_shard_status_dto_query(query: Query) -> Generator[str, None, None]:
    """Yield one cache key per requested platform."""
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value
        except ValueError as error:
            raise QueryValidationError from error


#############
# Match API #
#############

validate_match_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("gameId").as_(int)


validate_many_match_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("gameIds").as_(Iterable)


def for_match_dto(match: MatchDto) -> Tuple[str, int]:
    """Cache key for a MatchDto: (platform, gameId)."""
    return match["platform"], match["gameId"]


def for_match_dto_query(query: Query) -> Tuple[str, int]:
    """Cache key for a single-match query."""
    return query["platform"].value, query["gameId"]


def for_many_match_dto_query(query: Query) -> Generator[Tuple[str, int], None, None]:
    """Yield one cache key per requested game id."""
    for game_id in query["gameIds"]:
        try:
            game_id = int(game_id)
            yield query["platform"].value, game_id
        except ValueError as error:
            raise QueryValidationError from error


validate_match_reference_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("gameId").as_(int)


validate_many_match_reference_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("gameIds").as_(Iterable)


def for_match_reference_dto(match_reference: MatchReferenceDto) -> Tuple[str, int]:
    """Cache key for a MatchReferenceDto: (platform, gameId)."""
    return match_reference["platform"], match_reference["gameId"]


def for_match_reference_dto_query(query: Query) -> Tuple[str, int]:
    """Cache key for a single match-reference query."""
    return query["platform"].value, query["gameId"]


def for_many_match_reference_dto_query(query: Query) -> Generator[Tuple[str, int], None, None]:
    """Yield one cache key per requested game id."""
    for game_id in query["gameIds"]:
        try:
            game_id = int(game_id)
            yield query["platform"].value, game_id
        except ValueError as error:
            raise QueryValidationError from error


validate_match_timeline_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("matchId").as_(int)


validate_many_match_timeline_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("matchIds").as_(Iterable)


def for_match_timeline_dto(match_timeline: TimelineDto) -> Tuple[str, int]:
    """Cache key for a TimelineDto: (platform, matchId)."""
    return match_timeline["platform"], match_timeline["matchId"]


def for_match_timeline_dto_query(query: Query) -> Tuple[str, int]:
    """Cache key for a single match-timeline query."""
    return query["platform"].value, query["matchId"]


def for_many_match_timeline_dto_query(query: Query) -> Generator[Tuple[str, int], None, None]:
    """Yield one cache key per requested match id."""
    for match_id in query["matchIds"]:
        try:
            match_id = int(match_id)
            yield query["platform"].value, match_id
        except ValueError as error:
            raise QueryValidationError from error


#################
# Spectator API #
#################

validate_current_game_info_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("gameId").as_(int)


validate_many_current_game_info_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("gameIds").as_(Iterable)


def for_current_game_info_dto(current_game_info: CurrentGameInfoDto) -> Tuple[str, int]:
    """Cache key for a CurrentGameInfoDto: (platform, gameId)."""
    return current_game_info["platform"], current_game_info["gameId"]


def for_current_game_info_dto_query(query: Query) -> Tuple[str, int]:
    """Cache key for a single current-game query."""
    return query["platform"].value, query["gameId"]


def for_many_current_game_info_dto_query(query: Query) -> Generator[Tuple[str, int], None, None]:
    """Yield one cache key per requested game id."""
    for game_id in query["gameIds"]:
        try:
            game_id = int(game_id)
            yield query["platform"].value, game_id
        except ValueError as error:
            raise QueryValidationError from error


validate_featured_game_dto_query = Query. \
    has("platform").as_(Platform)


validate_many_featured_game_dto_query = Query. \
    has("platforms").as_(Iterable)


def for_featured_games_dto(featured_games: FeaturedGamesDto) -> str:
    """Cache key for a FeaturedGamesDto: just the platform."""
    return featured_games["platform"]


def for_featured_games_dto_query(query: Query) -> str:
    """Cache key for a featured-games query."""
    return query["platform"].value


def for_many_featured_games_dto_query(query: Query) -> Generator[str, None, None]:
    """Yield one cache key per requested platform."""
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield platform.value
        except ValueError as error:
            raise QueryValidationError from error


################
# Summoner API #
################

validate_summoner_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("id").as_(int).or_("accountId").as_(int).or_("name").as_(str)


validate_many_summoner_dto_query = Query. \
    has("platform").as_(Platform).also. \
    has("ids").as_(Iterable).or_("accountIds").as_(Iterable).or_("names").as_(Iterable)


def for_summoner_dto(summoner: SummonerDto, identifier: str = "id") -> Tuple[str, Union[int, str]]:
    """Cache key for a SummonerDto: (platform, chosen identifier)."""
    return summoner["platform"], summoner[identifier]


def for_summoner_dto_query(query: Query) -> Tuple[str, Union[int, str]]:
    """Cache key for a single-summoner query; precedence: id, accountId, name."""
    if "id" in query:
        identifier = "id"
    elif "accountId" in query:
        identifier = "accountId"
    else:
        identifier = "name"
    return query["platform"].value, query[identifier]


def for_many_summoner_dto_query(query: Query) -> Generator[Tuple[str, Union[int, str]], None, None]:
    """Yield one cache key per requested summoner identifier."""
    if "ids" in query:
        identifiers, identifier_type = query["ids"], int
    elif "accountIds" in query:
        identifiers, identifier_type = query["accountIds"], int
    else:
        identifiers, identifier_type = query["names"], str
    for identifier in identifiers:
        try:
            identifier = identifier_type(identifier)
            yield query["platform"].value, identifier
        except ValueError as error:
            raise QueryValidationError from error


########
# Core #
########

################
# Champion API #
################

validate_champion_rotation_query = Query. \
    has("platform").as_(Platform)


# NOTE(review): unlike every other "many" validator this one requires the
# singular "platform" key — looks intentional for rotations, but confirm.
validate_many_champion_rotation_query = Query. \
    has("platform").as_(Platform)
# Completes "validate_many_champion_rotation_query = Query." from the previous line.
# NOTE(review): requires the singular "platform" key, unlike other "many"
# validators — looks intentional for rotations, but confirm.
validate_many_champion_rotation_query = Query. \
    has("platform").as_(Platform)


def for_champion_rotation(champion_rotation: ChampionRotationData) -> List[Region]:
    """Cache keys for a ChampionRotationData object (single-element list)."""
    keys = [champion_rotation.platform]
    return keys


def for_champion_rotation_query(query: Query) -> List[str]:
    """Cache keys for a champion-rotation query (single-element list)."""
    keys = [query["platform"].value]
    return keys


########################
# Champion Mastery API #
########################

validate_champion_mastery_query = Query. \
    has("platform").as_(Platform).also. \
    has("summoner.id").as_(str).or_("summoner.accountId").as_(str).or_("summoner.name").as_(str).also. \
    has("champion.id").as_(int).or_("champion.name").as_(str)


validate_many_champion_mastery_query = Query. \
    has("platform").as_(Platform).also. \
    has("summoner.id").as_(str).or_("summoner.accountId").as_(str).or_("summoner.name").as_(str).also. \
    has("champions.id").as_(Iterable).or_("champions.name").as_(Iterable)


def for_champion_mastery(champion_mastery: ChampionMastery) -> List[Tuple]:
    """All (platform, summoner-identifier, champion-identifier) key combinations
    that can be built from the loaded data; each combination is attempted and
    skipped if the underlying attribute is not available."""
    keys = []
    try:
        keys.append((champion_mastery.platform.value,
                     champion_mastery.summoner._data[SummonerData].id,
                     champion_mastery.champion._data[ChampionData].id))
    except AttributeError:
        pass
    try:
        keys.append((champion_mastery.platform.value,
                     champion_mastery.summoner._data[SummonerData].id,
                     champion_mastery.champion._data[ChampionData].name))
    except AttributeError:
        pass
    try:
        keys.append((champion_mastery.platform.value,
                     champion_mastery.summoner._data[SummonerData].name,
                     champion_mastery.champion._data[ChampionData].id))
    except AttributeError:
        pass
    try:
        keys.append((champion_mastery.platform.value,
                     champion_mastery.summoner._data[SummonerData].name,
                     champion_mastery.champion._data[ChampionData].name))
    except AttributeError:
        pass
    try:
        keys.append((champion_mastery.platform.value,
                     champion_mastery.summoner._data[SummonerData].account_id,
                     champion_mastery.champion._data[ChampionData].id))
    except AttributeError:
        pass
    try:
        keys.append((champion_mastery.platform.value,
                     champion_mastery.summoner._data[SummonerData].account_id,
                     champion_mastery.champion._data[ChampionData].name))
    except AttributeError:
        pass
    return keys


def for_champion_mastery_query(query: Query) -> List[Tuple]:
    """All key combinations present in the query."""
    keys = []
    if "summoner.id" in query and "champion.id" in query:
        keys.append((query["platform"].value, query["summoner.id"], query["champion.id"]))
    if "summoner.id" in query and "champion.name" in query:
        keys.append((query["platform"].value, query["summoner.id"], query["champion.name"]))
    if "summoner.name" in query and "champion.id" in query:
        keys.append((query["platform"].value, query["summoner.name"], query["champion.id"]))
    if "summoner.name" in query and "champion.name" in query:
        keys.append((query["platform"].value, query["summoner.name"], query["champion.name"]))
    if "summoner.accountId" in query and "champion.id" in query:
        keys.append((query["platform"].value, query["summoner.accountId"], query["champion.id"]))
    if "summoner.accountId" in query and "champion.name" in query:
        keys.append((query["platform"].value, query["summoner.accountId"], query["champion.name"]))
    return keys


def for_many_champion_mastery_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]:
    """Yield, per requested champion, the list of key combinations for it.

    Fixed: the identifier lists were read from query["ids"] / query["names"],
    keys this validator never defines (it defines "champions.id" and
    "champions.name"), which guaranteed a KeyError at runtime.
    """
    grouped_identifiers = []
    identifier_types = []
    if "champions.id" in query:
        grouped_identifiers.append(query["champions.id"])
        identifier_types.append(int)
    if "champions.name" in query:
        grouped_identifiers.append(query["champions.name"])
        identifier_types.append(str)
    for identifiers in zip(*grouped_identifiers):
        keys = []
        for identifier, identifier_type in zip(identifiers, identifier_types):
            identifier = identifier_type(identifier)
            if "summoner.id" in query:
                keys.append((query["platform"].value, query["summoner.id"], identifier))
            if "summoner.name" in query:
                keys.append((query["platform"].value, query["summoner.name"], identifier))
            if "summoner.accountId" in query:
                keys.append((query["platform"].value, query["summoner.accountId"], identifier))
        if len(keys) == 0:
            raise QueryValidationError
        yield keys


validate_champion_masteries_query = Query. \
    has("platform").as_(Platform).also. \
    has("summoner.id").as_(str).or_("summoner.accountId").as_(int).or_("summoner.name")
# Completes "validate_champion_masteries_query = Query." from the previous line.
# NOTE(review): "summoner.name" has no .as_() type here, unlike sibling chains
# — reproduced as-is; confirm whether .as_(str) was intended.
validate_champion_masteries_query = Query. \
    has("platform").as_(Platform).also. \
    has("summoner.id").as_(str).or_("summoner.accountId").as_(int).or_("summoner.name")


validate_many_champion_masteries_query = Query. \
    has("platform").as_(Platform).also. \
    has("summoner.id").as_(str).or_("summoner.accountId").as_(int).or_("summoner.name")


def for_champion_masteries(champion_mastery: ChampionMasteries) -> List[Tuple]:
    """All (platform, summoner-identifier) keys available on the object."""
    keys = []
    try:
        keys.append((champion_mastery.platform.value, champion_mastery.summoner._data[SummonerData].id))
    except AttributeError:
        pass
    try:
        keys.append((champion_mastery.platform.value, champion_mastery.summoner._data[SummonerData].name))
    except AttributeError:
        pass
    try:
        keys.append((champion_mastery.platform.value, champion_mastery.summoner._data[SummonerData].account_id))
    except AttributeError:
        pass
    return keys


def for_champion_masteries_query(query: Query) -> List[Tuple]:
    """All (platform, summoner-identifier) keys present in the query."""
    keys = []
    if "summoner.id" in query:
        keys.append((query["platform"].value, query["summoner.id"]))
    if "summoner.name" in query:
        keys.append((query["platform"].value, query["summoner.name"]))
    if "summoner.accountId" in query:
        keys.append((query["platform"].value, query["summoner.accountId"]))
    return keys


##############
# League API #
##############

# League Entries

validate_league_entries_query = Query. \
    has("platform").as_(Platform).also. \
    has("summoner.id").as_(str)


validate_many_league_entries_query = Query. \
    has("platform").as_(Platform).also. \
    has("summoners.id").as_(Iterable)


def for_league_summoner_entries(entries: LeagueSummonerEntries) -> List[Tuple[str, str]]:
    """Cache key for a LeagueSummonerEntries object (name-mangled summoner attribute)."""
    return [(entries.platform.value, entries._LeagueSummonerEntries__summoner.id)]


def for_league_summoner_entries_query(query: Query) -> List[Tuple[str, str]]:
    """Cache key for a league-entries query."""
    return [(query["platform"].value, query["summoner.id"])]


def for_many_league_summoner_entries_query(query: Query) -> Generator[List[Tuple[str, str]], None, None]:
    """Yield one cache key per requested summoner id."""
    # renamed loop variable so the builtin `id` is not shadowed
    for summoner_id in query["summoners.id"]:
        try:
            yield [(query["platform"].value, summoner_id)]
        except ValueError as error:
            raise QueryValidationError from error


# Leagues

validate_league_query = Query. \
    has("platform").as_(Platform).also. \
    has("id").as_(str)


validate_many_league_query = Query. \
    has("platform").as_(Platform).also. \
    has("ids").as_(Iterable)


def for_league(league: League) -> List[Tuple[str, str]]:
    """Cache key for a League object."""
    return [(league.platform.value, league.id)]


def for_league_query(query: Query) -> List[Tuple[str, str]]:
    """Cache key for a league query."""
    return [(query["platform"].value, query["id"])]


def for_many_league_query(query: Query) -> Generator[List[Tuple[str, str]], None, None]:
    """Yield one cache key per requested league id."""
    # renamed loop variable so the builtin `id` is not shadowed
    for league_id in query["ids"]:
        try:
            yield [(query["platform"].value, league_id)]
        except ValueError as error:
            raise QueryValidationError from error


# Challenger

validate_challenger_league_query = Query. \
    has("platform").as_(Platform).also. \
    has("queue").as_(Queue)


validate_many_challenger_league_query = Query. \
    has("platform").as_(Platform).also. \
    has("queues").as_(Iterable)


def for_challenger_league(league: ChallengerLeague) -> List[Tuple[str, str]]:
    """Cache key for a ChallengerLeague object."""
    return [(league.platform.value, league.queue.value)]


def for_challenger_league_query(query: Query) -> List[Tuple[str, str]]:
    """Cache key for a challenger-league query."""
    return [(query["platform"].value, query["queue"].value)]


def for_many_challenger_league_query(query: Query) -> Generator[List[Tuple[str, str]], None, None]:
    """Yield one cache key per requested queue."""
    for queue in query["queues"]:
        try:
            yield [(query["platform"].value, queue.value)]
        except ValueError as error:
            raise QueryValidationError from error


# Grandmaster

validate_grandmaster_league_query = Query. \
    has("platform").as_(Platform).also. \
    has("queue").as_(Queue)


validate_many_grandmaster_league_query = Query. \
    has("platform").as_(Platform).also. \
    has("queues").as_(Iterable)


def for_grandmaster_league(league: GrandmasterLeague) -> List[Tuple[str, str]]:
    """Cache key for a GrandmasterLeague object."""
    return [(league.platform.value, league.queue.value)]


def for_grandmaster_league_query(query: Query) -> List[Tuple[str, str]]:
    """Cache key for a grandmaster-league query."""
    return [(query["platform"].value, query["queue"].value)]


def for_many_grandmaster_league_query(query: Query) -> Generator[List[Tuple[str, str]], None, None]:
    """Yield one cache key per requested queue."""
    for queue in query["queues"]:
        try:
            yield [(query["platform"].value, queue.value)]
        except ValueError as error:
            raise QueryValidationError from error


# Master

validate_master_league_query = Query. \
    has("platform").as_(Platform).also. \
    has("queue").as_(Queue)


validate_many_master_league_query = Query. \
    has("platform").as_(Platform).also. \
    has("queues").as_(Iterable)


def for_master_league(league: MasterLeague) -> List[Tuple[str, str]]:
    """Cache key for a MasterLeague object."""
    return [(league.platform.value, league.queue.value)]


def for_master_league_query(query: Query) -> List[Tuple[str, str]]:
    """Cache key for a master-league query."""
    return [(query["platform"].value, query["queue"].value)]


def for_many_master_league_query(query: Query) -> Generator[List[Tuple[str, str]], None, None]:
    """Yield one cache key per requested queue."""
    for queue in query["queues"]:
        try:
            yield [(query["platform"].value, queue.value)]
        except ValueError as error:
            raise QueryValidationError from error


# League Entries List

validate_league_entries_list_query = Query. \
    has("queue").as_(Queue).also. \
    has("tier").as_(Tier).also. \
    has("division").as_(Division).also. \
    has("platform").as_(Platform)
# Completes "validate_league_entries_list_query = Query." from the previous line.
validate_league_entries_list_query = (
    Query.
    has("queue").as_(Queue).also.
    has("tier").as_(Tier).also.
    has("division").as_(Division).also.
    has("platform").as_(Platform)
)


def for_league_entries_list(lel: LeagueSummonerEntries) -> List[Tuple[str, str, str, str]]:
    """Cache key for a league-entries list: (platform, queue, tier, division)."""
    return [(lel.platform.value, lel.queue.value, lel.tier.value, lel.division.value)]


def for_league_entries_list_query(query: Query) -> List[Tuple[str, str, str, str]]:
    """Cache key for a league-entries-list query."""
    return [(query["platform"].value, query["queue"].value, query["tier"].value, query["division"].value)]


###################
# Static Data API #
###################

# Champion

validate_champion_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"}).also.
    has("id").as_(int).or_("name").as_(str)
)


validate_many_champion_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"}).also.
    has("ids").as_(Iterable).or_("names").as_(Iterable)
)


def for_champion(champion: Champion) -> List[Tuple]:
    """All cache keys derivable from a Champion object (by id and by name)."""
    keys = []
    try:
        keys.append((champion.platform.value, champion.version, champion.locale,
                     _hash_included_data(champion.included_data), champion._data[ChampionData].id))
    except AttributeError:
        pass
    try:
        keys.append((champion.platform.value, champion.version, champion.locale,
                     _hash_included_data(champion.included_data), champion._data[ChampionData].name))
    except AttributeError:
        pass
    return keys


def for_champion_query(query: Query) -> List[Tuple]:
    """All cache keys present in a single-champion query."""
    keys = []
    included_data_hash = _hash_included_data(query["includedData"])
    if "id" in query:
        keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, query["id"]))
    if "name" in query:
        keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, query["name"]))
    return keys


def for_many_champion_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]:
    """Yield, per requested champion, the list of keys built from ids/names in lockstep."""
    included_data_hash = _hash_included_data(query["includedData"])
    grouped_identifiers = []
    identifier_types = []
    if "ids" in query:
        grouped_identifiers.append(query["ids"])
        identifier_types.append(int)
    if "names" in query:
        grouped_identifiers.append(query["names"])
        identifier_types.append(str)
    for identifiers in zip(*grouped_identifiers):
        keys = []
        for identifier, identifier_type in zip(identifiers, identifier_types):
            try:
                identifier = identifier_type(identifier)
                keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, identifier))
            except ValueError as error:
                raise QueryValidationError from error
        yield keys


validate_champions_query = (
    Query.
    has("platform").as_(Platform).also.
    can_have("version").with_default(_get_default_version, supplies_type=str).also.
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also.
    can_have("includedData").with_default({"all"})
)


validate_many_champions_query = Query. \
    has("platforms").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"})
# Completes "validate_many_champions_query = Query." from the previous line.
# NOTE(review): every sibling "many" validator declares "platforms" as
# Iterable; as_(Platform) here looks inconsistent — confirm Query semantics
# before changing it. Kept as-is.
validate_many_champions_query = Query. \
    has("platforms").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"})


def for_champions(champions: Champions) -> List[Tuple[str, str, str, int]]:
    """Cache key for a Champions object."""
    return [(champions.platform.value, champions.version, champions.locale, _hash_included_data(champions.included_data))]


def for_champions_query(query: Query) -> List[Tuple[str, str, str, int]]:
    """Cache key for a champions query."""
    included_data_hash = _hash_included_data(query["includedData"])
    return [(query["platform"].value, query["version"], query["locale"], included_data_hash)]


def for_many_champions_query(query: Query) -> Generator[List[Tuple[str, str, str, int, Union[int, str]]], None, None]:
    """Yield one cache key per requested platform.

    Fixed: elements are now normalized with Platform(...) before .value is
    read, matching every other for_many_* generator in this module. This is
    backward compatible — calling the Platform enum on an existing member
    returns that member unchanged — and makes the ValueError handler reachable
    for invalid raw values.
    """
    included_data_hash = _hash_included_data(query["includedData"])
    for platform in query["platforms"]:
        try:
            platform = Platform(platform)
            yield [(platform.value, query["version"], query["locale"], included_data_hash)]
        except ValueError as e:
            raise QueryValidationError from e


# Item

validate_item_query = Query. \
    has("platform").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"}).also. \
    has("id").as_(int).or_("name").as_(str)


validate_many_item_query = Query. \
    has("platform").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"}).also. \
    has("ids").as_(Iterable).or_("names").as_(Iterable)


def for_item(item: Item) -> List[Tuple]:
    """All cache keys derivable from an Item object (by id and by name)."""
    keys = []
    try:
        keys.append((item.platform.value, item.version, item.locale,
                     _hash_included_data(item.included_data), item._data[ItemData].id))
    except AttributeError:
        pass
    try:
        keys.append((item.platform.value, item.version, item.locale,
                     _hash_included_data(item.included_data), item._data[ItemData].name))
    except AttributeError:
        pass
    return keys


def for_item_query(query: Query) -> List[Tuple]:
    """All cache keys present in a single-item query."""
    keys = []
    included_data_hash = _hash_included_data(query["includedData"])
    if "id" in query:
        keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, query["id"]))
    if "name" in query:
        keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, query["name"]))
    return keys


def for_many_item_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]:
    """Yield, per requested item, the list of keys built from ids/names in lockstep."""
    included_data_hash = _hash_included_data(query["includedData"])
    grouped_identifiers = []
    identifier_types = []
    if "ids" in query:
        grouped_identifiers.append(query["ids"])
        identifier_types.append(int)
    if "names" in query:
        grouped_identifiers.append(query["names"])
        identifier_types.append(str)
    for identifiers in zip(*grouped_identifiers):
        keys = []
        for identifier, identifier_type in zip(identifiers, identifier_types):
            try:
                identifier = identifier_type(identifier)
                keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, identifier))
            except ValueError as e:
                raise QueryValidationError from e
        yield keys


validate_items_query = Query. \
    has("platform").as_(Platform).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"})


validate_many_items_query = Query. \
    has("platforms").as_(Iterable).also. \
    can_have("version").with_default(_get_default_version, supplies_type=str).also. \
    can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \
    can_have("includedData").with_default({"all"})
\ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}) def for_items(items: Items) -> List[Tuple[str, str, str, int]]: return [(items.platform.value, items.version, items.locale, _hash_included_data(items.included_data))] def for_items_query(query: Query) -> List[Tuple[str, str, str, int]]: included_data_hash = _hash_included_data(query["includedData"]) return [(query["platform"].value, query["version"], query["locale"], included_data_hash)] def for_many_items_query(query: Query) -> Generator[List[Tuple[str, str, str, int, Union[int, str]]], None, None]: included_data_hash = _hash_included_data(query["includedData"]) for platform in query["platforms"]: try: yield [(platform.value, query["version"], query["locale"], included_data_hash)] except ValueError as e: raise QueryValidationError from e # Language validate_languages_query = Query. \ has("platform").as_(Platform) validate_many_languages_query = Query. \ has("platforms").as_(Iterable) def for_languages(languages: Locales) -> List[str]: return [languages.platform.value] def for_languages_query(query: Query) -> List[str]: return [query["platform"].value] def for_many_languages_query(query: Query) -> Generator[List[str], None, None]: for platform in query["platforms"]: yield [platform.value] validate_language_strings_query = Query. \ has("platform").as_(Platform) validate_many_language_strings_query = Query. \ has("platforms").as_(Iterable) def for_language_strings(languages: LanguageStrings) -> List[str]: return [languages.platform.value] def for_language_strings_query(query: Query) -> List[str]: return [query["platform"].value] def for_many_language_strings_query(query: Query) -> Generator[List[str], None, None]: for platform in query["platforms"]: yield [platform.value] # Map validate_map_query = Query. \ has("platform").as_(Platform).also. 
\ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ has("id").as_(int).or_("name").as_(str) validate_many_map_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ has("ids").as_(Iterable).or_("names").as_(Iterable) def for_map(map: Map) -> List[Tuple]: keys = [] try: keys.append((map.platform.value, map.version, map.locale, map._data[MapData].id)) except AttributeError: pass try: keys.append((map.platform.value, map.version, map.locale, map._data[MapData].name)) except AttributeError: pass return keys def for_map_query(query: Query) -> List[Tuple]: keys = [] if "id" in query: keys.append((query["platform"].value, query["version"], query["locale"], query["id"])) if "name" in query: keys.append((query["platform"].value, query["version"], query["locale"], query["name"])) return keys def for_many_map_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]: grouped_identifiers = [] identifier_types = [] if "ids" in query: grouped_identifiers.append(query["ids"]) identifier_types.append(int) if "names" in query: grouped_identifiers.append(query["names"]) identifier_types.append(str) for identifiers in zip(*grouped_identifiers): keys = [] for identifier, identifier_type in zip(identifiers, identifier_types): try: identifier = identifier_type(identifier) keys.append((query["platform"].value, query["version"], query["locale"], identifier)) except ValueError as e: raise QueryValidationError from e yield keys validate_maps_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str) validate_many_maps_query = Query. 
\ has("platforms").as_(Iterable).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str) def for_maps(maps: Maps) -> List[Tuple[str, str, str]]: return [(maps.platform.value, maps.version, maps.locale)] def for_maps_query(query: Query) -> List[Tuple[str, str, str]]: return [(query["platform"].value, query["version"], query["locale"])] def for_many_maps_query(query: Query) -> Generator[List[Tuple[str, str, str]], None, None]: for platform in query["platforms"]: yield [(platform.value, query["version"], query["locale"])] # Profile Icon validate_profile_icons_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str) validate_many_profile_icons_query = Query. \ has("platforms").as_(Iterable).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str) def for_profile_icons(profile_icon: ProfileIcons) -> List[Tuple[str, str, str]]: return [(Region(profile_icon.region).platform.value, profile_icon.version, profile_icon.locale)] def for_profile_icons_query(query: Query) -> List[Tuple[str, str, str]]: return [(query["platform"].value, query["version"], query["locale"])] def for_many_profile_icons_query(query: Query) -> Generator[List[Tuple[str, str, str]], None, None]: for platform in query["platforms"]: try: platform = Platform(platform) yield [(platform.value, query["version"], query["locale"])] except ValueError as e: raise QueryValidationError from e validate_profile_icon_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. 
\ has("id").as_(int) validate_many_profile_icon_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ has("ids").as_(Iterable) def for_profile_icon(profile_icon: ProfileIcon) -> List[Tuple[str, str, str, int]]: return [(Region(profile_icon.region).platform.value, profile_icon.version, profile_icon.locale, profile_icon.id)] def for_profile_icon_query(query: Query) -> List[Tuple[str, str, str, int]]: return [(query["platform"].value, query["version"], query["locale"], query["id"])] def for_many_profile_icon_query(query: Query) -> Generator[List[Tuple[str, str, str, int]], None, None]: for id in query["ids"]: try: id = int(id) yield [(query["platform"].value, query["version"], query["locale"], id)] except ValueError as e: raise QueryValidationError from e # Realm validate_realms_query = Query. \ has("platform").as_(Platform) validate_many_realms_query = Query. \ has("platforms").as_(Iterable) def for_realms(realm: Realms) -> List[str]: return [(realm.platform.value)] def for_realms_query(query: Query) -> List[str]: return [(query["platform"].value)] def for_many_realms_query(query: Query) -> Generator[List[str], None, None]: for platform in query["platforms"]: yield [(platform.value)] # Rune validate_rune_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}).also. \ has("id").as_(int).or_("name").as_(str) validate_many_rune_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}).also. 
\ has("ids").as_(Iterable).or_("names").as_(Iterable) def for_rune(rune: Rune) -> List[Tuple]: keys = [] try: keys.append((rune.platform.value, rune.version, rune.locale, _hash_included_data(rune.included_data), rune._data[RuneData].id)) except AttributeError: pass try: keys.append((rune.platform.value, rune.version, rune.locale, _hash_included_data(rune.included_data), rune._data[RuneData].name)) except AttributeError: pass return keys def for_rune_query(query: Query) -> List[Tuple]: keys = [] included_data_hash = _hash_included_data(query["includedData"]) if "id" in query: keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, query["id"])) if "name" in query: keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, query["name"])) return keys def for_many_rune_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]: included_data_hash = _hash_included_data(query["includedData"]) grouped_identifiers = [] identifier_types = [] if "ids" in query: grouped_identifiers.append(query["ids"]) identifier_types.append(int) if "names" in query: grouped_identifiers.append(query["names"]) identifier_types.append(str) for identifiers in zip(*grouped_identifiers): keys = [] for identifier, identifier_type in zip(identifiers, identifier_types): try: identifier = identifier_type(identifier) keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, identifier)) except ValueError as e: raise QueryValidationError from e yield keys validate_runes_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}) validate_many_runes_query = Query. \ has("platforms").as_(Iterable).also. 
\ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}) def for_runes(runes: Runes) -> List[Tuple[str, str, str, int]]: return [(runes.platform.value, runes.version, runes.locale, _hash_included_data(runes.included_data))] def for_runes_query(query: Query) -> List[Tuple[str, str, str, int]]: included_data_hash = _hash_included_data(query["includedData"]) return [(query["platform"].value, query["version"], query["locale"], included_data_hash)] def for_many_runes_query(query: Query) -> Generator[List[Tuple[str, str, str, int, Union[int, str]]], None, None]: included_data_hash = _hash_included_data(query["includedData"]) for platform in query["platforms"]: try: yield [(platform.value, query["version"], query["locale"], included_data_hash)] except ValueError as e: raise QueryValidationError from e # Summoner Spell validate_summoner_spell_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}).also. \ has("id").as_(int).or_("name").as_(str) validate_many_summoner_spell_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}).also. 
\ has("ids").as_(Iterable).or_("names").as_(Iterable) def for_summoner_spell(summoner_spell: SummonerSpell) -> List[Tuple]: keys = [] try: keys.append((summoner_spell.platform.value, summoner_spell.version, summoner_spell.locale, _hash_included_data(summoner_spell.included_data), summoner_spell._data[SummonerSpellData].id)) except AttributeError: pass try: keys.append((summoner_spell.platform.value, summoner_spell.version, summoner_spell.locale, _hash_included_data(summoner_spell.included_data), summoner_spell._data[SummonerSpellData].name)) except AttributeError: pass return keys def for_summoner_spell_query(query: Query) -> List[Tuple]: keys = [] included_data_hash = _hash_included_data(query["includedData"]) if "id" in query: keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, query["id"])) if "name" in query: keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, query["name"])) return keys def for_many_summoner_spell_query(query: Query) -> Generator[Tuple[str, str, str, int, Union[int, str]], None, None]: included_data_hash = _hash_included_data(query["includedData"]) grouped_identifiers = [] identifier_types = [] if "ids" in query: grouped_identifiers.append(query["ids"]) identifier_types.append(int) if "names" in query: grouped_identifiers.append(query["names"]) identifier_types.append(str) for identifiers in zip(*grouped_identifiers): keys = [] for identifier, identifier_type in zip(identifiers, identifier_types): try: identifier = identifier_type(identifier) keys.append((query["platform"].value, query["version"], query["locale"], included_data_hash, identifier)) except ValueError as e: raise QueryValidationError from e yield keys validate_summoner_spells_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. 
\ can_have("includedData").with_default({"all"}) validate_many_summoner_spells_query = Query. \ has("platforms").as_(Iterable).also. \ can_have("version").with_default(_get_default_version, supplies_type=str).also. \ can_have("locale").with_default(_get_default_locale, supplies_type=str).also. \ can_have("includedData").with_default({"all"}) def for_summoner_spells(summoner_spells: SummonerSpells) -> List[Tuple[str, str, str, int]]: return [(summoner_spells.platform.value, summoner_spells.version, summoner_spells.locale, _hash_included_data(summoner_spells.included_data))] def for_summoner_spells_query(query: Query) -> List[Tuple[str, str, str, int]]: included_data_hash = _hash_included_data(query["includedData"]) return [(query["platform"].value, query["version"], query["locale"], included_data_hash)] def for_many_summoner_spells_query(query: Query) -> Generator[List[Tuple[str, str, str, int, Union[int, str]]], None, None]: included_data_hash = _hash_included_data(query["includedData"]) for platform in query["platforms"]: try: yield [(platform.value, query["version"], query["locale"], included_data_hash)] except ValueError as e: raise QueryValidationError from e # Version validate_versions_query = Query. \ has("platform").as_(Platform) validate_many_versions_query = Query. \ has("platforms").as_(Iterable) def for_versions(versions: Versions) -> List[str]: return [versions.platform.value] def for_versions_query(query: Query) -> List[str]: return [query["platform"].value] def for_many_versions_query(query: Query) -> Generator[List[str], None, None]: for platform in query["platforms"]: try: platform = Platform(platform) yield [platform.value] except ValueError as e: raise QueryValidationError from e ############## # Status API # ############## validate_shard_status_query = Query. \ has("platform").as_(Platform) validate_many_shard_status_query = Query. 
\ has("platforms").as_(Iterable) def for_shard_status(shard_status: ShardStatus) -> List[str]: return [shard_status.platform.value] def for_shard_status_query(query: Query) -> List[str]: return [query["platform"].value] def for_many_shard_status_query(query: Query) -> Generator[List[str], None, None]: for platform in query["platforms"]: try: platform = Platform(platform) yield [platform.value] except ValueError as e: raise QueryValidationError from e ############# # Match API # ############# validate_match_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(int) validate_many_match_query = Query. \ has("platform").as_(Platform).also. \ has("ids").as_(Iterable) def for_match(match: Match) -> List[Tuple[str, int]]: return [(match.platform.value, match.id)] def for_match_query(query: Query) -> List[Tuple[str, int]]: return [(query["platform"].value, query["id"])] def for_many_match_query(query: Query) -> Generator[List[Tuple[str, int]], None, None]: for id in query["ids"]: try: id = int(id) yield [(query["platform"].value, id)] except ValueError as e: raise QueryValidationError from e validate_match_timeline_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(int) validate_many_match_timeline_query = Query. \ has("platform").as_(Platform).also. \ has("ids").as_(Iterable) def for_match_timeline(timeline: Timeline) -> List[Tuple[str, int]]: return [(timeline.platform.value, timeline.id)] def for_match_timeline_query(query: Query) -> List[Tuple[str, int]]: return [(query["platform"].value, query["id"])] def for_many_match_timeline_query(query: Query) -> Generator[List[Tuple[str, int]], None, None]: for id in query["ids"]: try: id = int(id) yield [(query["platform"].value, id)] except ValueError as e: raise QueryValidationError from e ################# # Spectator API # ################# validate_current_match_query = Query. \ has("platform").as_(Platform).also. \ has("summoner.id").as_(str) validate_many_current_match_query = Query. 
\ has("platform").as_(Platform).also. \ has("summoner.ids").as_(Iterable) def for_current_match(current_match_info: CurrentMatch) -> List[Tuple[str, str]]: # Reach into the data for the summoner ids so we don't create the Summoner objects # This stores the current match for every summoner in the match, so if a different summoner is # requested, the match isn't pulled a second time. return [(current_match_info.platform.value, participant._data[CurrentGameParticipantData].summonerId) for participant in current_match_info.participants] + [(current_match_info.platform.value, current_match_info.id)] def for_current_match_query(query: Query) -> List[Tuple[str, str]]: return [(query["platform"].value, query["summoner.id"])] def for_many_current_match_query(query: Query) -> Generator[List[Tuple[str, str]], None, None]: for summoner_id in query["summoner.ids"]: try: summoner_id = int(summoner_id) yield [(query["platform"].value, summoner_id)] except ValueError as e: raise QueryValidationError from e validate_featured_matches_query = Query. \ has("platform").as_(Platform) validate_many_featured_matches_query = Query. \ has("platforms").as_(Iterable) def for_featured_matches(featured_matches: FeaturedMatches) -> List[str]: return [featured_matches.platform] def for_featured_matches_query(query: Query) -> List[str]: return [query["platform"].value] def for_many_featured_matches_query(query: Query) -> Generator[List[str], None, None]: for platform in query["platforms"]: try: platform = Platform(platform) yield [platform.value] except ValueError as e: raise QueryValidationError from e ################ # Summoner API # ################ validate_summoner_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(str).or_("accountId").as_(str).or_("name").as_(str).or_("puuid").as_(str) validate_many_summoner_query = Query. \ has("platform").as_(Platform).also. 
\ has("ids").as_(Iterable).or_("accountIds").as_(Iterable).or_("names").as_(Iterable).or_("puuids").as_(Iterable) def for_summoner(summoner: Summoner) -> List[Tuple]: keys = [] try: keys.append((summoner.platform.value, "id", summoner._data[SummonerData].id)) except AttributeError: pass try: keys.append((summoner.platform.value, "name", summoner._data[SummonerData].name)) except AttributeError: pass try: keys.append((summoner.platform.value, "accountId", summoner._data[SummonerData].accountId)) except AttributeError: pass try: keys.append((summoner.platform.value, "puuid", summoner._data[SummonerData].puuid)) except AttributeError: pass return keys def for_summoner_query(query: Query) -> List[Tuple]: keys = [] if "id" in query: keys.append((query["platform"].value, "id", query["id"])) if "name" in query: keys.append((query["platform"].value, "name", query["name"])) if "accountId" in query: keys.append((query["platform"].value, "accountId", query["accountId"])) if "puuid" in query: keys.append((query["platform"].value, "puuid", query["puuid"])) return keys def for_many_summoner_query(query: Query) -> Generator[List[Tuple], None, None]: grouped_identifiers = [] identifier_types = [] if "ids" in query: grouped_identifiers.append(query["ids"]) identifier_types.append(str) elif "accountIds" in query: grouped_identifiers.append(query["accountIds"]) identifier_types.append(str) elif "puuids" in query: grouped_identifiers.append(query["puuids"]) identifier_types.append(str) elif "names" in query: grouped_identifiers.append(query["names"]) identifier_types.append(str) for identifiers in zip(*grouped_identifiers): keys = [] for identifier, identifier_type in zip(identifiers, identifier_types): try: identifier = identifier_type(identifier) keys.append((query["platform"].value, identifier)) except ValueError as e: raise QueryValidationError from e yield keys
{ "content_hash": "a26ee1ad19f2c2d7b03dea15c71e9308", "timestamp": "", "source": "github", "line_count": 2121, "max_line_length": 264, "avg_line_length": 36.17774634606318, "alnum_prop": 0.660146221312864, "repo_name": "sserrot/champion_relationships", "id": "2f2e49932a980d7b9d6267846adfebb8f1f4abc4", "size": "76733", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "venv/Lib/site-packages/cassiopeia/datastores/uniquekeys.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "128" }, { "name": "HTML", "bytes": "18324224" }, { "name": "Jupyter Notebook", "bytes": "9131072" }, { "name": "Python", "bytes": "10702" } ], "symlink_target": "" }
"""Tabs and tab groups for the project-level Load Balancers panel."""

from django.utils.translation import ugettext_lazy as _

from horizon_lib import exceptions
from horizon_lib import tabs

from openstack_horizon import api
from openstack_horizon.dashboards.project.loadbalancers import tables


class PoolsTab(tabs.TableTab):
    """Table tab listing the current tenant's LBaaS pools."""

    table_classes = (tables.PoolsTable,)
    name = _("Pools")
    slug = "pools"
    template_name = "horizon_lib/common/_detail_table.html"

    def get_poolstable_data(self):
        """Return the tenant's pools; on API failure, pop an error and return []."""
        try:
            tenant_id = self.request.user.tenant_id
            pools = api.lbaas.pool_list(self.tab_group.request,
                                        tenant_id=tenant_id)
        except Exception:
            pools = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve pools list.'))
        for p in pools:
            # ensure every row has a display name even when the API returns none
            p.set_id_as_name_if_empty()
        return pools


class MembersTab(tabs.TableTab):
    """Table tab listing the current tenant's pool members."""

    table_classes = (tables.MembersTable,)
    name = _("Members")
    slug = "members"
    template_name = "horizon_lib/common/_detail_table.html"

    def get_memberstable_data(self):
        """Return the tenant's members; on API failure, pop an error and return []."""
        try:
            tenant_id = self.request.user.tenant_id
            members = api.lbaas.member_list(self.tab_group.request,
                                            tenant_id=tenant_id)
        except Exception:
            members = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve member list.'))
        for m in members:
            m.set_id_as_name_if_empty()
        return members


class MonitorsTab(tabs.TableTab):
    """Table tab listing the current tenant's pool health monitors."""

    table_classes = (tables.MonitorsTable,)
    name = _("Monitors")
    slug = "monitors"
    template_name = "horizon_lib/common/_detail_table.html"

    def get_monitorstable_data(self):
        """Return the tenant's health monitors; on API failure, pop an error and return []."""
        try:
            tenant_id = self.request.user.tenant_id
            monitors = api.lbaas.pool_health_monitor_list(
                self.tab_group.request, tenant_id=tenant_id)
        except Exception:
            monitors = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve monitor list.'))
        return monitors


class LoadBalancerTabs(tabs.TabGroup):
    """Top-level tab group shown on the Load Balancers panel index page."""

    slug = "lbtabs"
    tabs = (PoolsTab, MembersTab, MonitorsTab)
    sticky = True


class PoolDetailsTab(tabs.Tab):
    """Detail tab for a single pool; the pool object is supplied via tab group kwargs."""

    name = _("Pool Details")
    slug = "pooldetails"
    template_name = "project/loadbalancers/_pool_details.html"

    def get_context_data(self, request):
        pool = self.tab_group.kwargs['pool']
        return {'pool': pool}


class VipDetailsTab(tabs.Tab):
    """Detail tab for a single VIP, fetched here by the id in the tab group kwargs."""

    name = _("VIP Details")
    slug = "vipdetails"
    template_name = "project/loadbalancers/_vip_details.html"

    def get_context_data(self, request):
        vid = self.tab_group.kwargs['vip_id']
        try:
            vip = api.lbaas.vip_get(request, vid)
        except Exception:
            # NOTE(review): the fallback is an empty list rather than None for a
            # single-object context value -- presumably only its falsiness matters
            # to the template; confirm before changing.
            vip = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve VIP details.'))
        return {'vip': vip}


class MemberDetailsTab(tabs.Tab):
    """Detail tab for a single member; the member object is supplied via tab group kwargs."""

    name = _("Member Details")
    slug = "memberdetails"
    template_name = "project/loadbalancers/_member_details.html"

    def get_context_data(self, request):
        member = self.tab_group.kwargs['member']
        return {'member': member}


class MonitorDetailsTab(tabs.Tab):
    """Detail tab for a single health monitor supplied via tab group kwargs."""

    name = _("Monitor Details")
    slug = "monitordetails"
    template_name = "project/loadbalancers/_monitor_details.html"

    def get_context_data(self, request):
        monitor = self.tab_group.kwargs['monitor']
        return {'monitor': monitor}


class PoolDetailsTabs(tabs.TabGroup):
    # single-tab group backing the pool detail view
    slug = "pooltabs"
    tabs = (PoolDetailsTab,)


class VipDetailsTabs(tabs.TabGroup):
    # single-tab group backing the VIP detail view
    slug = "viptabs"
    tabs = (VipDetailsTab,)


class MemberDetailsTabs(tabs.TabGroup):
    # single-tab group backing the member detail view
    slug = "membertabs"
    tabs = (MemberDetailsTab,)


class MonitorDetailsTabs(tabs.TabGroup):
    # single-tab group backing the monitor detail view
    slug = "monitortabs"
    tabs = (MonitorDetailsTab,)
{ "content_hash": "83e49287110fb4f6b1d68e230fd20336", "timestamp": "", "source": "github", "line_count": 138, "max_line_length": 69, "avg_line_length": 29.094202898550726, "alnum_prop": 0.6079701120797011, "repo_name": "mrunge/openstack_horizon", "id": "192d4cc2ff3962c9455b7b233c0d20e175bd5a77", "size": "4638", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "openstack_horizon/dashboards/project/loadbalancers/tabs.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "63809" }, { "name": "JavaScript", "bytes": "40" }, { "name": "Python", "bytes": "3460539" }, { "name": "Shell", "bytes": "16000" } ], "symlink_target": "" }
""" Created on 24 Dec 2018 @author: Bruno Beloff (bruno.beloff@southcoastscience.com) example: {"upload": "2018-12-24T13:09:03Z", "rec": "2018-12-24T13:09:01Z", "offset": 2} """ from collections import OrderedDict from scs_core.data.datetime import LocalizedDatetime from scs_core.data.json import JSONable from scs_core.data.path_dict import PathDict # -------------------------------------------------------------------------------------------------------------------- class UploadInterval(JSONable): """ classdocs """ UPLOAD_FIELD = 'upload' REC_FIELD = 'payload.rec' INCLUDE_MILLIS = False # ---------------------------------------------------------------------------------------------------------------- @classmethod def construct_from_jstr(cls, jstr): if not jstr: return None # document... document = PathDict.construct_from_jstr(jstr) if not document: return None # upload... upload_node = document.node(cls.UPLOAD_FIELD) upload = LocalizedDatetime.construct_from_iso8601(upload_node) if upload is None: raise ValueError(upload_node) # rec... rec_node = document.node(cls.REC_FIELD) rec = LocalizedDatetime.construct_from_iso8601(rec_node) if rec is None: raise ValueError(rec_node) # offset... 
offset = upload - rec return UploadInterval(upload, rec, offset) # ---------------------------------------------------------------------------------------------------------------- def __init__(self, upload, rec, offset): """ Constructor """ self.__upload = upload # LocalizedDatetime self.__rec = rec # LocalizedDatetime self.__offset = offset # Timedelta # ---------------------------------------------------------------------------------------------------------------- def as_json(self): jdict = OrderedDict() jdict['upload'] = self.upload.as_iso8601(include_millis=self.INCLUDE_MILLIS) jdict['rec'] = self.rec.as_iso8601(include_millis=self.INCLUDE_MILLIS) jdict['offset'] = self.offset return jdict # ---------------------------------------------------------------------------------------------------------------- @property def upload(self): return self.__upload @property def rec(self): return self.__rec @property def offset(self): return self.__offset # ---------------------------------------------------------------------------------------------------------------- def __str__(self, *args, **kwargs): return "UploadInterval:{upload:%s, rec:%s, offset:%s}" % (self.upload, self.rec, self.offset)
{ "content_hash": "5e2982a9593c18bb7f41ce209797b0ae", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 118, "avg_line_length": 26.703703703703702, "alnum_prop": 0.44140083217753123, "repo_name": "south-coast-science/scs_core", "id": "84f7cd0aee09b93216afc74cca56ca38e16d3625", "size": "2884", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "src/scs_core/aws/data/upload_interval.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1461551" } ], "symlink_target": "" }
"""Script to parse Windows Jump List files: * .automaticDestinations-ms * .customDestinations-ms """ import argparse import logging import sys import pyolecf from dtformats import jump_list from dtformats import output_writers def Main(): """The main program function. Returns: bool: True if successful or False if not. """ argument_parser = argparse.ArgumentParser(description=( 'Extracts information from Windows Jump List files.')) argument_parser.add_argument( '-d', '--debug', dest='debug', action='store_true', default=False, help='enable debug output.') argument_parser.add_argument( 'source', nargs='?', action='store', metavar='PATH', default=None, help='path of the Windows Jump List file.') options = argument_parser.parse_args() if not options.source: print('Source file missing.') print('') argument_parser.print_help() print('') return False logging.basicConfig( level=logging.INFO, format='[%(levelname)s] %(message)s') output_writer = output_writers.StdoutWriter() try: output_writer.Open() except IOError as exception: print(f'Unable to open output writer with error: {exception!s}') print('') return False if pyolecf.check_file_signature(options.source): jump_list_file = jump_list.AutomaticDestinationsFile( debug=options.debug, output_writer=output_writer) else: jump_list_file = jump_list.CustomDestinationsFile( debug=options.debug, output_writer=output_writer) jump_list_file.Open(options.source) print('Windows Jump List information:') number_of_entries = len(jump_list_file.entries) print(f'Number of entries:\t\t{number_of_entries:d}') number_of_entries = len(jump_list_file.recovered_entries) print(f'Number of recovered entries:\t{number_of_entries:d}') print('') for lnk_file_entry in jump_list_file.entries: print(f'LNK file entry: {lnk_file_entry.identifier:s}') for shell_item in lnk_file_entry.GetShellItems(): print(f'Shell item: 0x{shell_item.class_type:02x}') print('') jump_list_file.Close() output_writer.Close() return True if __name__ == '__main__': if not 
Main(): sys.exit(1) else: sys.exit(0)
{ "content_hash": "5b72072093924b072a1cda14e19e3c6a", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 72, "avg_line_length": 24.369565217391305, "alnum_prop": 0.6855486173059768, "repo_name": "libyal/dtformats", "id": "729d188b1bce97330bfd813c317e06e11bfb7df0", "size": "2288", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "scripts/jump_list.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "122" }, { "name": "PowerShell", "bytes": "827" }, { "name": "Python", "bytes": "700241" }, { "name": "Shell", "bytes": "1139" } ], "symlink_target": "" }
"""Metadata and setup information for the MySQL Connector/Python package.

This module computes the ``setup()`` keyword values (name, version,
packages, classifiers, command classes, ...) based on the running Python
version and the bundled ``version.VERSION`` tuple.
"""

import sys
import os
from distutils.sysconfig import get_python_lib

from version import VERSION

# Development Status Trove Classifiers significant for Connector/Python
# Keyed on the pre-release marker in VERSION[3] (None means a final release).
DEVELOPMENT_STATUSES = {
    'a': '3 - Alpha',
    'b': '4 - Beta',
    None: '5 - Production/Stable'
}

# Select the source tree matching the running interpreter; the code base
# ships separate python2/ and python3/ trees.
if sys.version_info >= (3, 1):
    sys.path = ['python3/'] + sys.path
    package_dir = {'': 'python3'}
elif sys.version_info >= (2, 4) and sys.version_info < (3, 0):
    sys.path = ['python2/'] + sys.path
    package_dir = {'': 'python2'}
else:
    raise RuntimeError(
        "Python v%d.%d is not supported" % sys.version_info[0:2])

name = 'mysql-connector-python'

# Version string such as "1.0.7" plus an optional pre-release suffix,
# e.g. "1.0.7b1" when VERSION is (1, 0, 7, 'b', 1).
version = '.'.join(map(str, VERSION[0:3]))
if VERSION[3] and VERSION[4]:
    version += VERSION[3] + str(VERSION[4])

try:
    from support.distribution.commands import sdist, bdist, dist_rpm, build

    cmdclasses = {
        'build': build.Build,
        'sdist': sdist.GenericSourceGPL,
        'sdist_gpl': sdist.SourceGPL,
        'bdist_com': bdist.BuiltCommercial,
        'bdist_com_rpm': dist_rpm.BuiltCommercialRPM,
        'sdist_gpl_rpm': dist_rpm.SDistGPLRPM,
    }

    if sys.version_info >= (2, 7):
        # MSI only supported for Python 2.7 and greater
        from support.distribution.commands import (dist_msi)
        cmdclasses.update({
            'bdist_com': bdist.BuiltCommercial,
            'bdist_com_msi': dist_msi.BuiltCommercialMSI,
            'sdist_gpl_msi': dist_msi.SourceMSI,
        })
except ImportError:
    # Part of Source Distribution: the packaging helpers are not shipped,
    # so fall back to the default distutils commands.
    cmdclasses = {}

packages = [
    'mysql',
    'mysql.connector',
    'mysql.connector.locales',
    'mysql.connector.locales.eng',
]
description = "MySQL driver written in Python"
long_description = """\
MySQL driver written in Python which does not depend on MySQL C client
libraries and implements the DB API v2.0 specification (PEP-249).
"""
author = 'Oracle and/or its affiliates'
author_email = ''
maintainer = 'Geert Vanderkelen'
maintainer_email = 'geert.vanderkelen@oracle.com'
license = "GNU GPLv2 (with FOSS License Exception)"
# BUG FIX: a stray trailing comma previously made this a 1-tuple
# ("mysql db",) instead of the string distutils expects for `keywords`.
keywords = "mysql db"
# NOTE: two earlier assignments of url/download_url were dead code
# (immediately overwritten); only the effective values are kept.
url = 'http://dev.mysql.com/doc/connector-python/en/index.html'
download_url = 'http://dev.mysql.com/downloads/connector/python/'
classifiers = [
    'Development Status :: %s' % (DEVELOPMENT_STATUSES[VERSION[3]]),
    'Environment :: Other Environment',
    'Intended Audience :: Developers',
    'Intended Audience :: Education',
    'Intended Audience :: Information Technology',
    'Intended Audience :: System Administrators',
    'License :: OSI Approved :: GNU General Public License (GPL)',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 2.4',
    'Programming Language :: Python :: 2.5',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.1',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Topic :: Database',
    'Topic :: Software Development',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
    'Topic :: Software Development :: Libraries :: Python Modules'
]
{ "content_hash": "a3f54557781495b14936b920b9a32641", "timestamp": "", "source": "github", "line_count": 96, "max_line_length": 75, "avg_line_length": 34.583333333333336, "alnum_prop": 0.6536144578313253, "repo_name": "rcosnita/fantastico", "id": "8017ec8cbfb23d7c195190ed4fc057e884a0858c", "size": "4452", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "virtual_env/libs/mysql-connector/metasetupinfo.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "6802" }, { "name": "Python", "bytes": "2168052" }, { "name": "Shell", "bytes": "13309" } ], "symlink_target": "" }
"""Implements ThreadPoolExecutor.""" __author__ = 'Brian Quinlan (brian@sweetapp.com)' import atexit from concurrent.futures import _base import itertools import queue import threading import weakref import os # Workers are created as daemon threads. This is done to allow the interpreter # to exit when there are still idle threads in a ThreadPoolExecutor's thread # pool (i.e. shutdown() was not called). However, allowing workers to die with # the interpreter has two undesirable properties: # - The workers would still be running during interpreter shutdown, # meaning that they would fail in unpredictable ways. # - The workers could be killed while evaluating a work item, which could # be bad if the callable being evaluated has external side-effects e.g. # writing to a file. # # To work around this problem, an exit handler is installed which tells the # workers to exit when their work queues are empty and then waits until the # threads finish. _threads_queues = weakref.WeakKeyDictionary() _shutdown = False def _python_exit(): global _shutdown _shutdown = True items = list(_threads_queues.items()) for t, q in items: q.put(None) for t, q in items: t.join() atexit.register(_python_exit) class _WorkItem(object): def __init__(self, future, fn, args, kwargs): self.future = future self.fn = fn self.args = args self.kwargs = kwargs def run(self): if not self.future.set_running_or_notify_cancel(): return try: result = self.fn(*self.args, **self.kwargs) except BaseException as exc: self.future.set_exception(exc) # Break a reference cycle with the exception 'exc' self = None else: self.future.set_result(result) def _worker(executor_reference, work_queue, initializer, initargs): if initializer is not None: try: initializer(*initargs) except BaseException: _base.LOGGER.critical('Exception in initializer:', exc_info=True) executor = executor_reference() if executor is not None: executor._initializer_failed() return try: while True: work_item = work_queue.get(block=True) if 
work_item is not None: work_item.run() # Delete references to object. See issue16284 del work_item continue executor = executor_reference() # Exit if: # - The interpreter is shutting down OR # - The executor that owns the worker has been collected OR # - The executor that owns the worker has been shutdown. if _shutdown or executor is None or executor._shutdown: # Flag the executor as shutting down as early as possible if it # is not gc-ed yet. if executor is not None: executor._shutdown = True # Notice other workers work_queue.put(None) return del executor except BaseException: _base.LOGGER.critical('Exception in worker', exc_info=True) class BrokenThreadPool(_base.BrokenExecutor): """ Raised when a worker thread in a ThreadPoolExecutor failed initializing. """ class ThreadPoolExecutor(_base.Executor): # Used to assign unique thread names when thread_name_prefix is not supplied. _counter = itertools.count().__next__ def __init__(self, max_workers=None, thread_name_prefix='', initializer=None, initargs=()): """Initializes a new ThreadPoolExecutor instance. Args: max_workers: The maximum number of threads that can be used to execute the given calls. thread_name_prefix: An optional name prefix to give our threads. initializer: An callable used to initialize worker threads. initargs: A tuple of arguments to pass to the initializer. """ if max_workers is None: # Use this number because ThreadPoolExecutor is often # used to overlap I/O instead of CPU work. 
max_workers = (os.cpu_count() or 1) * 5 if max_workers <= 0: raise ValueError("max_workers must be greater than 0") if initializer is not None and not callable(initializer): raise TypeError("initializer must be a callable") self._max_workers = max_workers self._work_queue = queue.SimpleQueue() self._threads = set() self._broken = False self._shutdown = False self._shutdown_lock = threading.Lock() self._thread_name_prefix = (thread_name_prefix or ("ThreadPoolExecutor-%d" % self._counter())) self._initializer = initializer self._initargs = initargs def submit(self, fn, *args, **kwargs): with self._shutdown_lock: if self._broken: raise BrokenThreadPool(self._broken) if self._shutdown: raise RuntimeError('cannot schedule new futures after shutdown') if _shutdown: raise RuntimeError('cannot schedule new futures after' 'interpreter shutdown') f = _base.Future() w = _WorkItem(f, fn, args, kwargs) self._work_queue.put(w) self._adjust_thread_count() return f submit.__doc__ = _base.Executor.submit.__doc__ def _adjust_thread_count(self): # When the executor gets lost, the weakref callback will wake up # the worker threads. def weakref_cb(_, q=self._work_queue): q.put(None) # TODO(bquinlan): Should avoid creating new threads if there are more # idle threads than items in the work queue. 
num_threads = len(self._threads) if num_threads < self._max_workers: thread_name = '%s_%d' % (self._thread_name_prefix or self, num_threads) t = threading.Thread(name=thread_name, target=_worker, args=(weakref.ref(self, weakref_cb), self._work_queue, self._initializer, self._initargs)) t.daemon = True t.start() self._threads.add(t) _threads_queues[t] = self._work_queue def _initializer_failed(self): with self._shutdown_lock: self._broken = ('A thread initializer failed, the thread pool ' 'is not usable anymore') # Drain work queue and mark pending futures failed while True: try: work_item = self._work_queue.get_nowait() except queue.Empty: break if work_item is not None: work_item.future.set_exception(BrokenThreadPool(self._broken)) def shutdown(self, wait=True): with self._shutdown_lock: self._shutdown = True self._work_queue.put(None) if wait: for t in self._threads: t.join() shutdown.__doc__ = _base.Executor.shutdown.__doc__
{ "content_hash": "dbb26575c21f5b23c7fa9930db742d7d", "timestamp": "", "source": "github", "line_count": 202, "max_line_length": 82, "avg_line_length": 36.881188118811885, "alnum_prop": 0.5793288590604027, "repo_name": "Microsoft/PTVS", "id": "b65dee11f727279df53d4723eb3be40976534c5c", "size": "7553", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "Python/Product/Miniconda/Miniconda3-x64/Lib/concurrent/futures/thread.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ASP", "bytes": "109" }, { "name": "Batchfile", "bytes": "10898" }, { "name": "C", "bytes": "23236" }, { "name": "C#", "bytes": "12235396" }, { "name": "C++", "bytes": "212001" }, { "name": "CSS", "bytes": "7025" }, { "name": "HTML", "bytes": "34251" }, { "name": "JavaScript", "bytes": "87257" }, { "name": "PowerShell", "bytes": "44322" }, { "name": "Python", "bytes": "847130" }, { "name": "Rich Text Format", "bytes": "260880" }, { "name": "Smarty", "bytes": "8156" }, { "name": "Tcl", "bytes": "24968" } ], "symlink_target": "" }
from django.contrib.gis.geos import Point, MultiPolygon, Polygon
from django.test import TestCase

from addressbase.models import UprnToCouncil, Address
from addressbase.tests.factories import UprnToCouncilFactory
from councils.tests.factories import CouncilFactory
from data_importers.management.commands.misc_fixes import (
    update_station_point,
    update_station_address,
    assign_addresses_by_district,
    unassign_addresses_by_district,
    unassign_uprns,
    remove_points_from_addressbase,
    delete_council_data,
)
from pollingstations.models import PollingStation, PollingDistrict
from pollingstations.tests.factories import (
    PollingStationFactory,
    PollingDistrictFactory,
)


class XpressDemocracyClubImportTests(TestCase):
    """Tests for the helpers in data_importers ``misc_fixes``.

    NOTE(review): the class name says "Xpress import" but every test
    exercises a misc_fixes helper — looks like a copy-pasted name; confirm.

    Fixture layout built in setUp:
      - council "FOO" (GSS code E000001)
      - station "123" at (-4, 50) and station "456"
      - district "AB" covering lon -5..-1, lat 45..60
      - UPRN 000000000 at (-3, 48)  -> station "123", inside district AB
      - UPRN 000000001 at (-6, 48)  -> station "123", outside district AB
      - UPRN 000000002 (empty point) -> station "456"
    """

    def setUp(self):
        # Council the stations/districts/UPRNs below all belong to.
        CouncilFactory.create(council_id="FOO", identifiers=["E000001"])
        self.station_123 = PollingStationFactory.create(
            council_id="FOO",
            internal_council_id="123",
            address="station address",
            postcode="AA1 1AA",
            location=Point(-4, 50, srid=4326),
        )
        self.station_456 = PollingStationFactory.create(
            council_id="FOO",
            internal_council_id="456",
        )
        # District AB: rectangle spanning lon -5..-1, lat 45..60.
        self.district_AB = PollingDistrictFactory.create(
            council_id="FOO",
            internal_council_id="AB",
            area=MultiPolygon(
                Polygon(((-5, 45), (-5, 60), (-1, 60), (-1, 45), (-5, 45)))
            ),
        )
        # Inside district AB, assigned to station 123.
        UprnToCouncilFactory.create(
            uprn__uprn="000000000",
            lad="E000001",
            polling_station_id="123",
            uprn__location=Point(-3, 48),
        )
        # Outside district AB (lon -6), assigned to station 123.
        UprnToCouncilFactory.create(
            uprn__uprn="000000001",
            lad="E000001",
            polling_station_id="123",
            uprn__location=Point(-6, 48),
        )
        # No location, assigned to station 456.
        UprnToCouncilFactory.create(
            uprn__uprn="000000002",
            lad="E000001",
            polling_station_id="456",
            uprn__location=Point(),
        )

    def test_update_station_point(self):
        """update_station_point replaces the station's location geometry."""
        self.assertEqual(self.station_123.location.coords, (-4, 50))
        update_station_point("FOO", "123", Point(-2, 55, srid=4326))
        self.station_123.refresh_from_db()
        self.assertEqual(self.station_123.location.coords, (-2, 55))

    def test_update_station_address(self):
        """Updating only the address blanks the postcode."""
        update_station_address("FOO", "123", address="new address")
        self.station_123.refresh_from_db()
        self.assertEqual(self.station_123.address, "new address")
        self.assertEqual(self.station_123.postcode, "")

    def test_update_station_address_and_postcode(self):
        """Address and postcode can be updated together."""
        update_station_address(
            "FOO", "123", address="another new address", postcode="AA2 2AA"
        )
        self.station_123.refresh_from_db()
        self.assertEqual(self.station_123.address, "another new address")
        self.assertEqual(self.station_123.postcode, "AA2 2AA")

    def test_update_station_postcode(self):
        """Updating only the postcode leaves the address untouched."""
        update_station_address("FOO", "123", postcode="AA3 3AA")
        self.station_123.refresh_from_db()
        self.assertEqual(self.station_123.address, "station address")
        self.assertEqual(self.station_123.postcode, "AA3 3AA")

    def test_assign_addresses_by_district(self):
        """UPRNs inside district AB are reassigned to station 456."""
        self.assertEqual(
            set(
                UprnToCouncil.objects.filter(polling_station_id="123").values_list(
                    "uprn", flat=True
                )
            ),
            {"000000000", "000000001"},
        )
        assign_addresses_by_district("FOO", "AB", "456")
        # 000000000 (inside AB) moved to 456; 000000001 (outside) stayed.
        self.assertEqual(
            set(
                UprnToCouncil.objects.filter(polling_station_id="123").values_list(
                    "uprn", flat=True
                )
            ),
            {"000000001"},
        )
        self.assertEqual(
            set(
                UprnToCouncil.objects.filter(polling_station_id="456").values_list(
                    "uprn", flat=True
                )
            ),
            {"000000000", "000000002"},
        )

    def test_unassign_addresses_by_district(self):
        """UPRNs inside district AB lose their station assignment."""
        self.assertEqual(
            len(
                UprnToCouncil.objects.filter(polling_station_id="123").values_list(
                    "uprn", flat=True
                )
            ),
            2,
        )
        unassign_addresses_by_district("FOO", "AB")
        self.assertEqual(
            len(
                UprnToCouncil.objects.filter(polling_station_id="123").values_list(
                    "uprn", flat=True
                )
            ),
            1,
        )

    def test_unassign_uprns(self):
        """Explicitly listed UPRNs are all cleared of their assignment."""
        self.assertEqual(
            len(
                UprnToCouncil.objects.filter(polling_station_id="").values_list(
                    "uprn", flat=True
                )
            ),
            0,
        )
        unassign_uprns(["000000000", "000000001", "000000002"])
        self.assertEqual(
            len(
                UprnToCouncil.objects.filter(polling_station_id="").values_list(
                    "uprn", flat=True
                )
            ),
            3,
        )

    def test_remove_points_from_addressbase(self):
        """Removal deletes both the Address row and its UprnToCouncil row."""
        self.assertTrue(Address.objects.filter(uprn="000000000").exists())
        self.assertTrue(UprnToCouncil.objects.filter(uprn="000000000").exists())
        remove_points_from_addressbase(["000000000"])
        self.assertFalse(Address.objects.filter(uprn="000000000").exists())
        self.assertFalse(UprnToCouncil.objects.filter(uprn="000000000").exists())

    def test_delete_council_data(self):
        """delete_council_data clears stations, districts, and assignments."""
        self.assertTrue(PollingStation.objects.filter(council_id="FOO").exists())
        self.assertTrue(PollingDistrict.objects.filter(council_id="FOO").exists())
        self.assertTrue(
            UprnToCouncil.objects.filter(lad="E000001")
            .exclude(polling_station_id="")
            .exists()
        )
        delete_council_data("FOO")
        self.assertFalse(PollingStation.objects.filter(council_id="FOO").exists())
        self.assertFalse(PollingDistrict.objects.filter(council_id="FOO").exists())
        self.assertFalse(
            UprnToCouncil.objects.filter(lad="E000001")
            .exclude(polling_station_id="")
            .exists()
        )
{ "content_hash": "53521c346ad7a4de36e6aebd51223450", "timestamp": "", "source": "github", "line_count": 180, "max_line_length": 83, "avg_line_length": 35.24444444444445, "alnum_prop": 0.5744010088272383, "repo_name": "DemocracyClub/UK-Polling-Stations", "id": "39dccd573bce9ffaee6bde0542afb32ec9e56f21", "size": "6344", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "polling_stations/apps/data_importers/tests/test_misc_fixes.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "32" }, { "name": "HTML", "bytes": "85540" }, { "name": "JavaScript", "bytes": "3399" }, { "name": "Procfile", "bytes": "49" }, { "name": "Python", "bytes": "1111337" }, { "name": "SCSS", "bytes": "5742" } ], "symlink_target": "" }
__author__ = 'gzs2478'
from PyQt4.QtCore import *
from PyQt4.QtGui import *


class Service(QObject):
    """Routes incoming messages to handlers keyed by command id.

    A dispatcher registers handler callables against command ids; ``handle``
    looks up the handler for a message's ``command_id`` and invokes it.
    """

    def __init__(self, sid, parent=None):
        # NOTE(review): the parent is intentionally NOT passed to QObject;
        # it is stored on an ordinary attribute instead (shadowing
        # QObject.parent) — preserved as-is.
        QObject.__init__(self, None)
        self.service_id = sid      # identifier of this service
        self.commands = {}         # command_id -> handler callable
        self.parent = parent       # owning dispatcher, settable later

    def setParent(self, dispatcher):
        """Attach this service to its owning dispatcher."""
        self.parent = dispatcher

    def handle(self, msg, owner):
        """Dispatch *msg* to the handler registered for its command id.

        Raises Exception when no handler is registered for the command.
        """
        cid = msg['command_id']
        if cid not in self.commands:
            raise Exception('Wrong Command %s' % cid)
        return self.commands[cid](msg, owner)

    def register(self, commandID, function):
        """Register a single handler for *commandID*."""
        self.commands[commandID] = function

    def registers(self, CommandDict):
        """Register every (command id, handler) pair in *CommandDict*.

        Returns 0 on completion (historic convention, kept for callers).
        """
        for cid, handler in CommandDict.items():
            self.register(cid, handler)
        return 0
{ "content_hash": "00f053bcde7586dc3c1a506f2274c303", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 59, "avg_line_length": 30.59259259259259, "alnum_prop": 0.6343825665859564, "repo_name": "kelvict/Online-GoBang-Center", "id": "4ebcacfaee3cdd86cca5260651b7da0baec1ec99", "size": "850", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Client/Service.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "3724765" } ], "symlink_target": "" }
'''
     _   _ _   _ _
    | | | | |_(_) |___
    | |_| |  _| | (_-<
     \___/ \__|_|_/__/

    Behavioral Cloning Project Utilities

Helpers to load an HDF5 driving dataset, augment/split it, preprocess
simulator frames to 80x80 normalized images, and feed batches to training.
'''
import os
import h5py
import numpy as np
# NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3 — this
# module presumably targets an older SciPy; confirm the pinned version.
from scipy.misc import imresize
from sklearn.utils import shuffle

# Fall back to the pre-0.18 sklearn module layout for train_test_split.
try:
    from sklearn.model_selection import train_test_split
except ImportError:
    from sklearn.cross_validation import train_test_split


def loadDataset(path):
    '''Load every dataset in the HDF5 file at *path* into a dict.

    Each HDF5 dataset is fully materialized as a numpy array (the `[...]`
    slice copies it), so the file can be closed before returning.
    '''
    f = h5py.File(path, 'r')
    dataset = {}
    for key in f.keys():
        dataset[key] = f[key][...]
    f.close()
    return dataset


def augmentAndSplitData(images, angles, size=0.2):
    '''Mirror all non-zero-angle samples, then split into train/validation.

    Every image with a non-zero steering angle is flipped left-right and
    its angle negated, doubling the turning examples. Returns
    ((X_train, y_train), (X_val, y_val)) with *size* fraction held out
    (deterministic: random_state=0).
    '''
    # Only samples that actually steer are worth mirroring.
    mask = angles != 0
    images_new = images[mask].copy()
    angles_new = angles[mask] * -1.
    for i in range(len(images_new)):
        images_new[i, ...] = np.fliplr(images_new[i, ...])
    images = np.concatenate((images, images_new))
    angles = np.concatenate((angles, angles_new))
    X1, X2, y1, y2 = train_test_split(images, angles, test_size=size, random_state=0)
    return (X1, y1), (X2, y2)


def normalize(img):
    '''Normalize (Inception Style): map uint8 pixel values into [-1, 1).'''
    img = img.astype('float32')
    img -= 128.
    img /= 128.
    return img


def preprocessRawImage(img):
    '''Preprocess a raw simulator image: crop, resize to 80x80, normalize.'''
    img = img[60:140, ...]  # crop, discard parts of the image that may confuse the model
    img = imresize(img, (80, 80))  # resize (80x80)
    return normalize(img)  # normalize


def batchGenerator(dataset, batch_size=32, training=False):
    '''Infinite batch generator over *dataset* = (X, y).

    Yields (X_batch, y_batch) float32 arrays; images are normalized on the
    fly. When *training* is True the sample order is reshuffled each epoch.
    Assumes images are 80x80x3 (matches preprocessRawImage output).
    '''
    X, y = dataset
    N = len(X)
    total_batches = int(np.ceil(float(N) / batch_size))
    while True:
        indices = np.arange(N)
        # shuffle indices if training is True
        if training:
            indices = shuffle(indices)
        for i in range(total_batches):
            S = i * batch_size
            E = min((i + 1) * batch_size, N)  # last batch may be short
            # select indices
            selected_indices = indices[S:E]
            nb = len(selected_indices)  # actual batch
            X_batch = np.zeros((nb, 80, 80, 3), dtype='float32')
            y_batch = np.zeros((nb,), dtype='float32')
            j = 0
            for index in selected_indices:
                img, angle = X[index], y[index]
                X_batch[j], y_batch[j] = normalize(img), angle
                j += 1
            yield X_batch, y_batch
{ "content_hash": "91556c8e3e8689ae797d749d57feae31", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 89, "avg_line_length": 28.357142857142858, "alnum_prop": 0.5575146935348446, "repo_name": "andrescv/BehavioralCloning", "id": "668fe9255ffefa0f627263ba29acff1fb2f4dcb1", "size": "2382", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "9875" }, { "name": "Shell", "bytes": "417" } ], "symlink_target": "" }
from foo import foo
{ "content_hash": "165ec81ae6e660c99eb1ec4ce8cd01f2", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 19, "avg_line_length": 20, "alnum_prop": 0.8, "repo_name": "bazelbuild/rules_python", "id": "532f11a8893226a9444d4120e64ec327917209d5", "size": "59", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "gazelle/testdata/from_imports/import_top_level_var/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Go", "bytes": "78630" }, { "name": "Python", "bytes": "116552" }, { "name": "Shell", "bytes": "676" }, { "name": "Starlark", "bytes": "196289" } ], "symlink_target": "" }
from .__about__ import __version__ from .mysql import MySql __all__ = ['__version__', 'MySql']
{ "content_hash": "28415761db64b83045068c4c1d5fb23a", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 34, "avg_line_length": 24, "alnum_prop": 0.59375, "repo_name": "DataDog/integrations-core", "id": "375f5393b9a804133e962df48b3c7cf0ed82a6cd", "size": "211", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mysql/datadog_checks/mysql/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "578" }, { "name": "COBOL", "bytes": "12312" }, { "name": "Dockerfile", "bytes": "22998" }, { "name": "Erlang", "bytes": "15518" }, { "name": "Go", "bytes": "6988" }, { "name": "HCL", "bytes": "4080" }, { "name": "HTML", "bytes": "1318" }, { "name": "JavaScript", "bytes": "1817" }, { "name": "Kotlin", "bytes": "430" }, { "name": "Lua", "bytes": "3489" }, { "name": "PHP", "bytes": "20" }, { "name": "PowerShell", "bytes": "2398" }, { "name": "Python", "bytes": "13020828" }, { "name": "Roff", "bytes": "359" }, { "name": "Ruby", "bytes": "241" }, { "name": "Scala", "bytes": "7000" }, { "name": "Shell", "bytes": "83227" }, { "name": "Swift", "bytes": "203" }, { "name": "TSQL", "bytes": "29972" }, { "name": "TypeScript", "bytes": "1019" } ], "symlink_target": "" }
"""Support for EverLights lights.""" import logging from datetime import timedelta from typing import Tuple import voluptuous as vol from homeassistant.const import CONF_HOSTS from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_HS_COLOR, ATTR_EFFECT, SUPPORT_BRIGHTNESS, SUPPORT_EFFECT, SUPPORT_COLOR, Light, PLATFORM_SCHEMA, ) import homeassistant.helpers.config_validation as cv import homeassistant.util.color as color_util from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.exceptions import PlatformNotReady _LOGGER = logging.getLogger(__name__) SUPPORT_EVERLIGHTS = SUPPORT_EFFECT | SUPPORT_BRIGHTNESS | SUPPORT_COLOR SCAN_INTERVAL = timedelta(minutes=1) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_HOSTS): vol.All(cv.ensure_list, [cv.string])} ) NAME_FORMAT = "EverLights {} Zone {}" def color_rgb_to_int(red: int, green: int, blue: int) -> int: """Return a RGB color as an integer.""" return red * 256 * 256 + green * 256 + blue def color_int_to_rgb(value: int) -> Tuple[int, int, int]: """Return an RGB tuple from an integer.""" return (value >> 16, (value >> 8) & 0xFF, value & 0xFF) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the EverLights lights from configuration.yaml.""" import pyeverlights lights = [] for ipaddr in config[CONF_HOSTS]: api = pyeverlights.EverLights(ipaddr, async_get_clientsession(hass)) try: status = await api.get_status() effects = await api.get_all_patterns() except pyeverlights.ConnectionError: raise PlatformNotReady else: lights.append(EverLightsLight(api, pyeverlights.ZONE_1, status, effects)) lights.append(EverLightsLight(api, pyeverlights.ZONE_2, status, effects)) async_add_entities(lights) class EverLightsLight(Light): """Representation of a Flux light.""" def __init__(self, api, channel, status, effects): """Initialize the light.""" self._api = api self._channel = channel self._status = status self._effects = effects 
self._mac = status["mac"] self._error_reported = False self._hs_color = [255, 255] self._brightness = 255 self._effect = None self._available = True @property def unique_id(self) -> str: """Return a unique ID.""" return f"{self._mac}-{self._channel}" @property def available(self) -> bool: """Return True if entity is available.""" return self._available @property def name(self): """Return the name of the device.""" return NAME_FORMAT.format(self._mac, self._channel) @property def is_on(self): """Return true if device is on.""" return self._status[f"ch{self._channel}Active"] == 1 @property def brightness(self): """Return the brightness of this light between 0..255.""" return self._brightness @property def hs_color(self): """Return the color property.""" return self._hs_color @property def effect(self): """Return the effect property.""" return self._effect @property def supported_features(self): """Flag supported features.""" return SUPPORT_EVERLIGHTS @property def effect_list(self): """Return the list of supported effects.""" return self._effects async def async_turn_on(self, **kwargs): """Turn the light on.""" hs_color = kwargs.get(ATTR_HS_COLOR, self._hs_color) brightness = kwargs.get(ATTR_BRIGHTNESS, self._brightness) effect = kwargs.get(ATTR_EFFECT) if effect is not None: colors = await self._api.set_pattern_by_id(self._channel, effect) rgb = color_int_to_rgb(colors[0]) hsv = color_util.color_RGB_to_hsv(*rgb) hs_color = hsv[:2] brightness = hsv[2] / 100 * 255 else: rgb = color_util.color_hsv_to_RGB(*hs_color, brightness / 255 * 100) colors = [color_rgb_to_int(*rgb)] await self._api.set_pattern(self._channel, colors) self._hs_color = hs_color self._brightness = brightness self._effect = effect async def async_turn_off(self, **kwargs): """Turn the light off.""" await self._api.clear_pattern(self._channel) async def async_update(self): """Synchronize state with control box.""" import pyeverlights try: self._status = await self._api.get_status() except 
pyeverlights.ConnectionError: if self._available: _LOGGER.warning("EverLights control box connection lost.") self._available = False else: if not self._available: _LOGGER.warning("EverLights control box connection restored.") self._available = True
{ "content_hash": "57a0eca539214adf17c019b71c6cd744", "timestamp": "", "source": "github", "line_count": 173, "max_line_length": 86, "avg_line_length": 29.352601156069365, "alnum_prop": 0.620126033871603, "repo_name": "Cinntax/home-assistant", "id": "506617e4c6028d82b84484acd47c83c180592e63", "size": "5078", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/everlights/light.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "17374056" }, { "name": "Shell", "bytes": "6792" } ], "symlink_target": "" }
"""Tests for slim.nasnet.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nets.nasnet import nasnet slim = tf.contrib.slim class NASNetTest(tf.test.TestCase): def testBuildLogitsCifarModel(self): batch_size = 5 height, width = 32, 32 num_classes = 10 inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): logits, end_points = nasnet.build_nasnet_cifar(inputs, num_classes) auxlogits = end_points['AuxLogits'] predictions = end_points['Predictions'] self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes]) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes]) def testBuildLogitsMobileModel(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): logits, end_points = nasnet.build_nasnet_mobile(inputs, num_classes) auxlogits = end_points['AuxLogits'] predictions = end_points['Predictions'] self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes]) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes]) def testBuildLogitsLargeModel(self): batch_size = 5 height, width = 331, 331 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_large_arg_scope()): logits, end_points = nasnet.build_nasnet_large(inputs, num_classes) auxlogits = end_points['AuxLogits'] predictions = end_points['Predictions'] self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes]) 
self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes]) def testBuildPreLogitsCifarModel(self): batch_size = 5 height, width = 32, 32 num_classes = None inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): net, end_points = nasnet.build_nasnet_cifar(inputs, num_classes) self.assertFalse('AuxLogits' in end_points) self.assertFalse('Predictions' in end_points) self.assertTrue(net.op.name.startswith('final_layer/Mean')) self.assertListEqual(net.get_shape().as_list(), [batch_size, 768]) def testBuildPreLogitsMobileModel(self): batch_size = 5 height, width = 224, 224 num_classes = None inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): net, end_points = nasnet.build_nasnet_mobile(inputs, num_classes) self.assertFalse('AuxLogits' in end_points) self.assertFalse('Predictions' in end_points) self.assertTrue(net.op.name.startswith('final_layer/Mean')) self.assertListEqual(net.get_shape().as_list(), [batch_size, 1056]) def testBuildPreLogitsLargeModel(self): batch_size = 5 height, width = 331, 331 num_classes = None inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_large_arg_scope()): net, end_points = nasnet.build_nasnet_large(inputs, num_classes) self.assertFalse('AuxLogits' in end_points) self.assertFalse('Predictions' in end_points) self.assertTrue(net.op.name.startswith('final_layer/Mean')) self.assertListEqual(net.get_shape().as_list(), [batch_size, 4032]) def testAllEndPointsShapesCifarModel(self): batch_size = 5 height, width = 32, 32 num_classes = 10 inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): _, end_points = 
nasnet.build_nasnet_cifar(inputs, num_classes) endpoints_shapes = {'Stem': [batch_size, 32, 32, 96], 'Cell_0': [batch_size, 32, 32, 192], 'Cell_1': [batch_size, 32, 32, 192], 'Cell_2': [batch_size, 32, 32, 192], 'Cell_3': [batch_size, 32, 32, 192], 'Cell_4': [batch_size, 32, 32, 192], 'Cell_5': [batch_size, 32, 32, 192], 'Cell_6': [batch_size, 16, 16, 384], 'Cell_7': [batch_size, 16, 16, 384], 'Cell_8': [batch_size, 16, 16, 384], 'Cell_9': [batch_size, 16, 16, 384], 'Cell_10': [batch_size, 16, 16, 384], 'Cell_11': [batch_size, 16, 16, 384], 'Cell_12': [batch_size, 8, 8, 768], 'Cell_13': [batch_size, 8, 8, 768], 'Cell_14': [batch_size, 8, 8, 768], 'Cell_15': [batch_size, 8, 8, 768], 'Cell_16': [batch_size, 8, 8, 768], 'Cell_17': [batch_size, 8, 8, 768], 'Reduction_Cell_0': [batch_size, 16, 16, 256], 'Reduction_Cell_1': [batch_size, 8, 8, 512], 'global_pool': [batch_size, 768], # Logits and predictions 'AuxLogits': [batch_size, num_classes], 'Logits': [batch_size, num_classes], 'Predictions': [batch_size, num_classes]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name in endpoints_shapes: tf.logging.info('Endpoint name: {}'.format(endpoint_name)) expected_shape = endpoints_shapes[endpoint_name] self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) def testNoAuxHeadCifarModel(self): batch_size = 5 height, width = 32, 32 num_classes = 10 for use_aux_head in (True, False): tf.reset_default_graph() inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() config = nasnet.cifar_config() config.set_hparam('use_aux_head', int(use_aux_head)) with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): _, end_points = nasnet.build_nasnet_cifar(inputs, num_classes, config=config) self.assertEqual('AuxLogits' in end_points, use_aux_head) def testAllEndPointsShapesMobileModel(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 
inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): _, end_points = nasnet.build_nasnet_mobile(inputs, num_classes) endpoints_shapes = {'Stem': [batch_size, 28, 28, 88], 'Cell_0': [batch_size, 28, 28, 264], 'Cell_1': [batch_size, 28, 28, 264], 'Cell_2': [batch_size, 28, 28, 264], 'Cell_3': [batch_size, 28, 28, 264], 'Cell_4': [batch_size, 14, 14, 528], 'Cell_5': [batch_size, 14, 14, 528], 'Cell_6': [batch_size, 14, 14, 528], 'Cell_7': [batch_size, 14, 14, 528], 'Cell_8': [batch_size, 7, 7, 1056], 'Cell_9': [batch_size, 7, 7, 1056], 'Cell_10': [batch_size, 7, 7, 1056], 'Cell_11': [batch_size, 7, 7, 1056], 'Reduction_Cell_0': [batch_size, 14, 14, 352], 'Reduction_Cell_1': [batch_size, 7, 7, 704], 'global_pool': [batch_size, 1056], # Logits and predictions 'AuxLogits': [batch_size, num_classes], 'Logits': [batch_size, num_classes], 'Predictions': [batch_size, num_classes]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name in endpoints_shapes: tf.logging.info('Endpoint name: {}'.format(endpoint_name)) expected_shape = endpoints_shapes[endpoint_name] self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) def testNoAuxHeadMobileModel(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 for use_aux_head in (True, False): tf.reset_default_graph() inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() config = nasnet.mobile_imagenet_config() config.set_hparam('use_aux_head', int(use_aux_head)) with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): _, end_points = nasnet.build_nasnet_mobile(inputs, num_classes, config=config) self.assertEqual('AuxLogits' in end_points, use_aux_head) def testAllEndPointsShapesLargeModel(self): batch_size = 5 height, width = 331, 331 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, 
width, 3)) tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_large_arg_scope()): _, end_points = nasnet.build_nasnet_large(inputs, num_classes) endpoints_shapes = {'Stem': [batch_size, 42, 42, 336], 'Cell_0': [batch_size, 42, 42, 1008], 'Cell_1': [batch_size, 42, 42, 1008], 'Cell_2': [batch_size, 42, 42, 1008], 'Cell_3': [batch_size, 42, 42, 1008], 'Cell_4': [batch_size, 42, 42, 1008], 'Cell_5': [batch_size, 42, 42, 1008], 'Cell_6': [batch_size, 21, 21, 2016], 'Cell_7': [batch_size, 21, 21, 2016], 'Cell_8': [batch_size, 21, 21, 2016], 'Cell_9': [batch_size, 21, 21, 2016], 'Cell_10': [batch_size, 21, 21, 2016], 'Cell_11': [batch_size, 21, 21, 2016], 'Cell_12': [batch_size, 11, 11, 4032], 'Cell_13': [batch_size, 11, 11, 4032], 'Cell_14': [batch_size, 11, 11, 4032], 'Cell_15': [batch_size, 11, 11, 4032], 'Cell_16': [batch_size, 11, 11, 4032], 'Cell_17': [batch_size, 11, 11, 4032], 'Reduction_Cell_0': [batch_size, 21, 21, 1344], 'Reduction_Cell_1': [batch_size, 11, 11, 2688], 'global_pool': [batch_size, 4032], # Logits and predictions 'AuxLogits': [batch_size, num_classes], 'Logits': [batch_size, num_classes], 'Predictions': [batch_size, num_classes]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name in endpoints_shapes: tf.logging.info('Endpoint name: {}'.format(endpoint_name)) expected_shape = endpoints_shapes[endpoint_name] self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) def testNoAuxHeadLargeModel(self): batch_size = 5 height, width = 331, 331 num_classes = 1000 for use_aux_head in (True, False): tf.reset_default_graph() inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() config = nasnet.large_imagenet_config() config.set_hparam('use_aux_head', int(use_aux_head)) with slim.arg_scope(nasnet.nasnet_large_arg_scope()): _, end_points = nasnet.build_nasnet_large(inputs, num_classes, config=config) 
self.assertEqual('AuxLogits' in end_points, use_aux_head) def testVariablesSetDeviceMobileModel(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() # Force all Variables to reside on the device. with tf.variable_scope('on_cpu'), tf.device('/cpu:0'): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): nasnet.build_nasnet_mobile(inputs, num_classes) with tf.variable_scope('on_gpu'), tf.device('/gpu:0'): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): nasnet.build_nasnet_mobile(inputs, num_classes) for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'): self.assertDeviceEqual(v.device, '/cpu:0') for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'): self.assertDeviceEqual(v.device, '/gpu:0') def testUnknownBatchSizeMobileModel(self): batch_size = 1 height, width = 224, 224 num_classes = 1000 with self.test_session() as sess: inputs = tf.placeholder(tf.float32, (None, height, width, 3)) with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): logits, _ = nasnet.build_nasnet_mobile(inputs, num_classes) self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3)) sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) def testEvaluationMobileModel(self): batch_size = 2 height, width = 224, 224 num_classes = 1000 with self.test_session() as sess: eval_inputs = tf.random_uniform((batch_size, height, width, 3)) with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): logits, _ = nasnet.build_nasnet_mobile(eval_inputs, num_classes, is_training=False) predictions = tf.argmax(logits, 1) sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) def testOverrideHParamsCifarModel(self): batch_size = 5 
height, width = 32, 32 num_classes = 10 inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() config = nasnet.cifar_config() config.set_hparam('data_format', 'NCHW') with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): _, end_points = nasnet.build_nasnet_cifar( inputs, num_classes, config=config) self.assertListEqual( end_points['Stem'].shape.as_list(), [batch_size, 96, 32, 32]) def testOverrideHParamsMobileModel(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() config = nasnet.mobile_imagenet_config() config.set_hparam('data_format', 'NCHW') with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): _, end_points = nasnet.build_nasnet_mobile( inputs, num_classes, config=config) self.assertListEqual( end_points['Stem'].shape.as_list(), [batch_size, 88, 28, 28]) def testOverrideHParamsLargeModel(self): batch_size = 5 height, width = 331, 331 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) tf.train.create_global_step() config = nasnet.large_imagenet_config() config.set_hparam('data_format', 'NCHW') with slim.arg_scope(nasnet.nasnet_large_arg_scope()): _, end_points = nasnet.build_nasnet_large( inputs, num_classes, config=config) self.assertListEqual( end_points['Stem'].shape.as_list(), [batch_size, 336, 42, 42]) def testCurrentStepCifarModel(self): batch_size = 5 height, width = 32, 32 num_classes = 10 inputs = tf.random_uniform((batch_size, height, width, 3)) global_step = tf.train.create_global_step() with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): logits, end_points = nasnet.build_nasnet_cifar(inputs, num_classes, current_step=global_step) auxlogits = end_points['AuxLogits'] predictions = end_points['Predictions'] self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes]) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) 
self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes]) if __name__ == '__main__': tf.test.main()
{ "content_hash": "61501cf66e2da9a0db3f943dac97471a", "timestamp": "", "source": "github", "line_count": 380, "max_line_length": 78, "avg_line_length": 46.41842105263158, "alnum_prop": 0.5770735302454788, "repo_name": "cshallue/models", "id": "252618120a02e8ef286c38aa1edd9a9de62cb67f", "size": "18324", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "research/slim/nets/nasnet/nasnet_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "1523636" }, { "name": "Dockerfile", "bytes": "9821" }, { "name": "GLSL", "bytes": "976" }, { "name": "HTML", "bytes": "147010" }, { "name": "JavaScript", "bytes": "33208" }, { "name": "Jupyter Notebook", "bytes": "2829707" }, { "name": "Makefile", "bytes": "4933" }, { "name": "Python", "bytes": "13149300" }, { "name": "Shell", "bytes": "146035" } ], "symlink_target": "" }
import os
from setuptools import find_packages, setup

# Long description for PyPI is taken verbatim from the README.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='voltaire.website',
    version='0.0.0+git',
    # Exclude the ez_setup bootstrap helper from distributed packages.
    packages=find_packages(exclude=['ez_setup']),
    # 'voltaire' is shared with other distributions as a namespace package.
    namespace_packages=['voltaire'],
    include_package_data=True,
    # NOTE(review): license says MIT but the classifier below declares a
    # BSD license — confirm which one is intended.
    license='MIT',
    description='website',
    long_description=README,
    url='https://github.com/voltaire/website',
    author='Jon Chen',
    author_email='bsd@voltaire.sh',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    # Pinned runtime dependencies.
    install_requires=[
        'alembic==0.7.6',
        'Flask==0.10.1',
        'Flask-Migrate==1.4.0',
        'Flask-OpenID==1.2.4',
        'Flask-Script==2.0.5',
        'Flask-SQLAlchemy==2.0',
        'itsdangerous==0.24',
        'Jinja2==2.7.3',
        'Mako==1.0.1',
        'MarkupSafe==0.23',
        'SQLAlchemy==1.0.4',
        'Werkzeug==0.10.4',
        'podhub.meh==0.1.12',
        'psycopg2==2.6',
    ],
    scripts=[
        'scripts/voltairemc_site',
    ],
)
{ "content_hash": "da95c0872852bd8939040ce38955a4e9", "timestamp": "", "source": "github", "line_count": 51, "max_line_length": 78, "avg_line_length": 29.862745098039216, "alnum_prop": 0.5712409717662508, "repo_name": "voltaire/website", "id": "2e6b88368f3fae3a717e49394f7113211f7bc956", "size": "1546", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33261", "license": "mit", "language": [ { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "14581" } ], "symlink_target": "" }
"""Tests for prog_sequential.""" import random as py_rand import time from absl import logging from jax import random import jax.numpy as jnp from absl.testing import absltest as test from abstract_nas.abstract import depth from abstract_nas.abstract import linear from abstract_nas.abstract import shape from abstract_nas.model import Model from abstract_nas.model import subgraph from abstract_nas.model.subgraph import SubgraphModel from abstract_nas.synthesis.prog_sequential import ProgressiveSequentialSynthesizer from abstract_nas.zoo import cnn class ProgSequentialTest(test.TestCase): def setUp(self): super().setUp() seed = int(time.time()) logging.info("Seed: %d", seed) py_rand.seed(seed) self.graph, self.constants, _ = cnn.CifarNet() self.m = Model(self.graph, self.constants) self.input = {"input": jnp.ones((5, 32, 32, 3))} self.state = self.m.init(random.PRNGKey(0), self.input) self.out = self.m.apply(self.state, self.input)["fc/logits"] self.max_size = int(10e8) self.hard = False def _synthesize(self, subg, props): synthesizer = ProgressiveSequentialSynthesizer( [(subg, props)], generation=0, mode=ProgressiveSequentialSynthesizer.Mode.WEIGHTED, max_len=3) subg = synthesizer.synthesize()[0] subg_spec = subg.subgraph for node in subg_spec: print(node.op.name) print(node.output_names) m = Model(subg.graph, self.constants) state = m.init(random.PRNGKey(0), self.input) out = m.apply(state, self.input)["fc/logits"] self.assertTrue((out != self.out).any()) def test_synthesizer_easy_one(self): """Replacing [conv3x3(features = 64)].""" subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:5]] subg[-1].output_names = self.graph.ops[5].input_names subgraph_model = SubgraphModel( self.graph, self.constants, self.state, self.input, subg) sp = shape.ShapeProperty().infer(subgraph_model, max_size=self.max_size) dp = depth.DepthProperty().infer(subgraph_model) # lp = linear.LinopProperty().infer(subgraph) self._synthesize(subgraph_model, [sp, dp]) def 
test_synthesizer_easy_two(self): """Replacing [conv3x3(features = 64)].""" subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:5]] subg[-1].output_names = self.graph.ops[5].input_names subgraph_model = SubgraphModel( self.graph, self.constants, self.state, self.input, subg) sp = shape.ShapeProperty().infer(subgraph_model, max_size=self.max_size) dp = depth.DepthProperty().infer(subgraph_model) lp = linear.LinopProperty().infer(subgraph_model) self._synthesize(subgraph_model, [sp, dp, lp]) def test_synthesizer_one(self): """Replacing [conv3x3(features = 64), ReLU].""" subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:6]] subg[-1].output_names = self.graph.ops[6].input_names subgraph_model = SubgraphModel( self.graph, self.constants, self.state, self.input, subg) sp = shape.ShapeProperty().infer(subgraph_model, max_size=self.max_size) dp = depth.DepthProperty().infer(subgraph_model) # lp = linear.LinopProperty().infer(subgraph_model) self._synthesize(subgraph_model, [sp, dp]) def test_synthesizer_two(self): """Replacing [conv3x3(features = 64), ReLU, avgpool2x2(strides=2x2)].""" subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:7]] subg[-1].output_names = self.graph.ops[7].input_names subgraph_model = SubgraphModel( self.graph, self.constants, self.state, self.input, subg) sp = shape.ShapeProperty().infer(subgraph_model, max_size=self.max_size) # dp = depth.DepthProperty().infer(subgraph_model) lp = linear.LinopProperty().infer(subgraph_model) self._synthesize(subgraph_model, [sp, lp]) def test_synthesizer_hard(self): if not self.hard: return subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:7]] subg[-1].output_names = self.graph.ops[7].input_names subgraph_model = SubgraphModel( self.graph, self.constants, self.state, self.input, subg) sp = shape.ShapeProperty().infer(subgraph_model, max_size=self.max_size) dp = depth.DepthProperty().infer(subgraph_model) lp = linear.LinopProperty().infer(subgraph_model) 
self._synthesize(subgraph_model, [sp, dp, lp]) if __name__ == "__main__": test.main()
{ "content_hash": "58fe53c81d4bd4394fbdc793c3faaeea", "timestamp": "", "source": "github", "line_count": 115, "max_line_length": 83, "avg_line_length": 38.321739130434786, "alnum_prop": 0.6898116632629907, "repo_name": "google-research/google-research", "id": "f88bf05b15f00b85356ce289436e370818cacbdf", "size": "5015", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "abstract_nas/synthesis/prog_sequential_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "9817" }, { "name": "C++", "bytes": "4166670" }, { "name": "CMake", "bytes": "6412" }, { "name": "CSS", "bytes": "27092" }, { "name": "Cuda", "bytes": "1431" }, { "name": "Dockerfile", "bytes": "7145" }, { "name": "Gnuplot", "bytes": "11125" }, { "name": "HTML", "bytes": "77599" }, { "name": "ImageJ Macro", "bytes": "50488" }, { "name": "Java", "bytes": "487585" }, { "name": "JavaScript", "bytes": "896512" }, { "name": "Julia", "bytes": "67986" }, { "name": "Jupyter Notebook", "bytes": "71290299" }, { "name": "Lua", "bytes": "29905" }, { "name": "MATLAB", "bytes": "103813" }, { "name": "Makefile", "bytes": "5636" }, { "name": "NASL", "bytes": "63883" }, { "name": "Perl", "bytes": "8590" }, { "name": "Python", "bytes": "53790200" }, { "name": "R", "bytes": "101058" }, { "name": "Roff", "bytes": "1208" }, { "name": "Rust", "bytes": "2389" }, { "name": "Shell", "bytes": "730444" }, { "name": "Smarty", "bytes": "5966" }, { "name": "Starlark", "bytes": "245038" } ], "symlink_target": "" }
"""Match Robert Shaw preservation-master videos on HDD volumes to their
catalog metadata and write one "central" CSV per volume.

Usage:
    python idMatching_video.py <metadata_csv> <volume_dir> [<volume_dir> ...]
"""
import csv
import os
import sys

# Column order of the source metadata CSV; also used as the output header.
FIELDS = [
    'Source ID',
    'Original Format',
    'Date',
    'Date (FreeText)',
    'OPAS EVENT ID',
    'Series',
    'Venue',
    'Description',
    'Carnegie Hall Archives Collection',
    'Notes/Container Annotation',
    'Generation',
    'Recording Standard',
    'Stereo/Mono',
    'Preservation Master Filename',
    'pres master checksum value',
]


def load_metadata(metadata_path):
    """Read the RS metadata CSV into a dict keyed by preservation filename.

    Each value is a dict mapping the FIELDS column names to the row values,
    in the same column order as the source file.
    """
    # BUG FIX: the original opened with mode 'rU', which was removed in
    # Python 3.11.  Per the csv docs, open with newline='' instead.
    video_dict = {}
    with open(metadata_path, 'r', newline='') as f:
        for row in csv.reader(f, dialect='excel', delimiter=','):
            # Column 13 is the preservation master filename — the match key.
            video_dict[row[13]] = dict(zip(FIELDS, row))
    return video_dict


def write_volume_csv(volume_path, video_dict):
    """Write Central_RobertShaw_<volume>.csv for one HDD volume directory.

    Only files ending in '_pm.mov' (preservation masters) are matched;
    anything else (e.g. Thumbs.db) is ignored.  A preservation master with
    no metadata row raises KeyError, surfacing the mismatch immediately.
    """
    volume = os.path.split(volume_path)[1]
    filenames = [
        name for name in os.listdir(volume_path)
        if os.path.isfile(os.path.join(volume_path, name))
    ]
    central = {
        name: video_dict[name]
        for name in filenames
        if name.endswith('_pm.mov')
    }
    csv_path = f'{volume_path}/Central_RobertShaw_{volume}.csv'
    with open(csv_path, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, FIELDS)
        writer.writeheader()
        for entry in central.values():
            writer.writerow({field: entry.get(field) for field in FIELDS})
    print("Central CSVs created for ", volume)


def main(argv=None):
    """Entry point: argv[1] is the metadata CSV, argv[2:] are volume dirs."""
    argv = sys.argv if argv is None else argv
    video_dict = load_metadata(argv[1])
    for volume_path in argv[2:]:
        write_volume_csv(volume_path, video_dict)


if __name__ == '__main__':
    main()
{ "content_hash": "dc09448beeba16c28563f77b725cde3b", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 302, "avg_line_length": 42.01149425287356, "alnum_prop": 0.7056087551299589, "repo_name": "CarnegieHall/metadata-matching", "id": "092bdca2110ae90b44cf5c77f4e8af36df37951e", "size": "4100", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "idMatching_video.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "35192" } ], "symlink_target": "" }
"""Audio classification: run a frozen TensorFlow CNN over audio features."""

import logging
import os
import subprocess
import warnings
from collections import Counter

import librosa
import numpy as np
import pandas
import tensorflow as tf

# Login name of the current user (first line of `whoami` output).
OS_USER = subprocess.check_output(["whoami"], universal_newlines=True).splitlines()[0]


def load_graph(frozen_graph_filename):
    """Load a frozen TensorFlow model graph.

    :param frozen_graph_filename: path to a frozen ``.pb`` graph file
    :returns: a tf.Graph with all ops imported under the ``prefix`` scope
    """
    logging.info('Loading frozen model-graph: ' + frozen_graph_filename)

    # Parse the serialized GraphDef protobuf from disk.
    logging.debug('Reading model file')
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Import the GraphDef into a fresh Graph, namespaced under "prefix".
    logging.debug('Importing graph')
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )
    return graph


def load_audio(audio_path):
    """Load an audio file and compute the features used by the CNN.

    :param audio_path: path to the audio file
    :returns: single-row pandas.DataFrame with columns 'data' (raw samples),
        'mels' (mel spectrogram) and 'mfcc' (MFCCs)
    """
    logging.info('Loading audio file: ' + audio_path)
    # Fixed 48 kHz mono so feature shapes are consistent across inputs.
    y, sr = librosa.core.load(audio_path, sr=48000, mono=True)
    mel = librosa.feature.melspectrogram(y=y, sr=sr)
    mfcc = librosa.feature.mfcc(y=y, sr=sr)
    # BUG FIX: DataFrame.append() was removed in pandas 2.0; build the
    # single-row frame directly instead of appending to an empty frame.
    return pandas.DataFrame(
        {'data': [y], 'mels': [mel], 'mfcc': [mfcc]},
        columns=['data', 'mels', 'mfcc'],
    )


def predict(audio_path, graph, mapping):
    """Classify one audio file with the given frozen graph.

    :param audio_path: path to audio file
    :param graph: already loaded tensor graph (see :func:`load_graph`)
    :param mapping: dictionary mapping the numeric output of the network to a label
    :returns: tuple of (predicted label, Counter of per-label scores)
    """
    logging.info('Prediction START')

    ds = load_audio(audio_path)
    logging.info('Audio and model loading DONE')

    # Flatten the mel spectrogram into the CNN's input vector.
    audio_feature = np.asanyarray(list(ds.mels[0].flatten()), dtype=np.float32)
    x = graph.get_tensor_by_name('prefix/input:0')
    y = graph.get_tensor_by_name('prefix/softmax_tensor:0')

    with tf.Session(graph=graph) as sess:
        y_out = sess.run(y, feed_dict={
            # NOTE(review): original comment here said "< 45" — presumably
            # an input-length constraint; confirm against training code.
            x: [audio_feature]
        })

    logging.info('predictions:' + str(y_out))
    return mapping[y_out[0].argmax()], Counter({mapping[i]: y_out[0][i] for i in range(len(mapping))})


def hierarchical_predict(audio_path):
    """Classify ``audio_path`` with the whole hierarchical model cascade.

    Deprecated: kept for reference; prefer :func:`predict` with a flat model.
    """
    # BUG FIX: ``@DeprecationWarning`` used as a decorator replaced this
    # function with a (non-callable) warning instance.  Warn at call time
    # instead so the function remains usable.
    warnings.warn('hierarchical_predict is deprecated; use predict()',
                  DeprecationWarning, stacklevel=2)
    logging.info('Hierarchical prediction START')

    model_door_not_door = load_graph(
        os.path.join('Classifier', 'model', 'door_not_door', 'frozen', 'frozen_model.pb'))
    model_person_not_person = load_graph(
        os.path.join('Classifier', 'model', 'person_not_person', 'frozen', 'frozen_model.pb'))
    model_only_people = load_graph(
        os.path.join('Classifier', 'model', 'only_people', 'frozen', 'frozen_model.pb'))

    mapping_door_not_door = {
        0: 'nobody',
        1: 'door',
    }
    mapping_person_not_person = {
        2: 'person',
        0: 'exit',
        1: 'bell',
    }
    mapping_only_people = {
        0: 'alessio',
        1: 'andrea',
        2: 'debora',
        3: 'mamma',
        4: 'papa',
    }

    # BUG FIX: predict() returns a (label, scores) tuple; the original
    # compared the whole tuple against label strings, which never matched.
    label, _ = predict(audio_path, model_door_not_door, mapping_door_not_door)
    if label == 'nobody':
        return 'nobody'
    label, _ = predict(audio_path, model_person_not_person, mapping_person_not_person)
    if label in ['bell', 'exit']:
        return label
    label, _ = predict(audio_path, model_only_people, mapping_only_people)
    return label
{ "content_hash": "83c9b73c545a0a4497aeac4e2b39cc37", "timestamp": "", "source": "github", "line_count": 116, "max_line_length": 119, "avg_line_length": 33.956896551724135, "alnum_prop": 0.6339172378776339, "repo_name": "Oneiroe/SmartSlam", "id": "f002ada2970b7de4b3a5a5b28fddda24f362ac35", "size": "3939", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Classifier/classifier.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "5877" }, { "name": "Python", "bytes": "49253" }, { "name": "Shell", "bytes": "5274" } ], "symlink_target": "" }
from django.contrib import admin

from .models import BadgeAward


@admin.register(BadgeAward)
class BadgeAwardAdmin(admin.ModelAdmin):
    # Pixel size for badge thumbnails.  Not referenced in this file —
    # presumably used by a thumbnail helper elsewhere (TODO confirm).
    THUMB_SIZE = 40

    list_display = ('user', 'slug', 'name', 'level', 'points', 'awarded_on')
    # Raw-id widget avoids rendering a full user dropdown on large tables.
    raw_id_fields = ('user', )
    list_filter = ('slug', 'level', 'points', )
    search_fields = ('slug', 'level')
{ "content_hash": "34fb8004e5f560341490308b97651413", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 76, "avg_line_length": 27, "alnum_prop": 0.6467236467236467, "repo_name": "fgmacedo/django-awards", "id": "a300fdb4a120090de268cfc886c93d0bab78cffa", "size": "351", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "awards/admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1561" }, { "name": "Python", "bytes": "17577" } ], "symlink_target": "" }
""" Cache directory auto-detection for Windows OS. """ import os from emdu.cache_detector.base_loader import BaseCacheLoader class WindowsCacheDetector(BaseCacheLoader): """ Cache detection for Windows. """ def _get_eve_dirs(self): """ Platform-specific searching for EVE installation directories. :rtype: generator :returns: A generator of path strings to EVE installations. """ # The install locations are pretty consistent with Windows, since # there's an official installer.fd home_dir = os.path.expanduser('~/') default_path = os.path.join( home_dir, "AppData/Local/CCP/EVE/" ) if os.path.exists(default_path): return [default_path] else: return []
{ "content_hash": "79f9de28208f905d27583a1ef5e37e9e", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 73, "avg_line_length": 24.87878787878788, "alnum_prop": 0.6065773447015834, "repo_name": "gtaylor/EVE-Market-Data-Uploader", "id": "2f2aa25dd1217f9c44f8c7cf159edc3803de086a", "size": "821", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "emdu/cache_detector/windows.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "34575" } ], "symlink_target": "" }
from __future__ import unicode_literals

import six

from .base import BasePipe
from ...conf import settings
from ...backends import storage
from ...backends.exceptions import NodeDoesNotExist


class StoragePipe(BasePipe):
    """Pipe that loads nodes from / persists nodes to the storage backend."""

    def get_request(self, request):
        """Fetch requested URIs from storage.

        Nodes found in storage are popped from ``request``, materialized,
        and returned keyed by the URI they were requested under.
        """
        response = {}
        stored_nodes = storage.get_many(request.keys())
        for uri, stored_node in six.iteritems(stored_nodes):
            # BUG FIX: the original wrote ``response[node.uri] = ...`` which
            # reads ``node`` before it is bound (NameError on the first hit).
            # Key the response by the URI the node was requested under.
            node = response[uri] = request.pop(uri)
            self.materialize_node(node, **stored_node)
        return response

    def get_response(self, response):
        """Redirect nodes without extension (non-persisted) to the default."""
        if response:
            for node in response.values():
                if not node.uri.ext:
                    node.uri = node.uri.clone(ext=settings.URI_DEFAULT_EXT)
        return response

    def set_request(self, request):
        """Persist each node and adopt the URI/meta the backend assigned."""
        for node in request.values():
            stored_node, _ = storage.set(node.uri, node.content, **node.meta)
            node.uri = stored_node['uri']
            node.meta = stored_node['meta']

    def delete_request(self, request):
        """Delete the requested URIs and materialize the deleted nodes."""
        deleted_nodes = storage.delete_many(request.keys())
        for uri, deleted_node in six.iteritems(deleted_nodes):
            node = request[uri]
            # Content set to None signals that the node has been deleted.
            deleted_node['content'] = None
            self.materialize_node(node, **deleted_node)

    def publish_request(self, request):
        """Publish each node; drop entries whose node no longer exists."""
        for uri, node in list(request.items()):
            try:
                published_node = storage.publish(uri, **node.meta)
            except NodeDoesNotExist:
                request.pop(uri)
            else:
                self.materialize_node(node, **published_node)


class NamespaceFallbackPipe(BasePipe):
    """Pipe that retries storage lookups through fallback namespaces."""

    def get_request(self, request):
        """Resolve request URIs through each node's fallback namespaces.

        For every node, the namespaces after the first one on its env's
        scheme entry are tried level by level; the first level at which
        storage has the URI wins.
        """
        response = {}
        fallback_uris = {}

        # Build the fallback URI map: requested URI -> remaining candidates.
        for uri, node in six.iteritems(request):
            namespaces = getattr(node.env, uri.scheme)[1:]
            if namespaces:
                fallback_uris[node.uri] = [
                    uri.clone(namespace=namespace) for namespace in namespaces
                ]

        # Fetch nodes from storage, one fallback level slice at a time.
        while fallback_uris:
            level_uris = dict(
                (fallback.pop(0), uri)
                for uri, fallback in six.iteritems(fallback_uris)
            )
            stored_nodes = storage.get_many(level_uris.keys())

            # Materialize found nodes into the response.
            for uri, stored_node in six.iteritems(stored_nodes):
                requested_uri = level_uris[uri]
                # BUG FIX: the original keyed the response by a stale ``node``
                # leaked from the loop above; key by the requested URI, which
                # is also the key the node was popped under.
                node = response[requested_uri] = request.pop(requested_uri)
                self.materialize_node(node, **stored_node)
                # BUG FIX: stop trying deeper fallbacks for a resolved URI —
                # otherwise a later level could pop the same request key twice.
                fallback_uris.pop(requested_uri, None)

            # Remove exhausted uris that have run out of fallback namespaces.
            for uri, fallback in list(fallback_uris.items()):
                if not fallback:
                    fallback_uris.pop(uri)

        return response
{ "content_hash": "cc32fc520510da3c9a1538cff2b4dbd3", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 103, "avg_line_length": 35.735632183908045, "alnum_prop": 0.5956899324541653, "repo_name": "5monkeys/content-io", "id": "ca6807afe95153847b64e964ab9eaf1b6e311678", "size": "3124", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cio/pipeline/pipes/storage.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "717" }, { "name": "Python", "bytes": "112816" } ], "symlink_target": "" }
from django.contrib import admin
from django.urls import include, path, re_path

from testapp import views

# URL map for the end-to-end test app: a handful of demo views plus the
# django_prometheus URLs mounted at the root via include().
urlpatterns = [
    re_path(r"^$", views.index),
    re_path(r"^help$", views.help),
    # Named so tests can reverse()/resolve it.
    re_path(r"^slow$", views.slow, name="slow"),
    re_path(r"^objection$", views.objection),
    re_path(r"^sql$", views.sql),
    re_path(r"^newlawn/(?P<location>[a-zA-Z0-9 ]+)$", views.newlawn),
    re_path(r"^file$", views.file),
    # django_prometheus endpoints (presumably /metrics — confirm in that app).
    path("", include("django_prometheus.urls")),
    re_path(r"^admin/", admin.site.urls),
]
{ "content_hash": "7b2605456e3df30c0ded31013fcacfcb", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 69, "avg_line_length": 34.666666666666664, "alnum_prop": 0.6307692307692307, "repo_name": "korfuri/django-prometheus", "id": "2aa3d75040856cdd5363c9baf30511cfd32cbbf8", "size": "520", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django_prometheus/tests/end2end/testapp/urls.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "5116" }, { "name": "Python", "bytes": "81845" } ], "symlink_target": "" }
from enum import Enum

__all__ = (
    "ActionResult",
    "ActionState",
    "ActionType",
    "StepStatus",
    "VersionType",
)


class VersionType(Enum):
    """Kind of release a version refers to."""

    FULL = "FULL RELEASE"
    DEV = "DEV BUILD"
    RC = "RELEASE CANDIDATE"


class StepStatus(Enum):
    """Outcome of a whole step."""

    PASS = "PASS"
    FAIL = "FAIL"
    SKIP = "SKIP"


class ActionResult(Enum):
    """Outcome of an individual action."""

    PASS = "PASS"
    FAIL = "FAIL"
    SKIP = "SKIP"


class ActionState(Enum):
    """Lifecycle state of an action."""

    PENDING = "PENDING"
    STARTED = "STARTED"
    COMPLETED = "COMPLETED"


class ActionType(Enum):
    """Whether an action verifies something or performs work."""

    CHECK = "CHECK"
    TASK = "TASK"
{ "content_hash": "86ea690bb9858f9e898de6669b4ff500", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 28, "avg_line_length": 14.842105263157896, "alnum_prop": 0.5851063829787234, "repo_name": "ericmjl/bokeh", "id": "ba80cb6900bc5f916c1fa099a0dc8feef5779f9d", "size": "925", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "release/enums.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "1442" }, { "name": "CSS", "bytes": "102094" }, { "name": "CoffeeScript", "bytes": "462899" }, { "name": "HTML", "bytes": "46193" }, { "name": "JavaScript", "bytes": "24563" }, { "name": "Makefile", "bytes": "1150" }, { "name": "Python", "bytes": "2705341" }, { "name": "Shell", "bytes": "8995" }, { "name": "TypeScript", "bytes": "1468288" } ], "symlink_target": "" }
from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, TypeVar, Union

import click

from valohai_cli.help_texts import EXECUTION_COUNTER_HELP

FuncT = TypeVar('FuncT', bound=Callable[..., Any])


def _default_name_formatter(option: Any) -> str:
    """Format an option for display; dicts are shown by their 'name' value."""
    if isinstance(option, dict) and 'name' in option:
        return str(option['name'])
    return str(option)


def prompt_from_list(
    options: Sequence[dict],
    prompt: str,
    nonlist_validator: Optional[Callable[[str], Optional[Any]]] = None,
    name_formatter: Callable[[dict], str] = _default_name_formatter,
) -> Union[Any, dict]:
    """Print a numbered menu of `options` and prompt until a choice is made.

    The user may answer with the option's 1-based number or its exact 'name'.
    If `nonlist_validator` is given, any other answer is passed to it and its
    truthy return value is returned as-is.  Invalid answers re-prompt.
    """
    for index, option in enumerate(options, 1):
        number_prefix = click.style(f'[{index:3d}]', fg='cyan')
        description_suffix = (click.style(f'({option["description"]})', dim=True) if option.get('description') else '')
        click.echo(f'{number_prefix} {name_formatter(option)} {description_suffix}')
    while True:
        answer = click.prompt(prompt)
        # `isdecimal()` instead of `isdigit()`: isdigit() also accepts
        # characters such as "²" that int() cannot parse, which would make
        # this line crash with ValueError instead of re-prompting.
        if answer.isdecimal() and (1 <= int(answer) <= len(options)):
            return options[int(answer) - 1]
        if nonlist_validator:
            retval = nonlist_validator(answer)
            if retval:
                return retval
        for option in options:
            if answer == option['name']:
                return option
        click.secho('Sorry, try again.')


class HelpfulArgument(click.Argument):
    """A `click.Argument` that carries a `help` text (plain arguments can't)."""

    def __init__(self, param_decls: List[str], help: Optional[str] = None, **kwargs: Any) -> None:
        self.help = help
        super().__init__(param_decls, **kwargs)

    def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]:  # noqa: U100
        """Return a (name, help) pair so the argument appears in --help output."""
        if self.name and self.help:
            return (self.name, self.help)
        return None


def counter_argument(fn: FuncT) -> FuncT:
    """Decorate a command with a `counter` argument documented via HelpfulArgument."""
    # Extra gymnastics needed because `click.argument` mutates the kwargs here
    arg = click.argument('counter', help=EXECUTION_COUNTER_HELP, cls=HelpfulArgument)
    return arg(fn)


def join_with_style(items: Iterable[Any], separator: str = ', ', **style_kwargs: Any) -> str:
    """Join `items` with `separator`, click.style()-ing each item with `style_kwargs`."""
    return separator.join(click.style(str(item), **style_kwargs) for item in items)
{ "content_hash": "ccda81c38a8b7ba002788010b73604b9", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 119, "avg_line_length": 37.186440677966104, "alnum_prop": 0.6362807657247037, "repo_name": "valohai/valohai-cli", "id": "ca99920ade1026af18dff2dcabb70cecfb3e8b04", "size": "2194", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "valohai_cli/utils/cli_utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "411" }, { "name": "Python", "bytes": "279031" } ], "symlink_target": "" }
import unittest from test.test_support import verbose, run_unittest import sys import gc import weakref ### Support code ############################################################################### # Bug 1055820 has several tests of longstanding bugs involving weakrefs and # cyclic gc. # An instance of C1055820 has a self-loop, so becomes cyclic trash when # unreachable. class C1055820(object): def __init__(self, i): self.i = i self.loop = self class GC_Detector(object): # Create an instance I. Then gc hasn't happened again so long as # I.gc_happened is false. def __init__(self): self.gc_happened = False def it_happened(ignored): self.gc_happened = True # Create a piece of cyclic trash that triggers it_happened when # gc collects it. self.wr = weakref.ref(C1055820(666), it_happened) ### Tests ############################################################################### class GCTests(unittest.TestCase): def test_list(self): l = [] l.append(l) gc.collect() del l self.assertEqual(gc.collect(), 1) def test_dict(self): d = {} d[1] = d gc.collect() del d self.assertEqual(gc.collect(), 1) def test_tuple(self): # since tuples are immutable we close the loop with a list l = [] t = (l,) l.append(t) gc.collect() del t del l self.assertEqual(gc.collect(), 2) def test_class(self): class A: pass A.a = A gc.collect() del A self.assertNotEqual(gc.collect(), 0) def test_newstyleclass(self): class A(object): pass gc.collect() del A self.assertNotEqual(gc.collect(), 0) def test_instance(self): class A: pass a = A() a.a = a gc.collect() del a self.assertNotEqual(gc.collect(), 0) def test_newinstance(self): class A(object): pass a = A() a.a = a gc.collect() del a self.assertNotEqual(gc.collect(), 0) class B(list): pass class C(B, A): pass a = C() a.a = a gc.collect() del a self.assertNotEqual(gc.collect(), 0) del B, C self.assertNotEqual(gc.collect(), 0) A.a = A() del A self.assertNotEqual(gc.collect(), 0) self.assertEqual(gc.collect(), 0) def test_method(self): # Tricky: 
self.__init__ is a bound method, it references the instance. class A: def __init__(self): self.init = self.__init__ a = A() gc.collect() del a self.assertNotEqual(gc.collect(), 0) def test_finalizer(self): # A() is uncollectable if it is part of a cycle, make sure it shows up # in gc.garbage. class A: def __del__(self): pass class B: pass a = A() a.a = a id_a = id(a) b = B() b.b = b gc.collect() del a del b self.assertNotEqual(gc.collect(), 0) for obj in gc.garbage: if id(obj) == id_a: del obj.a break else: self.fail("didn't find obj in garbage (finalizer)") gc.garbage.remove(obj) def test_finalizer_newclass(self): # A() is uncollectable if it is part of a cycle, make sure it shows up # in gc.garbage. class A(object): def __del__(self): pass class B(object): pass a = A() a.a = a id_a = id(a) b = B() b.b = b gc.collect() del a del b self.assertNotEqual(gc.collect(), 0) for obj in gc.garbage: if id(obj) == id_a: del obj.a break else: self.fail("didn't find obj in garbage (finalizer)") gc.garbage.remove(obj) def test_function(self): # Tricky: f -> d -> f, code should call d.clear() after the exec to # break the cycle. d = {} exec("def f(): pass\n") in d gc.collect() del d self.assertEqual(gc.collect(), 2) def test_frame(self): def f(): frame = sys._getframe() gc.collect() f() self.assertEqual(gc.collect(), 1) def test_saveall(self): # Verify that cyclic garbage like lists show up in gc.garbage if the # SAVEALL option is enabled. # First make sure we don't save away other stuff that just happens to # be waiting for collection. 
gc.collect() # if this fails, someone else created immortal trash self.assertEqual(gc.garbage, []) L = [] L.append(L) id_L = id(L) debug = gc.get_debug() gc.set_debug(debug | gc.DEBUG_SAVEALL) del L gc.collect() gc.set_debug(debug) self.assertEqual(len(gc.garbage), 1) obj = gc.garbage.pop() self.assertEqual(id(obj), id_L) def test_del(self): # __del__ methods can trigger collection, make this to happen thresholds = gc.get_threshold() gc.enable() gc.set_threshold(1) class A: def __del__(self): dir(self) a = A() del a gc.disable() gc.set_threshold(*thresholds) def test_del_newclass(self): # __del__ methods can trigger collection, make this to happen thresholds = gc.get_threshold() gc.enable() gc.set_threshold(1) class A(object): def __del__(self): dir(self) a = A() del a gc.disable() gc.set_threshold(*thresholds) # The following two tests are fragile: # They precisely count the number of allocations, # which is highly implementation-dependent. # For example: # - disposed tuples are not freed, but reused # - the call to assertEqual somehow avoids building its args tuple def test_get_count(self): # Avoid future allocation of method object assertEqual = self.assertEqual gc.collect() assertEqual(gc.get_count(), (0, 0, 0)) a = dict() # since gc.collect(), we created two objects: # the dict, and the tuple returned by get_count() assertEqual(gc.get_count(), (2, 0, 0)) def test_collect_generations(self): # Avoid future allocation of method object assertEqual = self.assertEqual gc.collect() a = dict() gc.collect(0) assertEqual(gc.get_count(), (0, 1, 0)) gc.collect(1) assertEqual(gc.get_count(), (0, 0, 1)) gc.collect(2) assertEqual(gc.get_count(), (0, 0, 0)) def test_trashcan(self): class Ouch: n = 0 def __del__(self): Ouch.n = Ouch.n + 1 if Ouch.n % 17 == 0: gc.collect() # "trashcan" is a hack to prevent stack overflow when deallocating # very deeply nested tuples etc. 
It works in part by abusing the # type pointer and refcount fields, and that can yield horrible # problems when gc tries to traverse the structures. # If this test fails (as it does in 2.0, 2.1 and 2.2), it will # most likely die via segfault. # Note: In 2.3 the possibility for compiling without cyclic gc was # removed, and that in turn allows the trashcan mechanism to work # via much simpler means (e.g., it never abuses the type pointer or # refcount fields anymore). Since it's much less likely to cause a # problem now, the various constants in this expensive (we force a lot # of full collections) test are cut back from the 2.2 version. gc.enable() N = 150 for count in range(2): t = [] for i in range(N): t = [t, Ouch()] u = [] for i in range(N): u = [u, Ouch()] v = {} for i in range(N): v = {1: v, 2: Ouch()} gc.disable() def test_boom(self): class Boom: def __getattr__(self, someattribute): del self.attr raise AttributeError a = Boom() b = Boom() a.attr = b b.attr = a gc.collect() garbagelen = len(gc.garbage) del a, b # a<->b are in a trash cycle now. Collection will invoke # Boom.__getattr__ (to see whether a and b have __del__ methods), and # __getattr__ deletes the internal "attr" attributes as a side effect. # That causes the trash cycle to get reclaimed via refcounts falling to # 0, thus mutating the trash graph as a side effect of merely asking # whether __del__ exists. This used to (before 2.3b1) crash Python. # Now __getattr__ isn't called. self.assertEqual(gc.collect(), 4) self.assertEqual(len(gc.garbage), garbagelen) def test_boom2(self): class Boom2: def __init__(self): self.x = 0 def __getattr__(self, someattribute): self.x += 1 if self.x > 1: del self.attr raise AttributeError a = Boom2() b = Boom2() a.attr = b b.attr = a gc.collect() garbagelen = len(gc.garbage) del a, b # Much like test_boom(), except that __getattr__ doesn't break the # cycle until the second time gc checks for __del__. 
As of 2.3b1, # there isn't a second time, so this simply cleans up the trash cycle. # We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get # reclaimed this way. self.assertEqual(gc.collect(), 4) self.assertEqual(len(gc.garbage), garbagelen) def test_boom_new(self): # boom__new and boom2_new are exactly like boom and boom2, except use # new-style classes. class Boom_New(object): def __getattr__(self, someattribute): del self.attr raise AttributeError a = Boom_New() b = Boom_New() a.attr = b b.attr = a gc.collect() garbagelen = len(gc.garbage) del a, b self.assertEqual(gc.collect(), 4) self.assertEqual(len(gc.garbage), garbagelen) def test_boom2_new(self): class Boom2_New(object): def __init__(self): self.x = 0 def __getattr__(self, someattribute): self.x += 1 if self.x > 1: del self.attr raise AttributeError a = Boom2_New() b = Boom2_New() a.attr = b b.attr = a gc.collect() garbagelen = len(gc.garbage) del a, b self.assertEqual(gc.collect(), 4) self.assertEqual(len(gc.garbage), garbagelen) def test_get_referents(self): alist = [1, 3, 5] got = gc.get_referents(alist) got.sort() self.assertEqual(got, alist) atuple = tuple(alist) got = gc.get_referents(atuple) got.sort() self.assertEqual(got, alist) adict = {1: 3, 5: 7} expected = [1, 3, 5, 7] got = gc.get_referents(adict) got.sort() self.assertEqual(got, expected) got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0)) got.sort() self.assertEqual(got, [0, 0] + range(5)) self.assertEqual(gc.get_referents(1, 'a', 4j), []) def test_bug1055820b(self): # Corresponds to temp2b.py in the bug report. ouch = [] def callback(ignored): ouch[:] = [wr() for wr in WRs] Cs = [C1055820(i) for i in range(2)] WRs = [weakref.ref(c, callback) for c in Cs] c = None gc.collect() self.assertEqual(len(ouch), 0) # Make the two instances trash, and collect again. The bug was that # the callback materialized a strong reference to an instance, but gc # cleared the instance's dict anyway. 
Cs = None gc.collect() self.assertEqual(len(ouch), 2) # else the callbacks didn't run for x in ouch: # If the callback resurrected one of these guys, the instance # would be damaged, with an empty __dict__. self.assertEqual(x, None) class GCTogglingTests(unittest.TestCase): def setUp(self): gc.enable() def tearDown(self): gc.disable() def test_bug1055820c(self): # Corresponds to temp2c.py in the bug report. This is pretty # elaborate. c0 = C1055820(0) # Move c0 into generation 2. gc.collect() c1 = C1055820(1) c1.keep_c0_alive = c0 del c0.loop # now only c1 keeps c0 alive c2 = C1055820(2) c2wr = weakref.ref(c2) # no callback! ouch = [] def callback(ignored): ouch[:] = [c2wr()] # The callback gets associated with a wr on an object in generation 2. c0wr = weakref.ref(c0, callback) c0 = c1 = c2 = None # What we've set up: c0, c1, and c2 are all trash now. c0 is in # generation 2. The only thing keeping it alive is that c1 points to # it. c1 and c2 are in generation 0, and are in self-loops. There's a # global weakref to c2 (c2wr), but that weakref has no callback. # There's also a global weakref to c0 (c0wr), and that does have a # callback, and that callback references c2 via c2wr(). # # c0 has a wr with callback, which references c2wr # ^ # | # | Generation 2 above dots #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . . # | Generation 0 below dots # | # | # ^->c1 ^->c2 has a wr but no callback # | | | | # <--v <--v # # So this is the nightmare: when generation 0 gets collected, we see # that c2 has a callback-free weakref, and c1 doesn't even have a # weakref. Collecting generation 0 doesn't see c0 at all, and c0 is # the only object that has a weakref with a callback. gc clears c1 # and c2. Clearing c1 has the side effect of dropping the refcount on # c0 to 0, so c0 goes away (despite that it's in an older generation) # and c0's wr callback triggers. That in turn materializes a reference # to c2 via c2wr(), but c2 gets cleared anyway by gc. 
# We want to let gc happen "naturally", to preserve the distinction # between generations. junk = [] i = 0 detector = GC_Detector() while not detector.gc_happened: i += 1 if i > 10000: self.fail("gc didn't happen after 10000 iterations") self.assertEqual(len(ouch), 0) junk.append([]) # this will eventually trigger gc self.assertEqual(len(ouch), 1) # else the callback wasn't invoked for x in ouch: # If the callback resurrected c2, the instance would be damaged, # with an empty __dict__. self.assertEqual(x, None) def test_bug1055820d(self): # Corresponds to temp2d.py in the bug report. This is very much like # test_bug1055820c, but uses a __del__ method instead of a weakref # callback to sneak in a resurrection of cyclic trash. ouch = [] class D(C1055820): def __del__(self): ouch[:] = [c2wr()] d0 = D(0) # Move all the above into generation 2. gc.collect() c1 = C1055820(1) c1.keep_d0_alive = d0 del d0.loop # now only c1 keeps d0 alive c2 = C1055820(2) c2wr = weakref.ref(c2) # no callback! d0 = c1 = c2 = None # What we've set up: d0, c1, and c2 are all trash now. d0 is in # generation 2. The only thing keeping it alive is that c1 points to # it. c1 and c2 are in generation 0, and are in self-loops. There's # a global weakref to c2 (c2wr), but that weakref has no callback. # There are no other weakrefs. # # d0 has a __del__ method that references c2wr # ^ # | # | Generation 2 above dots #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . . # | Generation 0 below dots # | # | # ^->c1 ^->c2 has a wr but no callback # | | | | # <--v <--v # # So this is the nightmare: when generation 0 gets collected, we see # that c2 has a callback-free weakref, and c1 doesn't even have a # weakref. Collecting generation 0 doesn't see d0 at all. gc clears # c1 and c2. Clearing c1 has the side effect of dropping the refcount # on d0 to 0, so d0 goes away (despite that it's in an older # generation) and d0's __del__ triggers. 
That in turn materializes # a reference to c2 via c2wr(), but c2 gets cleared anyway by gc. # We want to let gc happen "naturally", to preserve the distinction # between generations. detector = GC_Detector() junk = [] i = 0 while not detector.gc_happened: i += 1 if i > 10000: self.fail("gc didn't happen after 10000 iterations") self.assertEqual(len(ouch), 0) junk.append([]) # this will eventually trigger gc self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked for x in ouch: # If __del__ resurrected c2, the instance would be damaged, with an # empty __dict__. self.assertEqual(x, None) def test_main(): enabled = gc.isenabled() gc.disable() assert not gc.isenabled() debug = gc.get_debug() gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak try: gc.collect() # Delete 2nd generation garbage run_unittest(GCTests, GCTogglingTests) finally: gc.set_debug(debug) # test gc.enable() even if GC is disabled by default if verbose: print "restoring automatic collection" # make sure to always test gc.enable() gc.enable() assert gc.isenabled() if not enabled: gc.disable() if __name__ == "__main__": test_main()
{ "content_hash": "77bc90eb07a854d069442e99034652d3", "timestamp": "", "source": "github", "line_count": 608, "max_line_length": 79, "avg_line_length": 32.4391447368421, "alnum_prop": 0.5034224002433707, "repo_name": "babyliynfg/cross", "id": "bcf37866e6d7dc8ab857716001875c1095170d02", "size": "19723", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/project-creator/Python2.6.6/Lib/test/test_gc.py", "mode": "33261", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "36722" }, { "name": "C", "bytes": "6345646" }, { "name": "C++", "bytes": "15980000" }, { "name": "CMake", "bytes": "1238" }, { "name": "GLSL", "bytes": "64406" }, { "name": "HTML", "bytes": "147661" }, { "name": "Java", "bytes": "574078" }, { "name": "JavaScript", "bytes": "503327" }, { "name": "Makefile", "bytes": "18778" }, { "name": "Objective-C", "bytes": "396703" }, { "name": "Objective-C++", "bytes": "378740" }, { "name": "PLSQL", "bytes": "22886" }, { "name": "Python", "bytes": "15265548" }, { "name": "Roff", "bytes": "23" }, { "name": "Shell", "bytes": "61021" }, { "name": "Visual Basic", "bytes": "19200" } ], "symlink_target": "" }
"""Tests for the Networks Windows Registry plugin.""" from __future__ import unicode_literals import unittest from dfdatetime import filetime as dfdatetime_filetime from dfwinreg import definitions as dfwinreg_definitions from dfwinreg import fake as dfwinreg_fake from plaso.lib import definitions from plaso.parsers.winreg_plugins import networks from tests.parsers.winreg_plugins import test_lib class NetworksWindowsRegistryPluginTest(test_lib.RegistryPluginTestCase): """Tests for the Networks Windows Registry plugin.""" def _CreateTestKey(self, key_path, time_string): """Creates Registry keys and values for testing. Args: key_path (str): Windows Registry key path. time_string (str): key last written date and time. Returns: dfwinreg.WinRegistryKey: a Windows Registry key. """ filetime = dfdatetime_filetime.Filetime() filetime.CopyFromDateTimeString(time_string) registry_key = dfwinreg_fake.FakeWinRegistryKey( 'NetworkList', key_path=key_path, last_written_time=filetime.timestamp, offset=153) # Setup Profiles. 
profiles_key_name = 'Profiles' profiles_key = dfwinreg_fake.FakeWinRegistryKey(profiles_key_name) registry_key.AddSubkey(profiles_key_name, profiles_key) profile1_key_name = '{B358E985-4464-4ABD-AF99-7D4A0AF66BB7}' profile1_key = dfwinreg_fake.FakeWinRegistryKey(profile1_key_name) profiles_key.AddSubkey(profile1_key_name, profile1_key) value_data = b'\x00\x00\x00\x00' registry_value = dfwinreg_fake.FakeWinRegistryValue( 'Category', data=value_data, data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN) profile1_key.AddValue(registry_value) value_data = ( b'\xde\x07\x0c\x00\x02\x00\x10\x00\x08\x00\x04\x00\x27\x00\x6a\x00') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'DateCreated', data=value_data, data_type=dfwinreg_definitions.REG_BINARY) profile1_key.AddValue(registry_value) value_data = ( b'\xdf\x07\x01\x00\x02\x00\x1b\x00\x0f\x00\x0f\x00\x1b\x00\xc5\x03') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'DateLastConnected', data=value_data, data_type=dfwinreg_definitions.REG_BINARY) profile1_key.AddValue(registry_value) value_data = 'My Awesome Wifi Hotspot'.encode('utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'Description', data=value_data, data_type=dfwinreg_definitions.REG_SZ) profile1_key.AddValue(registry_value) value_data = b'\x00\x00\x00\x00' registry_value = dfwinreg_fake.FakeWinRegistryValue( 'Managed', data=value_data, data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN) profile1_key.AddValue(registry_value) value_data = b'\x00\x00\x00\x47' registry_value = dfwinreg_fake.FakeWinRegistryValue( 'NameType', data=value_data, data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN) profile1_key.AddValue(registry_value) value_data = 'My Awesome Wifi Hotspot'.encode('utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'ProfileName', data=value_data, data_type=dfwinreg_definitions.REG_SZ) profile1_key.AddValue(registry_value) profile2_key_name = '{C1C57B58-BFE2-428B-818C-9D69A873AD3D}' profile2_key = 
dfwinreg_fake.FakeWinRegistryKey(profile2_key_name) profiles_key.AddSubkey(profile2_key_name, profile2_key) value_data = b'\x00\x00\x00\x00' registry_value = dfwinreg_fake.FakeWinRegistryValue( 'Category', data=value_data, data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN) profile2_key.AddValue(registry_value) value_data = ( b'\xde\x07\x05\x00\x02\x00\x06\x00\x11\x00\x02\x00\x13\x00\x1b\x03') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'DateCreated', data=value_data, data_type=dfwinreg_definitions.REG_BINARY) profile2_key.AddValue(registry_value) value_data = ( b'\xde\x07\x05\x00\x02\x00\x06\x00\x11\x00\x07\x00\x36\x00\x0a\x00') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'DateLastConnected', data=value_data, data_type=dfwinreg_definitions.REG_BINARY) profile2_key.AddValue(registry_value) value_data = 'Network'.encode('utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'Description', data=value_data, data_type=dfwinreg_definitions.REG_SZ) profile2_key.AddValue(registry_value) value_data = b'\x00\x00\x00\x00' registry_value = dfwinreg_fake.FakeWinRegistryValue( 'Managed', data=value_data, data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN) profile2_key.AddValue(registry_value) value_data = b'\x00\x00\x00\x06' registry_value = dfwinreg_fake.FakeWinRegistryValue( 'NameType', data=value_data, data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN) profile2_key.AddValue(registry_value) value_data = 'Network'.encode('utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'ProfileName', data=value_data, data_type=dfwinreg_definitions.REG_SZ) profile2_key.AddValue(registry_value) # Setup signatures. 
signatures_key_name = 'Signatures' signatures_key = dfwinreg_fake.FakeWinRegistryKey(signatures_key_name) registry_key.AddSubkey(signatures_key_name, signatures_key) managed_key_name = 'Managed' managed_key = dfwinreg_fake.FakeWinRegistryKey(managed_key_name) signatures_key.AddSubkey(managed_key_name, managed_key) unmanaged_key_name = 'Unmanaged' unmanaged_key = dfwinreg_fake.FakeWinRegistryKey(unmanaged_key_name) signatures_key.AddSubkey(unmanaged_key_name, unmanaged_key) unmanaged_subkey_name = ( '010103000F0000F0080000000F0000F0E8982FB31F37E52AF30A6575A4898CE667' '6E8C2F99C4C5131D84F64BD823E0') unmanaged_subkey = dfwinreg_fake.FakeWinRegistryKey(unmanaged_subkey_name) unmanaged_key.AddSubkey(unmanaged_subkey_name, unmanaged_subkey) value_data = b'\x00\x50\x56\xea\x6c\xec' registry_value = dfwinreg_fake.FakeWinRegistryValue( 'DefaultGatewayMac', data=value_data, data_type=dfwinreg_definitions.REG_BINARY) unmanaged_subkey.AddValue(registry_value) value_data = 'Network'.encode('utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'Description', data=value_data, data_type=dfwinreg_definitions.REG_SZ) unmanaged_subkey.AddValue(registry_value) value_data = 'localdomain'.encode('utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'DnsSuffix', data=value_data, data_type=dfwinreg_definitions.REG_SZ) unmanaged_subkey.AddValue(registry_value) value_data = 'Network'.encode('utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'FirstNetwork', data=value_data, data_type=dfwinreg_definitions.REG_SZ) unmanaged_subkey.AddValue(registry_value) value_data = '{C1C57B58-BFE2-428B-818C-9D69A873AD3D}'.encode('utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( 'ProfileGuid', data=value_data, data_type=dfwinreg_definitions.REG_SZ) unmanaged_subkey.AddValue(registry_value) value_data = b'\x00\x00\x00\x08' registry_value = dfwinreg_fake.FakeWinRegistryValue( 'Source', data=value_data, data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN) 
unmanaged_subkey.AddValue(registry_value) return registry_key def testFilters(self): """Tests the FILTERS class attribute.""" plugin = networks.NetworksWindowsRegistryPlugin() key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion\\' 'NetworkList') self._AssertFiltersOnKeyPath(plugin, key_path) self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus') def testProcess(self): """Tests the Process function on created key.""" key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion') time_string = '2013-01-30 10:47:57' registry_key = self._CreateTestKey(key_path, time_string) plugin = networks.NetworksWindowsRegistryPlugin() storage_writer = self._ParseKeyWithPlugin(registry_key, plugin) self.assertEqual(storage_writer.number_of_warnings, 0) self.assertEqual(storage_writer.number_of_events, 4) events = list(storage_writer.GetSortedEvents()) event = events[0] self.CheckTimestamp(event.timestamp, '2014-05-06 17:02:19.795000') self.assertEqual( event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION) event_data = self._GetEventDataOfEvent(storage_writer, event) self.assertEqual(event_data.data_type, 'windows:registry:network') expected_message = ( 'SSID: Network ' 'Description: Network ' 'Connection Type: Wired ' 'Default Gateway Mac: 00:50:56:ea:6c:ec ' 'DNS Suffix: localdomain') expected_short_message = '{0:s}...'.format(expected_message[:77]) self._TestGetMessageStrings( event_data, expected_message, expected_short_message) event = events[3] self.CheckTimestamp(event.timestamp, '2015-01-27 15:15:27.965000') self.assertEqual( event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_CONNECTED) event_data = self._GetEventDataOfEvent(storage_writer, event) self.assertEqual(event_data.data_type, 'windows:registry:network') expected_message = ( 'SSID: My Awesome Wifi Hotspot ' 'Description: My Awesome Wifi Hotspot ' 'Connection Type: Wireless') expected_short_message = '{0:s}...'.format(expected_message[:77]) 
self._TestGetMessageStrings( event_data, expected_message, expected_short_message) if __name__ == '__main__': unittest.main()
{ "content_hash": "6697d8fbe9806081d3c4e21da3de08be", "timestamp": "", "source": "github", "line_count": 262, "max_line_length": 79, "avg_line_length": 37.64503816793893, "alnum_prop": 0.7081009834735882, "repo_name": "rgayon/plaso", "id": "998247ab8f26678d1b2ae772b374f62a23fd027c", "size": "9910", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/parsers/winreg_plugins/networks.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "415" }, { "name": "Dockerfile", "bytes": "1047" }, { "name": "Makefile", "bytes": "712" }, { "name": "PowerShell", "bytes": "17771" }, { "name": "Python", "bytes": "4803191" }, { "name": "Ruby", "bytes": "926" }, { "name": "Shell", "bytes": "46225" } ], "symlink_target": "" }
import json
import traceback
import unicodedata

import bel.nanopub.validate
import falcon
import structlog

log = structlog.getLogger(__name__)


class NanopubValidateResource(object):
    """Validate nanopubs"""

    def on_post(self, req, resp):
        """Validate the nanopub in the request body and respond with the result.

        Expects a UTF-8 JSON payload containing a top-level "nanopub" key and
        an optional "error_level" key (defaults to "WARNING").  On success the
        validated nanopub is returned with HTTP 200; a payload that cannot be
        parsed or validated raises an HTTP 422, and a payload without a
        nanopub raises an HTTP 400.
        """
        # Validate nanopub only using cached assertions/annotations
        # complete - fill in any missing assertion/annotation validations
        # force - redo all validations
        # cached - only return cached/pre-generated validations
        validation_level = req.get_param('validation_level', default="complete")

        # BEL Resources loading
        try:
            # content_length may be None/0, so default the read size to 0.
            data = req.stream.read(req.content_length or 0)
            data = data.decode(encoding="utf-8")
            data = data.replace("\u00a0", " ")  # get rid of non-breaking spaces
            data = json.loads(data)
        except ValueError as e:
            raise falcon.HTTPUnprocessableEntity(
                title="Cannot process payload",
                description=f"Cannot process nanopub (maybe an encoding error? please use UTF-8 for JSON payload) error: {e}",
            )

        # Wrap the incoming nanopub; None signals a missing/invalid payload.
        nanopub = {}
        if "nanopub" in data:
            nanopub["nanopub"] = data.get("nanopub")
        else:
            nanopub = None

        error_level = data.get("error_level", "WARNING")

        if nanopub:
            try:
                nanopub = bel.nanopub.validate.validate(nanopub, error_level=error_level, validation_level=validation_level)
                resp.media = nanopub
                resp.status = falcon.HTTP_200
            except Exception as e:
                # NOTE(review): traceback.print_exc() returns None (it writes
                # to stderr), so log.error() is handed None here — the
                # traceback still reaches stderr but not the structured log.
                log.error(traceback.print_exc())
                raise falcon.HTTPUnprocessableEntity(
                    title="Cannot process nanopub", description=f"Cannot process nanopub: {e}"
                )
        else:
            raise falcon.HTTPBadRequest(
                title="Cannot process nanopub",
                description=f"No nanopub in payload to process. Please check your submission.",
            )
{ "content_hash": "59fc2b23c1da4dcb68606f8324c45238", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 126, "avg_line_length": 35.241379310344826, "alnum_prop": 0.5963796477495108, "repo_name": "belbio/bel_api", "id": "47a6025ef4d16c515f7cb702e501efe24ac53ac4", "size": "2044", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/resources/nanopubs.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "817" }, { "name": "JavaScript", "bytes": "6662" }, { "name": "Makefile", "bytes": "6310" }, { "name": "Python", "bytes": "58336" }, { "name": "Shell", "bytes": "1080" } ], "symlink_target": "" }
from collections import namedtuple
from functools import wraps
import logging
import time

# COMPAT FIX: reload() is a builtin only on Python 2; on Python 3 it lives in
# importlib. The original bare reload(logging) raised NameError on Python 3.
try:
    from importlib import reload
except ImportError:  # Python 2: builtin reload is already in scope
    pass

reload(logging)

# Disables this logging level + all levels below.
# Set to None if you want to enable all logging.
# Set to logging.CRITICAL if you want to disable all logging.
DISABLE_LOGGING_LEVEL = logging.DEBUG  # logging.CRITICAL

# One logging handler specification: the handler object, the level it should
# emit at, and the format string used to build its Formatter.
HandlerInfo = namedtuple("HandlerInfo", ["handler", "level", "formatter"])

# Cache of loggers already configured by get_logger(), keyed by name, so
# repeated calls never attach duplicate handlers.
loggers = {}


def run_once(f):
    """Decorator: run ``f`` at most once per process; later calls are no-ops."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not f.has_run:
            f.has_run = True
            return f(*args, **kwargs)
    f.has_run = False
    return wrapper


@run_once
def disable_logging(level=None):
    """Disable ``level`` and everything below it (applied once, lazily)."""
    if level:
        logging.disable(level)


def get_logger(name, level=logging.INFO, handler_infos=None):
    """Return a configured logger, creating and caching it on first use.

    Args:
        name: logger name (usually ``__name__``).
        level: level applied to the logger and the default handler.
        handler_infos: optional list of HandlerInfo; when omitted a single
            StreamHandler at ``level`` with a timestamped format is used.
    """
    disable_logging(DISABLE_LOGGING_LEVEL)

    if name in loggers:
        return loggers[name]

    logger = logging.getLogger(name)
    logger.setLevel(level)

    # Default logger handler specification goes here.
    if not handler_infos:
        handler = logging.StreamHandler()
        # renamed from "format": avoid shadowing the builtin
        fmt = "%(asctime)s [%(levelname)s] [%(name)s] %(message)s"
        handler_infos = [HandlerInfo(handler, level, fmt)]

    for handler, handler_level, fmt in handler_infos:
        formatter = logging.Formatter(fmt)
        handler.setLevel(handler_level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    loggers[name] = logger
    return logger


def log_exc(logger, msg, exc_type=Exception):
    """Log ``msg`` as an error, then raise it as ``exc_type``."""
    logger.error(msg)
    raise exc_type(msg)


logger = get_logger(__name__)


# Stick this as a decorator on any function to print the # of time spent
# in that function.
def log_time(f):
    """Decorator: log (at DEBUG) wall-clock time spent in each call to ``f``."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        tstart = time.time()
        result = f(*args, **kwargs)
        m, s = divmod(time.time() - tstart, 60)
        msg = "Time in %s(): "
        if m == 0:
            logger.debug(msg + "%s seconds", f.__name__, s)
        else:
            logger.debug(msg + "%s minutes, %s seconds", f.__name__, m, s)
        return result
    return wrapper
{ "content_hash": "9cbc1bfdd91729bbc1d58a2747e4187b", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 74, "avg_line_length": 25.95, "alnum_prop": 0.6348747591522158, "repo_name": "d-grossman/magichour", "id": "94bb508aa676dfbb1a3ac0073223f8a1c1ec3c13", "size": "2076", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "magichour/api/local/util/log.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "6992" }, { "name": "Java", "bytes": "240314" }, { "name": "Jupyter Notebook", "bytes": "9326871" }, { "name": "Perl", "bytes": "37746" }, { "name": "Python", "bytes": "296398" }, { "name": "Shell", "bytes": "4182" } ], "symlink_target": "" }
""" A label command (sub-command) for the Organice management command. """ from django.utils.translation import ugettext as _ class BootstrapCommandMixin(object): def bootstrap_command(self): """ Initialize Organice = initdb, initauth, initcms, initblog """ self.handle_label('initdb') self.handle_label('initauth') self.handle_label('initcms') self.handle_label('initblog') self.log(_('Have an organiced day!'))
{ "content_hash": "64957c0ec93ec1051c22b9662eb4d643", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 66, "avg_line_length": 26.944444444444443, "alnum_prop": 0.643298969072165, "repo_name": "Organice/django-organice", "id": "c32cc16336283a2e0289fc258c9936de6a7829ef", "size": "485", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "organice/management/commands/mixins/bootstrap.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Gherkin", "bytes": "1620" }, { "name": "HTML", "bytes": "405" }, { "name": "Makefile", "bytes": "4186" }, { "name": "Python", "bytes": "122993" } ], "symlink_target": "" }
"""Smoke-test script: verify that pymongo imports cleanly."""
import pymongo

# COMPAT FIX: py2-only `print "..."` statement replaced with the call form,
# which prints the same single argument under both Python 2 and 3.
print("...")
{ "content_hash": "b1e1922c355053ce26f3979bcd0e2c8e", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 14, "avg_line_length": 13.5, "alnum_prop": 0.6666666666666666, "repo_name": "BartGo/mongodb-drafts", "id": "9c1e76a8e86a21920b29510cd4bfb1ee9bcfed58", "size": "27", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "main.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "914" }, { "name": "Shell", "bytes": "834" } ], "symlink_target": "" }
""" Exceptions that may be raised by rabbitpy during use ---------------------------------------------------- """ class RabbitpyException(Exception): """Base exception of all rabbitpy exceptions.""" pass class AMQPException(RabbitpyException): """Base exception of all AMQP exceptions.""" pass class ActionException(RabbitpyException): """Raised when an action is taken on a Rabbitpy object that is not supported due to the state of the object. An example would be trying to ack a Message object when the message object was locally created and not sent by RabbitMQ via an AMQP Basic.Get or Basic.Consume. """ def __str__(self): return self.args[0] class ChannelClosedException(RabbitpyException): """Raised when an action is attempted on a channel that is closed.""" def __str__(self): return 'Can not perform RPC requests on a closed channel, you must ' \ 'create a new channel' class ConnectionException(RabbitpyException): """Raised when Rabbitpy can not connect to the specified server and if a connection fails and the RabbitMQ version does not support the authentication_failure_close feature added in RabbitMQ 3.2. """ def __str__(self): return 'Unable to connect to the remote server %r' % self.args class ConnectionResetException(RabbitpyException): """Raised if the socket level connection was reset. This can happen due to the loss of network connection or socket timeout, or more than 2 missed heartbeat intervals if heartbeats are enabled. """ def __str__(self): return 'Connection was reset at socket level' class RemoteCancellationException(RabbitpyException): """Raised if RabbitMQ cancels an active consumer""" def __str__(self): return 'Remote server cancelled the active consumer' class RemoteClosedChannelException(RabbitpyException): """Raised if RabbitMQ closes the channel and the reply_code in the Channel.Close RPC request does not have a mapped exception in Rabbitpy. 
""" def __str__(self): return 'Channel %i was closed by the remote server (%i): %s' % \ (self.args[0], self.args[1], self.args[2]) class RemoteClosedException(RabbitpyException): """Raised if RabbitMQ closes the connection and the reply_code in the Connection.Close RPC request does not have a mapped exception in Rabbitpy. """ def __str__(self): return 'Connection was closed by the remote server (%i): %s' % \ (self.args[0], self.args[1]) class MessageReturnedException(RabbitpyException): """Raised if the RabbitMQ sends a message back to a publisher via the Basic.Return RPC call. """ def __str__(self): return 'Message was returned by RabbitMQ: (%s) %s for exchange %s' % \ (self.args[0], self.args[1], self.args[2]) class NoActiveTransactionError(RabbitpyException): """Raised when a transaction method is issued but the transaction has not been initiated. """ def __str__(self): return 'No active transaction for the request, channel closed' class NotConsumingError(RabbitpyException): """Raised Queue.cancel_consumer() is invoked but the queue is not actively consuming. """ def __str__(self): return 'No active consumer to cancel' class NotSupportedError(RabbitpyException): """Raised when a feature is requested that is not supported by the RabbitMQ server. """ def __str__(self): return 'The selected feature "%s" is not supported' % self.args[0] class TooManyChannelsError(RabbitpyException): """Raised if an application attempts to create a channel, exceeding the maximum number of channels (MAXINT or 2,147,483,647) available for a single connection. Note that each time a channel object is created, it will take a new channel id. If you create and destroy 2,147,483,648 channels, this exception will be raised. """ def __str__(self): return 'The maximum amount of negotiated channels has been reached' class UnexpectedResponseError(RabbitpyException): """Raised when an RPC call is made to RabbitMQ but the response it sent back is not recognized. 
""" def __str__(self): return 'Received an expected response, expected %s, received %s' % \ (self.args[0], self.args[1]) # AMQP Exceptions class AMQPContentTooLarge(AMQPException): """ The client attempted to transfer content larger than the server could accept at the present time. The client may retry at a later time. """ pass class AMQPNoRoute(AMQPException): """ Undocumented AMQP Soft Error """ pass class AMQPNoConsumers(AMQPException): """ When the exchange cannot deliver to a consumer when the immediate flag is set. As a result of pending data on the queue or the absence of any consumers of the queue. """ pass class AMQPAccessRefused(AMQPException): """ The client attempted to work with a server entity to which it has no access due to security settings. """ pass class AMQPNotFound(AMQPException): """ The client attempted to work with a server entity that does not exist. """ pass class AMQPResourceLocked(AMQPException): """ The client attempted to work with a server entity to which it has no access because another client is working with it. """ pass class AMQPPreconditionFailed(AMQPException): """ The client requested a method that was not allowed because some precondition failed. """ pass class AMQPConnectionForced(AMQPException): """ An operator intervened to close the connection for some reason. The client may retry at some later date. """ pass class AMQPInvalidPath(AMQPException): """ The client tried to work with an unknown virtual host. """ pass class AMQPFrameError(AMQPException): """ The sender sent a malformed frame that the recipient could not decode. This strongly implies a programming error in the sending peer. """ pass class AMQPSyntaxError(AMQPException): """ The sender sent a frame that contained illegal values for one or more fields. This strongly implies a programming error in the sending peer. 
""" pass class AMQPCommandInvalid(AMQPException): """ The client sent an invalid sequence of frames, attempting to perform an operation that was considered invalid by the server. This usually implies a programming error in the client. """ pass class AMQPChannelError(AMQPException): """ The client attempted to work with a channel that had not been correctly opened. This most likely indicates a fault in the client layer. """ pass class AMQPUnexpectedFrame(AMQPException): """ The peer sent a frame that was not expected, usually in the context of a content header and body. This strongly indicates a fault in the peer's content processing. """ pass class AMQPResourceError(AMQPException): """ The server could not complete the method because it lacked sufficient resources. This may be due to the client creating too many of some type of entity. """ pass class AMQPNotAllowed(AMQPException): """ The client tried to work with some entity in a manner that is prohibited by the server, due to security settings or by some other criteria. """ pass class AMQPNotImplemented(AMQPException): """ The client tried to use functionality that is not implemented in the server. """ pass class AMQPInternalError(AMQPException): """ The server could not complete the method because of an internal error. The server may require intervention by an operator in order to resume normal operations. """ pass AMQP = {311: AMQPContentTooLarge, 312: AMQPNoRoute, 313: AMQPNoConsumers, 320: AMQPConnectionForced, 402: AMQPInvalidPath, 403: AMQPAccessRefused, 404: AMQPNotFound, 405: AMQPResourceLocked, 406: AMQPPreconditionFailed, 501: AMQPFrameError, 502: AMQPSyntaxError, 503: AMQPCommandInvalid, 504: AMQPChannelError, 505: AMQPUnexpectedFrame, 506: AMQPResourceError, 530: AMQPNotAllowed, 540: AMQPNotImplemented, 541: AMQPInternalError}
{ "content_hash": "18ea1d280e09fd9f044ba5c64d2d8922", "timestamp": "", "source": "github", "line_count": 325, "max_line_length": 79, "avg_line_length": 26.295384615384616, "alnum_prop": 0.6805523051720103, "repo_name": "jonahbull/rabbitpy", "id": "49a2474eb639aad00080b79898716aefcaa8ffca", "size": "8546", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rabbitpy/exceptions.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "235763" } ], "symlink_target": "" }
from collections import namedtuple

# A single line item of a purchase, as reported to payment providers.
PurchasedItem = namedtuple(
    'PurchasedItem', ['name', 'quantity', 'price', 'currency', 'sku'])


class RedirectNeeded(Exception):
    """Raised when the payment flow must redirect the user elsewhere."""


class PaymentError(Exception):
    """Raised when a payment cannot be processed."""


class ExternalPostNeeded(Exception):
    """Raised when data must be POSTed to an external endpoint."""
{ "content_hash": "98b39d4879bc70fb27a1c85758f2643d", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 66, "avg_line_length": 17.5625, "alnum_prop": 0.6868327402135231, "repo_name": "artursmet/django-payments", "id": "2aeae276147602f895e7432c32acfa9386b6747e", "size": "281", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "payments/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "899" }, { "name": "JavaScript", "bytes": "2625" }, { "name": "Python", "bytes": "160376" } ], "symlink_target": "" }
def run():
    """Entry point: reset failed downloads for document and attachment views.

    Imports are deferred (and published as module globals) so the module can
    be loaded without the regs_common/settings environment being available.
    """
    global os, settings
    from regs_common.processing import find_views, update_view, find_attachment_views, update_attachment_view
    import os
    import settings

    run_for_view_type('document views', find_views, update_view)
    run_for_view_type('attachment views', find_attachment_views, update_attachment_view)


def run_for_view_type(view_label, find_func, update_func):
    """Mark every failed, non-deleted view of one type as not-downloaded.

    Args:
        view_label: human-readable label used only for progress output.
        find_func: callable(downloaded=..., query=...) yielding dicts that
            contain at least a 'view' object with a ``downloaded`` attribute.
        update_func: callable invoked with each result dict's items as
            keyword arguments to persist the change.
    """
    # COMPAT FIX: py2-only print statements replaced with the call form,
    # which behaves identically for a single argument on Python 2 and 3.
    print('Resetting %s.' % view_label)

    views = find_func(downloaded='failed', query={'deleted': False})

    for result in views:
        result['view'].downloaded = 'no'
        update_func(**result)

    print('Done with %s.' % view_label)


if __name__ == "__main__":
    run()
{ "content_hash": "4f7e04e6e3174982d6c12371063292ad", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 109, "avg_line_length": 32.57142857142857, "alnum_prop": 0.6578947368421053, "repo_name": "sunlightlabs/regulations-scraper", "id": "c43fdb4efcbe5fc65aee75ae8b8c7c40809082e9", "size": "707", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "regscrape/regs_common/commands/reset_downloads.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "286465" }, { "name": "Ruby", "bytes": "150" }, { "name": "Shell", "bytes": "956" } ], "symlink_target": "" }
import os
import shutil
import unittest

import docker
from docker.utils import kwargs_from_env
import six

from .. import helpers

BUSYBOX = 'busybox:buildroot-2014.02'
TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')

# Attributes tracking resources created during a test; each starts empty.
_TMP_ATTRS = (
    'tmp_imgs', 'tmp_containers', 'tmp_folders', 'tmp_volumes',
    'tmp_networks', 'tmp_plugins', 'tmp_secrets', 'tmp_configs',
)


class BaseIntegrationTest(unittest.TestCase):
    """
    A base class for integration test cases. It cleans up the Docker
    server after itself.
    """

    def setUp(self):
        if six.PY2:
            # Python 2 spellings of the assertion helpers used by subclasses.
            self.assertRegex = self.assertRegexpMatches
            self.assertCountEqual = self.assertItemsEqual
        for attr in _TMP_ATTRS:
            setattr(self, attr, [])

    def tearDown(self):
        """Best-effort removal of every resource recorded during the test."""
        client = docker.from_env(version=TEST_API_VERSION)
        # (tracked items, removal callable) — order matches the original
        # teardown sequence; tmp_plugins is tracked but never cleaned here.
        cleanups = [
            (self.tmp_imgs, client.api.remove_image),
            (self.tmp_containers,
             lambda c: client.api.remove_container(c, force=True)),
            (self.tmp_networks, client.api.remove_network),
            (self.tmp_volumes, client.api.remove_volume),
            (self.tmp_secrets, client.api.remove_secret),
            (self.tmp_configs, client.api.remove_config),
        ]
        for items, remove in cleanups:
            for item in items:
                try:
                    remove(item)
                except docker.errors.APIError:
                    # Resource may already be gone; ignore and keep cleaning.
                    pass
        for folder in self.tmp_folders:
            shutil.rmtree(folder)


class BaseAPIIntegrationTest(BaseIntegrationTest):
    """
    A test case for `APIClient` integration tests. It sets up an
    `APIClient` as `self.client`.
    """

    def setUp(self):
        super(BaseAPIIntegrationTest, self).setUp()
        self.client = self.get_client_instance()

    def tearDown(self):
        super(BaseAPIIntegrationTest, self).tearDown()
        self.client.close()

    @staticmethod
    def get_client_instance():
        """Build an APIClient from the environment, pinned to the test API version."""
        return docker.APIClient(
            version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
        )

    @staticmethod
    def _init_swarm(client, **kwargs):
        return client.init_swarm(
            '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs
        )

    def run_container(self, *args, **kwargs):
        """Create, track, and start a container; fail if it exits non-zero."""
        container = self.client.create_container(*args, **kwargs)
        self.tmp_containers.append(container)
        self.client.start(container)
        status = self.client.wait(container)
        if status != 0:
            logs = self.client.logs(container)
            raise Exception(
                "Container exited with code {}:\n{}"
                .format(status, logs))
        return container

    def create_and_start(self, image=BUSYBOX, command='top', **kwargs):
        """Create, track, and start a container without waiting on it."""
        container = self.client.create_container(
            image=image, command=command, **kwargs)
        self.tmp_containers.append(container)
        self.client.start(container)
        return container

    def execute(self, container, cmd, exit_code=0, **kwargs):
        """Run ``cmd`` in ``container`` via exec and assert its exit code."""
        exec_id = self.client.exec_create(container, cmd, **kwargs)
        output = self.client.exec_start(exec_id)
        actual_exit_code = self.client.exec_inspect(exec_id)['ExitCode']
        msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
            " ".join(cmd), exit_code, actual_exit_code, output)
        assert actual_exit_code == exit_code, msg

    def init_swarm(self, **kwargs):
        return self._init_swarm(self.client, **kwargs)
{ "content_hash": "87dca4cf6eb9233e235e5911de78c8c6", "timestamp": "", "source": "github", "line_count": 129, "max_line_length": 79, "avg_line_length": 31.03875968992248, "alnum_prop": 0.5899100899100899, "repo_name": "vpetersson/docker-py", "id": "4f929014bdc69cabf9f804fa61d03935e15a8f08", "size": "4004", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/integration/base.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "3260" }, { "name": "Python", "bytes": "868992" }, { "name": "Shell", "bytes": "749" } ], "symlink_target": "" }
import time, sys
import mote
from moteCache import MoteCache
from mote import Mote

if __name__ == "__main__":
    """
    cache = MoteCache()
    cache.read()
    # print "GetMotes:", cache.getMotes()
    allDevs = mote.detectAllPossible()
    selectedMoteId = None
    for d in allDevs:
        if d in cache.getMotes():
            selectedMoteId = d
            break
    if selectedMoteId == None:
        print "No motes found.  Device ids:", allDevs, "cacheMoteIds:", cache.getMotes().keys()
        sys.exit()
    # mote = cache.getMotes().values()[0]
    mote = cache.getMotes()[selectedMoteId]
    mote.connect()
    """
    # Hard-coded Bluetooth address of the target wiimote.
    mote = Mote(id="00:19:1D:79:93:E0")
    mote.connect()
    mote.setLeds(1,0,0,0)
    mote.startReadThread()
    mote.irBasicModeOn()

    # BUG FIX: the loop was `while(1)` with mote.disconnect() after it,
    # making disconnect unreachable; a Ctrl-C left the mote connected.
    # The try/finally guarantees the device is released on exit.
    try:
        # Poll for button events roughly 10x per second.
        while True:
            time.sleep(0.1)
            events = mote.extractButtonEvents()
            if events:
                # COMPAT: print(...) works on both Python 2 and 3.
                print(events)
    finally:
        mote.disconnect()
{ "content_hash": "98d2bdf12687e38e3ad32ecc729e3892", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 95, "avg_line_length": 21.767441860465116, "alnum_prop": 0.5844017094017094, "repo_name": "rpwagner/tiled-display", "id": "c31bb5ac906a70239c3f095b25c066176e4c5274", "size": "936", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "flWii/buttons.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
"""Convert a Keras model to frugally-deep format. """ import base64 import datetime import hashlib import json import sys import numpy as np from tensorflow.keras import backend as K from tensorflow.keras.layers import Input, Embedding from tensorflow.keras.models import Model, load_model __author__ = "Tobias Hermann" __copyright__ = "Copyright 2017, Tobias Hermann" __license__ = "MIT" __maintainer__ = "Tobias Hermann, https://github.com/Dobiasd/frugally-deep" __email__ = "editgym@gmail.com" STORE_FLOATS_HUMAN_READABLE = False def transform_input_kernel(kernel): """Transforms weights of a single CuDNN input kernel into the regular Keras format.""" return kernel.T.reshape(kernel.shape, order='F') def transform_recurrent_kernel(kernel): """Transforms weights of a single CuDNN recurrent kernel into the regular Keras format.""" return kernel.T def transform_kernels(kernels, n_gates, transform_func): """ Transforms CuDNN kernel matrices (either LSTM or GRU) into the regular Keras format. Parameters ---------- kernels : numpy.ndarray Composite matrix of input or recurrent kernels. n_gates : int Number of recurrent unit gates, 3 for GRU, 4 for LSTM. transform_func: function(numpy.ndarray) Function to apply to each input or recurrent kernel. Returns ------- numpy.ndarray Transformed composite matrix of input or recurrent kernels in C-contiguous layout. 
""" return np.require(np.hstack([transform_func(kernel) for kernel in np.hsplit(kernels, n_gates)]), requirements='C') def transform_bias(bias): """Transforms bias weights of an LSTM layer into the regular Keras format.""" return np.sum(np.split(bias, 2, axis=0), axis=0) def write_text_file(path, text): """Write a string to a file""" with open(path, "w") as text_file: print(text, file=text_file) def int_or_none(value): """Leave None values as is, convert everything else to int""" if value is None: return value return int(value) def keras_shape_to_fdeep_tensor_shape(raw_shape): """Convert a keras shape to an fdeep shape""" return singleton_list_to_value(raw_shape)[1:] def get_layer_input_shape_tensor_shape(layer): """Convert layer input shape to an fdeep shape""" return keras_shape_to_fdeep_tensor_shape(layer.input_shape) def show_tensor(tens): """Serialize 3-tensor to a dict""" return { 'shape': tens.shape[1:], 'values': encode_floats(tens.flatten()) } def get_model_input_layers(model): """Works for different Keras version.""" if hasattr(model, '_input_layers'): return model._input_layers if hasattr(model, 'input_layers'): return model.input_layers raise ValueError('can not get (_)input_layers from model') def measure_predict(model, data_in): """Returns output and duration in seconds""" start_time = datetime.datetime.now() data_out = model.predict(data_in) end_time = datetime.datetime.now() duration = end_time - start_time print('Forward pass took {} s.'.format(duration.total_seconds())) return data_out, duration.total_seconds() def replace_none_with(value, shape): """Replace every None with a fixed value.""" return tuple(list(map(lambda x: x if x is not None else value, shape))) def are_embedding_layer_positions_ok_for_testing(model): """ Test data can only be generated if all embeddings layers are positioned directly behind the input nodes """ def embedding_layer_names(model): layers = model.layers result = set() for layer in layers: if isinstance(layer, 
Embedding): result.add(layer.name) layer_type = type(layer).__name__ if layer_type in ['Model', 'Sequential', 'Functional']: result.union(embedding_layer_names(layer)) return result def embedding_layer_names_at_input_nodes(model): result = set() for input_layer in get_model_input_layers(model): if input_layer._outbound_nodes and isinstance( input_layer._outbound_nodes[0].outbound_layer, Embedding): result.add(input_layer._outbound_nodes[0].outbound_layer.name) return set(result) return embedding_layer_names(model) == embedding_layer_names_at_input_nodes(model) def gen_test_data(model): """Generate data for model verification test.""" def set_shape_idx_0_to_1_if_none(shape): """Change first element in tuple to 1.""" if shape[0] is not None: return shape shape_lst = list(shape) shape_lst[0] = 1 shape = tuple(shape_lst) return shape def generate_input_data(input_layer): """Random data fitting the input shape of a layer.""" if input_layer._outbound_nodes and isinstance( input_layer._outbound_nodes[0].outbound_layer, Embedding): random_fn = lambda size: np.random.randint( 0, input_layer._outbound_nodes[0].outbound_layer.input_dim, size) else: random_fn = np.random.normal try: shape = input_layer.batch_input_shape except AttributeError: shape = input_layer.input_shape return random_fn( size=replace_none_with(32, set_shape_idx_0_to_1_if_none(singleton_list_to_value(shape)))).astype(np.float32) assert are_embedding_layer_positions_ok_for_testing( model), "Test data can only be generated if embedding layers are positioned directly after input nodes." data_in = list(map(generate_input_data, get_model_input_layers(model))) warm_up_runs = 3 test_runs = 5 for i in range(warm_up_runs): if i == 0: # store the results of first call for the test # this is because states of recurrent layers is 0. # cannot call model.reset_states() in some cases in keras without an error. 
# an error occurs when recurrent layer is stateful and the initial state is passed as input data_out_test, duration = measure_predict(model, data_in) else: measure_predict(model, data_in) duration_sum = 0 print('Starting performance measurements.') for _ in range(test_runs): data_out, duration = measure_predict(model, data_in) duration_sum = duration_sum + duration duration_avg = duration_sum / test_runs print('Forward pass took {} s on average.'.format(duration_avg)) return { 'inputs': list(map(show_tensor, as_list(data_in))), 'outputs': list(map(show_tensor, as_list(data_out_test))) } def split_every(size, seq): """Split a sequence every seq elements.""" return (seq[pos:pos + size] for pos in range(0, len(seq), size)) def encode_floats(arr): """Serialize a sequence of floats.""" if STORE_FLOATS_HUMAN_READABLE: return arr.flatten().tolist() return list(split_every(1024, base64.b64encode(arr).decode('ascii'))) def prepare_filter_weights_conv_2d(weights): """Change dimension order of 2d filter weights to the one used in fdeep""" assert len(weights.shape) == 4 return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 3, 0]).flatten() def prepare_filter_weights_slice_conv_2d(weights): """Change dimension order of 2d filter weights to the one used in fdeep""" assert len(weights.shape) == 4 return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 0, 3]).flatten() def prepare_filter_weights_conv_1d(weights): """Change dimension order of 1d filter weights to the one used in fdeep""" assert len(weights.shape) == 3 return np.moveaxis(weights, [0, 1, 2], [1, 2, 0]).flatten() def show_conv_1d_layer(layer): """Serialize Conv1D layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 3 weights_flat = prepare_filter_weights_conv_1d(weights[0]) assert layer.padding in ['valid', 'same', 'causal'] assert len(layer.input_shape) == 3 assert layer.input_shape[0] in {None, 1} result = { 'weights': encode_floats(weights_flat) } if 
len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_conv_2d_layer(layer): """Serialize Conv2D layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 4 weights_flat = prepare_filter_weights_conv_2d(weights[0]) assert layer.padding in ['valid', 'same'] assert len(layer.input_shape) == 4 assert layer.input_shape[0] in {None, 1} result = { 'weights': encode_floats(weights_flat) } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_separable_conv_2d_layer(layer): """Serialize SeparableConv2D layer to dict""" weights = layer.get_weights() assert layer.depth_multiplier == 1 assert len(weights) == 2 or len(weights) == 3 assert len(weights[0].shape) == 4 assert len(weights[1].shape) == 4 # probably incorrect for depth_multiplier > 1? slice_weights = prepare_filter_weights_slice_conv_2d(weights[0]) stack_weights = prepare_filter_weights_conv_2d(weights[1]) assert layer.padding in ['valid', 'same'] assert len(layer.input_shape) == 4 assert layer.input_shape[0] in {None, 1} result = { 'slice_weights': encode_floats(slice_weights), 'stack_weights': encode_floats(stack_weights), } if len(weights) == 3: bias = weights[2] result['bias'] = encode_floats(bias) return result def show_depthwise_conv_2d_layer(layer): """Serialize DepthwiseConv2D layer to dict""" weights = layer.get_weights() assert layer.depth_multiplier == 1 assert len(weights) in [1, 2] assert len(weights[0].shape) == 4 # probably incorrect for depth_multiplier > 1? 
slice_weights = prepare_filter_weights_slice_conv_2d(weights[0]) assert layer.padding in ['valid', 'same'] assert len(layer.input_shape) == 4 assert layer.input_shape[0] in {None, 1} result = { 'slice_weights': encode_floats(slice_weights), } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_batch_normalization_layer(layer): """Serialize batch normalization layer to dict""" moving_mean = K.get_value(layer.moving_mean) moving_variance = K.get_value(layer.moving_variance) result = {} result['moving_mean'] = encode_floats(moving_mean) result['moving_variance'] = encode_floats(moving_variance) if layer.center: beta = K.get_value(layer.beta) result['beta'] = encode_floats(beta) if layer.scale: gamma = K.get_value(layer.gamma) result['gamma'] = encode_floats(gamma) return result def show_dense_layer(layer): """Serialize dense layer to dict""" weights = layer.get_weights() assert len(weights) == 1 or len(weights) == 2 assert len(weights[0].shape) == 2 weights_flat = weights[0].flatten() result = { 'weights': encode_floats(weights_flat) } if len(weights) == 2: bias = weights[1] result['bias'] = encode_floats(bias) return result def show_prelu_layer(layer): """Serialize prelu layer to dict""" weights = layer.get_weights() assert len(weights) == 1 weights_flat = weights[0].flatten() result = { 'alpha': encode_floats(weights_flat) } return result def show_relu_layer(layer): """Serialize relu layer to dict""" assert layer.negative_slope == 0 assert layer.threshold == 0 return {} def show_embedding_layer(layer): """Serialize Embedding layer to dict""" weights = layer.get_weights() assert len(weights) == 1 result = { 'weights': encode_floats(weights[0]) } return result def show_lstm_layer(layer): """Serialize LSTM layer to dict""" assert not layer.go_backwards assert not layer.unroll weights = layer.get_weights() if isinstance(layer.input, list): assert len(layer.input) in [1, 3] assert len(weights) == 2 or len(weights) == 3 
    # Tail of the enclosing show_*_layer serializer (its def starts above this
    # chunk): pack kernel/recurrent weights, plus the optional bias.
    result = {'weights': encode_floats(weights[0]),
              'recurrent_weights': encode_floats(weights[1])}
    if len(weights) == 3:
        result['bias'] = encode_floats(weights[2])
    return result


def show_gru_layer(layer):
    """Serialize GRU layer to dict"""
    # Only plain GRUs are supported: not reversed, not unrolled,
    # and not returning internal state.
    assert not layer.go_backwards
    assert not layer.unroll
    assert not layer.return_state
    weights = layer.get_weights()
    assert len(weights) == 2 or len(weights) == 3  # bias is optional
    result = {'weights': encode_floats(weights[0]),
              'recurrent_weights': encode_floats(weights[1])}
    if len(weights) == 3:
        result['bias'] = encode_floats(weights[2])
    return result


def transform_cudnn_weights(input_weights, recurrent_weights, n_gates):
    # Reorder CuDNN gate kernels into the canonical Keras gate order.
    return transform_kernels(input_weights, n_gates, transform_input_kernel), \
        transform_kernels(recurrent_weights, n_gates, transform_recurrent_kernel)


def show_cudnn_lstm_layer(layer):
    """Serialize a GPU-trained LSTM layer to dict"""
    weights = layer.get_weights()
    if isinstance(layer.input, list):
        assert len(layer.input) in [1, 3]
    assert len(weights) == 3  # CuDNN LSTM always has a bias
    n_gates = 4
    input_weights, recurrent_weights = transform_cudnn_weights(weights[0], weights[1], n_gates)
    result = {'weights': encode_floats(input_weights),
              'recurrent_weights': encode_floats(recurrent_weights),
              'bias': encode_floats(transform_bias(weights[2]))}
    return result


def show_cudnn_gru_layer(layer):
    """Serialize a GPU-trained GRU layer to dict"""
    weights = layer.get_weights()
    assert len(weights) == 3  # CuDNN GRU always has a bias
    n_gates = 3
    input_weights, recurrent_weights = transform_cudnn_weights(weights[0], weights[1], n_gates)
    result = {'weights': encode_floats(input_weights),
              'recurrent_weights': encode_floats(recurrent_weights),
              'bias': encode_floats(weights[2])}
    return result


def get_transform_func(layer):
    """Returns functions that can be applied to layer weights to
    transform them into the standard Keras format, if applicable."""
    if layer.__class__.__name__ in ['CuDNNGRU', 'CuDNNLSTM']:
        if layer.__class__.__name__ == 'CuDNNGRU':
            n_gates = 3
        elif layer.__class__.__name__ == 'CuDNNLSTM':
            n_gates = 4
        input_transform_func = lambda kernels: transform_kernels(kernels, n_gates, transform_input_kernel)
        recurrent_transform_func = lambda kernels: transform_kernels(kernels, n_gates, transform_recurrent_kernel)
    else:
        # Plain Keras layers need no kernel reordering.
        input_transform_func = lambda kernels: kernels
        recurrent_transform_func = lambda kernels: kernels
    if layer.__class__.__name__ == 'CuDNNLSTM':
        bias_transform_func = transform_bias
    else:
        bias_transform_func = lambda bias: bias
    return input_transform_func, recurrent_transform_func, bias_transform_func


def show_bidirectional_layer(layer):
    """Serialize Bidirectional layer to dict"""
    # Each direction may wrap a CuDNN layer, so fetch per-direction
    # transform functions before encoding the weights.
    forward_weights = layer.forward_layer.get_weights()
    assert len(forward_weights) == 2 or len(forward_weights) == 3
    forward_input_transform_func, forward_recurrent_transform_func, forward_bias_transform_func = get_transform_func(
        layer.forward_layer)
    backward_weights = layer.backward_layer.get_weights()
    assert len(backward_weights) == 2 or len(backward_weights) == 3
    backward_input_transform_func, backward_recurrent_transform_func, backward_bias_transform_func = get_transform_func(
        layer.backward_layer)
    result = {'forward_weights': encode_floats(forward_input_transform_func(forward_weights[0])),
              'forward_recurrent_weights': encode_floats(forward_recurrent_transform_func(forward_weights[1])),
              'backward_weights': encode_floats(backward_input_transform_func(backward_weights[0])),
              'backward_recurrent_weights': encode_floats(backward_recurrent_transform_func(backward_weights[1]))}
    if len(forward_weights) == 3:
        result['forward_bias'] = encode_floats(forward_bias_transform_func(forward_weights[2]))
    if len(backward_weights) == 3:
        result['backward_bias'] = encode_floats(backward_bias_transform_func(backward_weights[2]))
    return result


def show_input_layer(layer):
    """Serialize input layer to dict"""
    assert not layer.sparse  # sparse inputs are not supported
    return {}


def show_softmax_layer(layer):
    """Serialize softmax layer to dict"""
    # Only the default (last-axis) softmax is supported. Returns None
    # implicitly, so no extra weight data is stored for this layer.
    assert layer.axis == -1


def show_reshape_layer(layer):
    """Serialize reshape layer to dict"""
    for dim_size in layer.target_shape:
        assert dim_size != -1, 'Reshape inference not supported'


def get_layer_functions_dict():
    # Maps Keras layer class names to their serializer functions.
    return {
        'Conv1D': show_conv_1d_layer,
        'Conv2D': show_conv_2d_layer,
        'SeparableConv2D': show_separable_conv_2d_layer,
        'DepthwiseConv2D': show_depthwise_conv_2d_layer,
        'BatchNormalization': show_batch_normalization_layer,
        'Dense': show_dense_layer,
        'PReLU': show_prelu_layer,
        'ReLU': show_relu_layer,
        'Embedding': show_embedding_layer,
        'LSTM': show_lstm_layer,
        'GRU': show_gru_layer,
        'CuDNNLSTM': show_cudnn_lstm_layer,
        'CuDNNGRU': show_cudnn_gru_layer,
        'Bidirectional': show_bidirectional_layer,
        'TimeDistributed': show_time_distributed_layer,
        'Input': show_input_layer,
        'Softmax': show_softmax_layer
    }


def show_time_distributed_layer(layer):
    # Serialize the wrapped layer as if the time axis did not exist.
    show_layer_functions = get_layer_functions_dict()
    config = layer.get_config()
    class_name = config['layer']['class_name']
    if class_name in show_layer_functions:
        # Drop the time axis (index 1) so the inner layer sees its own shape.
        if len(layer.input_shape) == 3:
            input_shape_new = (layer.input_shape[0], layer.input_shape[2])
        elif len(layer.input_shape) == 4:
            input_shape_new = (layer.input_shape[0], layer.input_shape[2], layer.input_shape[3])
        elif len(layer.input_shape) == 5:
            input_shape_new = (layer.input_shape[0], layer.input_shape[2], layer.input_shape[3], layer.input_shape[4])
        elif len(layer.input_shape) == 6:
            input_shape_new = (layer.input_shape[0], layer.input_shape[2], layer.input_shape[3], layer.input_shape[4],
                               layer.input_shape[5])
        else:
            raise Exception('Wrong input shape')
        layer_function = show_layer_functions[class_name]
        attributes = dir(layer.layer)

        # Shallow proxy of the wrapped layer carrying the corrected input_shape.
        class CopiedLayer:
            pass

        copied_layer = CopiedLayer()
        for attr in attributes:
            try:
                if attr not in ['input_shape', '__class__']:
                    setattr(copied_layer, attr, getattr(layer.layer, attr))
                elif attr == 'input_shape':
                    setattr(copied_layer, 'input_shape', input_shape_new)
            except Exception:
                continue  # some attributes are not readable/settable; skip them
        setattr(copied_layer, "output_shape", getattr(layer, "output_shape"))
        return layer_function(copied_layer)
    else:
        return None


def get_dict_keys(d):
    """Return keys of a dictionary"""
    return [key for key in d]


def merge_two_disjunct_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy.
    No Key is allowed to be present in both dictionaries.
    """
    assert set(get_dict_keys(x)).isdisjoint(get_dict_keys(y))
    z = x.copy()
    z.update(y)
    return z


def is_ascii(some_string):
    """Check if a string only contains ascii characters"""
    try:
        some_string.encode('ascii')
    except UnicodeEncodeError:
        return False
    else:
        return True


def get_all_weights(model, prefix):
    """Serialize all weights of the models layers"""
    show_layer_functions = get_layer_functions_dict()
    result = {}
    layers = model.layers
    assert K.image_data_format() == 'channels_last'
    for layer in layers:
        layer_type = type(layer).__name__
        name = prefix + layer.name
        assert is_ascii(name)
        if name in result:
            raise ValueError('duplicate layer name ' + name)
        if layer_type in ['Model', 'Sequential', 'Functional']:
            # Recurse into nested models, prefixing child layer names.
            result = merge_two_disjunct_dicts(result, get_all_weights(layer, name + '_'))
        else:
            if hasattr(layer, 'data_format'):
                if layer_type in ['AveragePooling1D', 'MaxPooling1D', 'AveragePooling2D', 'MaxPooling2D',
                                  'GlobalAveragePooling1D', 'GlobalMaxPooling1D', 'GlobalAveragePooling2D',
                                  'GlobalMaxPooling2D']:
                    assert layer.data_format == 'channels_last' or layer.data_format == 'channels_first'
                else:
                    assert layer.data_format == 'channels_last'
            show_func = show_layer_functions.get(layer_type, None)
            shown_layer = None
            if show_func:
                shown_layer = show_func(layer)
            if shown_layer:
                result[name] = shown_layer
            if show_func and layer_type == 'TimeDistributed':
                # Record time-distributed rank info even when the wrapped
                # layer itself contributed no weights.
                if name not in result:
                    result[name] = {}
                result[name]['td_input_len'] = encode_floats(np.array([len(layer.input_shape) - 1], dtype=np.float32))
                result[name]['td_output_len'] = encode_floats(np.array([len(layer.output_shape) - 1], dtype=np.float32))
    return result


def get_model_name(model):
    """Return .name or ._name or 'dummy_model_name'"""
    if hasattr(model, 'name'):
        return model.name
    if hasattr(model, '_name'):
        return model._name
    return 'dummy_model_name'


def convert_sequential_to_model(model):
    """Convert a sequential model to the underlying functional format"""
    if type(model).__name__ == 'Sequential':
        name = get_model_name(model)
        if hasattr(model, '_inbound_nodes'):
            inbound_nodes = model._inbound_nodes
        elif hasattr(model, 'inbound_nodes'):
            inbound_nodes = model.inbound_nodes
        else:
            raise ValueError('can not get (_)inbound_nodes from model')
        # Rebuild the model functionally by chaining its layers.
        input_layer = Input(batch_shape=model.layers[0].input_shape)
        prev_layer = input_layer
        for layer in model.layers:
            layer._inbound_nodes = []
            prev_layer = layer(prev_layer)
        funcmodel = Model([input_layer], [prev_layer], name=name)
        model = funcmodel
        if hasattr(model, '_inbound_nodes'):
            model._inbound_nodes = inbound_nodes
        elif hasattr(model, 'inbound_nodes'):
            model.inbound_nodes = inbound_nodes
    assert model.layers
    # Recursively convert any nested sequential submodels.
    for i in range(len(model.layers)):
        layer_type = type(model.layers[i]).__name__
        if layer_type in ['Model', 'Sequential', 'Functional']:
            # "model.layers[i] = ..." would not overwrite the layer.
            model._layers[i] = convert_sequential_to_model(model.layers[i])
    return model


def offset_conv2d_eval(depth, padding, x):
    """Perform a conv2d on x with a given padding"""
    kernel = K.variable(value=np.array([[[[1]] + [[0]] * (depth - 1)]]), dtype='float32')
    return K.conv2d(x, kernel, strides=(3, 3), padding=padding)


def offset_sep_conv2d_eval(depth, padding, x):
    """Perform a separable conv2d on x with a given padding"""
    depthwise_kernel = K.variable(value=np.array([[[[1]] * depth]]), dtype='float32')
    pointwise_kernel = K.variable(value=np.array([[[[1]] + [[0]] * (depth - 1)]]), dtype='float32')
    return K.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(3, 3), padding=padding)


def conv2d_offset_max_pool_eval(_, padding, x):
    """Perform a max pooling operation on x"""
    return K.pool2d(x, (1, 1), strides=(3, 3), padding=padding, pool_mode='max')


def conv2d_offset_average_pool_eval(_, padding, x):
    """Perform an average pooling operation on x"""
    return K.pool2d(x, (1, 1), strides=(3, 3), padding=padding, pool_mode='avg')


def check_operation_offset(depth, eval_f, padding):
    """Check if backend used an offset while placing the filter
    e.g. during a convolution.
    TensorFlow is inconsistent in doing so depending
    on the type of operation, the used device (CPU/GPU) and the input depth.
    """
    in_arr = np.array([[[[i] * depth for i in range(6)]]])
    input_data = K.variable(value=in_arr, dtype='float32')
    output = eval_f(depth, padding, input_data)
    result = K.eval(output).flatten().tolist()
    # Only two placements are possible with stride 3 over 6 columns.
    assert result in [[0, 3], [1, 4]]
    return result == [1, 4]


def get_shapes(tensors):
    """Return shapes of a list of tensors"""
    return [t['shape'] for t in tensors]


def calculate_hash(model):
    # SHA-256 over all layer weights and layer names; identifies the model.
    layers = model.layers
    hash_m = hashlib.sha256()
    for layer in layers:
        for weights in layer.get_weights():
            assert isinstance(weights, np.ndarray)
            hash_m.update(weights.tobytes())
        hash_m.update(layer.name.encode('ascii'))
    return hash_m.hexdigest()


def as_list(value_or_values):
    """Leave lists untouched, convert non-list types to a singleton list"""
    if isinstance(value_or_values, list):
        return value_or_values
    return [value_or_values]


def singleton_list_to_value(value_or_values):
    """
    Leaves non-list values untouched.
    Raises an Exception in case the input list does not have exactly one element.
    """
    if isinstance(value_or_values, list):
        assert len(value_or_values) == 1
        return value_or_values[0]
    return value_or_values


def model_to_fdeep_json(model, no_tests=False):
    """Convert any Keras model to the frugally-deep model format."""
    # Force creation of underlying functional model.
    # see: https://github.com/fchollet/keras/issues/8136
    # Loss and optimizer type do not matter, since we do not train the model.
    model.compile(loss='mse', optimizer='sgd')
    model = convert_sequential_to_model(model)
    test_data = None if no_tests else gen_test_data(model)
    json_output = {}
    print('Converting model architecture.')
    json_output['architecture'] = json.loads(model.to_json())
    json_output['image_data_format'] = K.image_data_format()
    json_output['input_shapes'] = list(map(get_layer_input_shape_tensor_shape, get_model_input_layers(model)))
    json_output['output_shapes'] = list(map(keras_shape_to_fdeep_tensor_shape, as_list(model.output_shape)))
    if test_data:
        json_output['tests'] = [test_data]
    print('Converting model weights.')
    json_output['trainable_params'] = get_all_weights(model, '')
    print('Done converting model weights.')
    print('Calculating model hash.')
    json_output['hash'] = calculate_hash(model)
    print('Model conversion finished.')
    return json_output


def convert(in_path, out_path, no_tests=False):
    """Convert any (h5-)stored Keras model to the frugally-deep model format."""
    print('loading {}'.format(in_path))
    model = load_model(in_path)
    json_output = model_to_fdeep_json(model, no_tests)
    print('writing {}'.format(out_path))
    write_text_file(out_path, json.dumps(
        json_output, allow_nan=False, indent=2, sort_keys=True))


def main():
    """Parse command line and convert model."""
    usage = 'usage: [Keras model in HDF5 format] [output path] (--no-tests)'
    # todo: Use ArgumentParser instead.
    if len(sys.argv) not in [3, 4]:
        print(usage)
        sys.exit(1)
    in_path = sys.argv[1]
    out_path = sys.argv[2]
    no_tests = False
    if len(sys.argv) == 4:
        if sys.argv[3] not in ['--no-tests']:
            print(usage)
            sys.exit(1)
        if sys.argv[3] == '--no-tests':
            no_tests = True
    convert(in_path, out_path, no_tests)


if __name__ == "__main__":
    main()
{ "content_hash": "3e205fb6d9495770da9719f0004345dc", "timestamp": "", "source": "github", "line_count": 815, "max_line_length": 129, "avg_line_length": 34.68466257668712, "alnum_prop": 0.6338616103014009, "repo_name": "BerlinUnited/NaoTH", "id": "b18286f7bf0ce05bc113f9683079de7a91b3b0ed", "size": "28291", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "Utils/py/BallDetection/RegressionNetwork/frugally_exporter/convert_model.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "319" }, { "name": "C", "bytes": "16295" }, { "name": "C++", "bytes": "3831321" }, { "name": "CSS", "bytes": "8839" }, { "name": "HTML", "bytes": "21148" }, { "name": "Java", "bytes": "1816793" }, { "name": "Jupyter Notebook", "bytes": "8092" }, { "name": "Lua", "bytes": "73794" }, { "name": "MATLAB", "bytes": "141780" }, { "name": "Python", "bytes": "1337382" }, { "name": "Shell", "bytes": "60599" } ], "symlink_target": "" }
import logging
import time
from collections import namedtuple
from typing import List

from apscheduler.schedulers.blocking import BlockingScheduler

from player import play_jazz_radio
from player import play_rock_radio
from player import play_weather_audio_from_yle
from weather_api import fmi_forecast  # FMI forecast only works in north-east Europe

logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)

# One alarm occurrence: id, the originally scheduled time, and the
# (potentially weather-adjusted) time to actually wake the user.
Alarm = namedtuple("Alarm", "id original_time wake_up_time")


def calculate_wake_time(original_time, weather):
    # Placeholder: ignores `weather` and keeps the original time.
    # NOTE(review): presumably meant to shift the wake-up time by forecast.
    return original_time


def alarm_events(weather) -> List[Alarm]:
    # Placeholder: the inner loop iterates an empty list, so no alarms
    # are ever produced (see the warning below).
    alarms = list()
    for event in list():
        alarms.append(Alarm(
            id=1337,
            original_time=time.localtime(),
            wake_up_time=calculate_wake_time(time.localtime(), weather)
        ))
    logger.warning("alarm_events function not yet built")
    return alarms


def weather_forecast(time_stamp):
    # Location is hard-coded; FMI only covers north-east Europe (see import note).
    return fmi_forecast(time_stamp=time_stamp, location="Helsinki")


def sound_alarm(alarm_type="jazz"):
    """Start audio playback for the given alarm type; defaults to jazz."""
    logger.info("Start playing music")
    if alarm_type == "jazz":
        play_jazz_radio()
    elif alarm_type == "rock":
        play_rock_radio()
    elif alarm_type == "weather":
        # Weather report first, then continue with music.
        play_weather_audio_from_yle()
        play_jazz_radio()
    else:
        play_jazz_radio()


def alarm(old_events):
    """Scheduler callback: fire any due alarms not already in old_events."""
    now = time.localtime()
    logger.debug("running alarm check at {}".format(now))
    weather = weather_forecast(time_stamp=now)
    for event in alarm_events(weather):
        if event.wake_up_time >= now and event.id not in old_events:
            # NOTE(review): "or True" forces the weather alarm on every
            # firing -- looks like a debugging override; confirm.
            if weather.type == "interesting" or True:
                sound_alarm(alarm_type="weather")
            else:
                sound_alarm()
            old_events.append(event.id)


def user_actions():
    # NOTE(review): prompt text looks like a leftover example; confirm.
    return input("Please enter your age: ")


def main():
    old_events = list()
    scheduler = BlockingScheduler()
    scheduler.add_job(alarm, 'interval', seconds=60, kwargs={"old_events": old_events})
    # NOTE(review): BlockingScheduler.start() blocks, so the loop below
    # only runs after the scheduler stops -- confirm intent.
    scheduler.start()
    time.sleep(1)  # makes sure that the user actions are printed after initial prints
    while True:
        user_actions()
    exit(1)  # unreachable: the while-loop above never breaks


if __name__ == "__main__":
    main()
{ "content_hash": "5ee245e37a1217a77029413d34bba037", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 87, "avg_line_length": 25.586206896551722, "alnum_prop": 0.6545372866127583, "repo_name": "JuhaniTakkunen/adaptive-alarm", "id": "91d2292faecf04815296c667d50fff6249390da9", "size": "2226", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "alarm.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "6718" } ], "symlink_target": "" }
from django.contrib import admin

from readbacks.apps.reader.models import Grade, Unit, Reading, Paragraph

# Expose the reader models in the Django admin with the default ModelAdmin.
for _model in (Grade, Unit, Reading, Paragraph):
    admin.site.register(_model)
{ "content_hash": "e6e96e9daf2ab85015afa3f55dbd0b72", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 72, "avg_line_length": 24.666666666666668, "alnum_prop": 0.8153153153153153, "repo_name": "argybarg/readbacks", "id": "80ef4d01fe07a8c484e71bdf10b701cb48b5016d", "size": "222", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "readbacks/apps/reader/admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "27861" }, { "name": "Python", "bytes": "15383" } ], "symlink_target": "" }
import pyaf.Bench.TS_datasets as tsds  # NOTE(review): appears unused here; confirm
import tests.artificial.process_artificial_dataset as art

# Auto-generated benchmark case: build and process one artificial time series
# (N=32 daily points, MovingMedian trend, cycle length 5, Anscombe transform,
# zero noise, 100 exogenous variables, no autoregressive terms).
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0);
{ "content_hash": "ae1743752d8922ac78b97bf40b532420", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 166, "avg_line_length": 38, "alnum_prop": 0.706766917293233, "repo_name": "antoinecarme/pyaf", "id": "460774ea4d5e355abb3f066663d60945cc3c8897", "size": "266", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/artificial/transf_Anscombe/trend_MovingMedian/cycle_5/ar_/test_artificial_32_Anscombe_MovingMedian_5__100.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "6773299" }, { "name": "Procfile", "bytes": "24" }, { "name": "Python", "bytes": "54209093" }, { "name": "R", "bytes": "807" }, { "name": "Shell", "bytes": "3619" } ], "symlink_target": "" }
import codecs

from setuptools import setup, find_packages

from geotagger import __version__


def long_description():
    """Read the long description for PyPI from README.rst (UTF-8)."""
    with codecs.open('README.rst', encoding='utf8') as readme:
        return readme.read()


def install_requires():
    """Parse requirements.txt into a list of requirement strings."""
    with codecs.open('requirements.txt', encoding='utf8') as req_file:
        contents = req_file.read()
    return contents.strip().split()


setup(
    name='geotagger',
    version=__version__,
    description='Geotag photos with exiftool based on your Moves app history',
    long_description=long_description(),
    url='https://github.com/jakubroztocil/geotagger',
    download_url='https://github.com/jakubroztocil/geotagger',
    author='Jakub Roztocil',
    author_email='jakub@roztocil.co',
    license='MIT',
    include_package_data=True,
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'geotagger = geotagger.__main__:main',
        ],
    },
    install_requires=install_requires(),
    classifiers=[],
)
{ "content_hash": "6cf9bfa738bd7063f32611380ac0efa1", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 78, "avg_line_length": 25.473684210526315, "alnum_prop": 0.6518595041322314, "repo_name": "jkbrzt/geotagger", "id": "d86337ef8c239fa3d73a8043f3d761e2d09aac85", "size": "968", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "24655" } ], "symlink_target": "" }
from flup.server.fcgi import WSGIServer
from frontend import app

# Serve the WSGI app over FastCGI, binding to the Unix socket configured
# under FCGI_SOCKET in the app's config. Blocks until the server stops.
WSGIServer(app, bindAddress=app.config['FCGI_SOCKET']).run()
{ "content_hash": "c1e3eeaad032211d7420416538f2d8e6", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 60, "avg_line_length": 31.75, "alnum_prop": 0.7952755905511811, "repo_name": "jkossen/imposter", "id": "68c99ba0399bd761dfdc586e459dc00bc10e05a0", "size": "149", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/frontend_fcgi.py", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "JavaScript", "bytes": "14966" }, { "name": "Python", "bytes": "59910" } ], "symlink_target": "" }
""" Provide functions for the trigonometric functions. """ from __future__ import absolute_import, division, print_function import math def aspect_ratio(width, height): return float(width) / float(height) def calculate_fov(zoom, height=1.0): """Calculates the required FOV to set the view frustrum to have a view with the specified height at the specified distance. :param float zoom: The distance to calculate the FOV for. :param float height: The desired view height at the specified distance. The default is 1.0. :rtype: A float representing the FOV to use in degrees. """ # http://www.glprogramming.com/red/chapter03.html rad_theta = 2.0 * math.atan2(height / 2.0, zoom) return math.degrees(rad_theta) def calculate_zoom(fov, height=1.0): """Calculates the zoom (distance) from the camera with the specified FOV and height of image. :param float fov: The FOV to use. :param float height: The height of the image at the desired distance. :rtype: A float representing the zoom (distance) from the camera for the desired height at the specified FOV. :raise ZeroDivisionError: Raised if the fov is 0.0. """ return float(height) / math.tan(fov / 2.0) def calculate_height(fov, zoom): """Performs the opposite of calculate_fov. Used to find the current height at a specific distance. :param float fov: The current FOV. :param float zoom: The distance to calculate the height for. :rtype: A float representing the height at the specified distance for the specified FOV. """ height = zoom * (math.tan(fov / 2.0)) return height def calculate_plane_size(aspect_ratio, fov, distance): """Calculates the width and height of a plane at the specified distance using the FOV of the frustrum and aspect ratio of the viewport. :param float aspect_ratio: The aspect ratio of the viewport. :param float fov: The FOV of the frustrum. :param float distance: The distance from the origin/camera of the plane to calculate. :rtype: A tuple of two floats: width and height: The width and height of the plane. 
""" # http://www.songho.ca/opengl/gl_transform.html # http://nehe.gamedev.net/article/replacement_for_gluperspective/21002/ # http://steinsoft.net/index.php?site=Programming/Code%20Snippets/OpenGL/gluperspective&printable=1 tangent = math.radians(fov) height = distance * tangent width = height * aspect_ratio return width * 2.0, height * 2.0
{ "content_hash": "520c97f8325fb16d2be4e3e9501cca21", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 103, "avg_line_length": 34.026315789473685, "alnum_prop": 0.688708430007734, "repo_name": "PhloxAR/math3", "id": "646d7e19974845fa9d3e3f130e2874756cb1a457", "size": "2610", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "math3/funcs/trig.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "293019" }, { "name": "Shell", "bytes": "1377" } ], "symlink_target": "" }
import inspect
import imp
import os
import sys
import types
import time
import importlib

import pico

# module-name -> last-seen source mtime; used by load() to detect changes.
_mtimes = {}


def module_dict(module):
    """Describe a pico module: its exported classes, functions and docs.

    NOTE: this file uses Python 2 semantics throughout (list-returning
    map/filter, reload(), map(None, ...)).
    """
    module_dict = {}
    # Optional whitelist of names the module chooses to export.
    pico_exports = getattr(module, 'pico_exports', None)
    members = inspect.getmembers(module)

    def function_filter(x):
        # Exported: a public function defined in this module, not marked private.
        (name, f) = x
        return ((inspect.isfunction(f) or inspect.ismethod(f))
                and (pico_exports is None or name in pico_exports)
                and f.__module__ == module.__name__
                and not name.startswith('_')
                and not hasattr(f, 'private'))

    def class_filter(x):
        # Exported: a pico.Pico / pico.object subclass defined in this module.
        (name, c) = x
        return (inspect.isclass(c)
                and (issubclass(c, pico.Pico) or issubclass(c, pico.object))
                and (pico_exports is None or name in pico_exports)
                and c.__module__ == module.__name__)

    class_defs = map(class_dict, filter(class_filter, members))
    function_defs = map(func_dict, filter(function_filter, members))
    module_dict['classes'] = class_defs
    module_dict['functions'] = function_defs
    module_dict['__doc__'] = module.__doc__
    module_dict['__headers__'] = getattr(module, '__headers__', {})
    return module_dict


def class_dict(x):
    """Describe one exported class: constructor, public methods and docs."""
    name, cls = x

    def method_filter(x):
        (name, f) = x
        return ((inspect.isfunction(f) or inspect.ismethod(f))
                and (not name.startswith('_') or name == '__init__')
                and not hasattr(f, 'private'))

    class_dict = {'__class__': cls.__name__}
    class_dict['name'] = name
    methods = filter(method_filter, inspect.getmembers(cls))
    # getmembers() sorts by name, so '__init__' comes first; pop(0) grabs it.
    class_dict['__init__'] = func_dict(methods.pop(0))
    class_dict['functions'] = map(func_dict, methods)
    class_dict['__doc__'] = cls.__doc__
    class_dict['__headers__'] = getattr(cls, '__headers__', {})
    return class_dict


def func_dict(x):
    """Describe one exported function: name, flags, args and docstring."""
    name, f = x
    func_dict = {}
    func_dict['name'] = name
    func_dict['cache'] = ((hasattr(f, 'cacheable') and f.cacheable))
    func_dict['stream'] = ((hasattr(f, 'stream') and f.stream))
    a = inspect.getargspec(f)
    arg_list_r = reversed(a.args)
    defaults_list_r = reversed(a.defaults or [None])
    # Python 2 idiom: map(None, ...) zips with None-padding, pairing each
    # trailing argument with its default value (others get None).
    args = reversed(map(None, arg_list_r, defaults_list_r))
    args = filter(lambda x: x[0] and x[0] != 'self', args)
    func_dict['args'] = args
    func_dict['doc'] = f.__doc__
    return func_dict


def load(module_name, RELOAD=False):
    """Import (and optionally hot-reload) a pico-enabled module by name.

    Raises ImportError unless the target module itself imported pico.
    """
    if module_name == 'pico':
        return sys.modules['pico']
    if module_name == 'pico.modules':
        if module_name in sys.modules:
            return sys.modules[module_name]
        else:
            return sys.modules[__name__]
    modules_path = './'
    if not sys.path.__contains__(modules_path):
        sys.path.insert(0, modules_path)
    m = importlib.import_module(module_name)
    if RELOAD:
        # Reload only when the source file changed since we last saw it.
        mtime = os.stat(m.__file__.replace('.pyc', '.py')).st_mtime
        if _mtimes.get(module_name, mtime) < mtime:
            if module_name in sys.modules:
                del sys.modules[module_name]
            m = importlib.import_module(module_name)
            m = reload(m)
            print("Reloaded module %s, changed at %s" % (module_name, time.ctime(mtime)))
        _mtimes[module_name] = mtime
    if not (hasattr(m, 'pico') and m.pico == pico):
        raise ImportError('This module has not imported pico!')
    return m


def module_proxy(cls):
    """Build a synthetic module exposing a class's public methods as functions."""
    module_name = cls.__module__
    module = imp.new_module(module_name)
    module.pico = pico

    def method_filter(x):
        (name, f) = x
        return ((inspect.isfunction(f) or inspect.ismethod(f))
                and (not name.startswith('_') or name == '__init__')
                and not hasattr(f, 'private'))

    methods = filter(method_filter, inspect.getmembers(cls))
    for (name, f) in methods:
        setattr(module, name, f)
    return module


# Tells the JSON layer how to serialize module objects.
json_dumpers = {
    types.ModuleType: module_dict
}
{ "content_hash": "24dc6439785c0fae08ea10ced7f4bb64", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 76, "avg_line_length": 33.28333333333333, "alnum_prop": 0.5743615423134703, "repo_name": "AdiPersonalWorks/ATOS_GOM_SystemPrototyping", "id": "960ed034ed228684d3db5a37cdcab89f64b9614d", "size": "3994", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pico/modules.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "95610" }, { "name": "HTML", "bytes": "377950" }, { "name": "JavaScript", "bytes": "51334" }, { "name": "PHP", "bytes": "18505" }, { "name": "Python", "bytes": "58951" }, { "name": "Ruby", "bytes": "752" } ], "symlink_target": "" }
import os
import stat
import re

__version__ = '0.2.0'
__author__ = 'Yurii Zhytskyi'


class MSOfficeInstaller:
    """Drives an MS Office (Wine) installer script and then rewrites the
    wine-generated .desktop entries so each Office program appears once
    with all of its MIME types merged."""

    def __init__(self, file_name):
        self.f_name = file_name
        # old .desktop filename -> parsed key/value entries
        self.desktop_files = dict()
        # program display name -> list of .desktop filenames that point at it
        self.programs = dict()
        # WINEPREFIX path -> set of ProgIDs referenced from that prefix
        self.wine_prefixes = dict()
        # ProgID -> executable path extracted from the Wine registry
        self.programs_path = dict()
        self.share_folder = '%s/.local/share' % os.environ['HOME']

    def set_exec_permission(self):
        """Make the installer script user-executable if it is not already."""
        file_permissions = self.get_file_permissions()
        if not os.access(self.f_name, os.X_OK):
            # Add the user-executable bit on top of the current mode.
            os.chmod(self.f_name, file_permissions + stat.S_IXUSR)
            self.get_file_permissions()  # print the updated permissions

    def get_file_permissions(self):
        """Print and return the file's permission bits."""
        f_permissions = stat.S_IMODE(os.stat(self.f_name).st_mode)
        print('file {name} current permissions is {permissions}'.format(name=self.f_name,
                                                                        permissions=oct(f_permissions)))
        return f_permissions

    def run_installer(self):
        # Forward our own CLI arguments to the installer script.
        import sys
        cmd = ['./%s' % self.f_name] + sys.argv[1:]
        os.system(' '.join(cmd))

    def fix_application_list(self):
        """Rebuild the .desktop files under ~/.local/share/applications
        and refresh the desktop/MIME databases. Restores the cwd after."""
        curr_path = os.getcwd()
        os.chdir('%s/applications' % self.share_folder)
        self._parse_desktop_file()
        self._get_desktop_file_for_each_program()
        self._get_progid_for_each_wineprefix()
        self._find_programs_path_by_progid()
        self._create_new_desktop_files()
        self._remove_old_desktop_files()
        self._update_desktop_and_mime_databases()
        os.chdir(curr_path)

    def _update_desktop_and_mime_databases(self):
        os.system('update-desktop-database %s/applications' % self.share_folder)
        os.system('update-mime-database %s/mime' % self.share_folder)

    def _remove_old_desktop_files(self):
        for old_desktop_file in self.desktop_files:
            os.remove(old_desktop_file)

    def _create_new_desktop_files(self):
        # Write one merged .desktop file per program, combining the MIME
        # types of every wine-extension entry that pointed at it.
        for prog in self.programs:
            with open('%s.desktop' % prog, 'w') as new_desktop_file:
                new_desktop_file.write('[Desktop Entry]\n')
                # Use the first .desktop entry as the template for the keys.
                curr_prog = self.desktop_files[self.programs[prog][0]]
                key_names = ('Type', 'Name', 'Icon', 'NoDisplay', 'StartupNotify')
                for key in key_names:
                    if key in curr_prog:
                        new_desktop_file.write('%s=%s\n' % (key, curr_prog[key]))
                new_desktop_file.write(
                    '%s=%s\n' % ('Exec', curr_prog['Exec']['Command'].format(
                        path=self.programs_path[curr_prog['Exec']['ProgIDOpen']]))
                )
                new_desktop_file.write(
                    '%s=%s\n' % ('MimeType',
                                 ''.join([self.desktop_files[p]['MimeType'] for p in self.programs[prog]])))

    def _find_programs_path_by_progid(self):
        # Scan each prefix's system.reg for the ProgID's shell\Open\command
        # entry and capture the executable path from its value.
        for prefix in self.wine_prefixes:
            with open('%s/system.reg' % prefix) as reg:
                reg_file = reg.read()
                for id_ in self.wine_prefixes[prefix]:
                    founded_path = re.search(r'\[Software\\\\Classes\\\\'
                                             + '\\.'.join(id_.split('.'))
                                             + r'\\\\shell\\\\Open\\\\command\]'
                                             + '((.+\n)+)@=\"(\\\\\")?([^\"]+)\\\\?\"',
                                             reg_file, re.IGNORECASE)
                    if founded_path:
                        # Last group is the command; strip the ' %1' arg
                        # placeholder and trailing backslashes.
                        self.programs_path[id_] = founded_path.groups()[-1].strip(' %1').strip('\\\\')

    def _get_progid_for_each_wineprefix(self):
        # Split each Exec line: record the WINEPREFIX -> ProgID mapping and
        # turn the command into a '{path}'-formattable template.
        for desktop in self.desktop_files:
            exec_ = self.desktop_files[desktop]['Exec'].split(' ')
            prog_id_ind = exec_.index('/ProgIDOpen')
            for i in exec_:
                matched = re.match('WINEPREFIX=\\"(.+)\\"', i)
                if matched:
                    self.wine_prefixes.setdefault(matched.group(1), set()).add(exec_[prog_id_ind + 1])
            self.desktop_files[desktop]['Exec'] = {
                'Command': ' '.join(exec_[:prog_id_ind]) + ' {path} ' + exec_[-1],
                'ProgIDOpen': exec_[prog_id_ind + 1]
            }

    def _get_desktop_file_for_each_program(self):
        # Group .desktop filenames by the program Name they declare.
        for desktop in self.desktop_files:
            self.programs.setdefault(self.desktop_files[desktop]['Name'], []).append(desktop)

    def _parse_desktop_file(self):
        # Read every wine-extension-*.desktop into a key/value dict,
        # skipping section headers like "[Desktop Entry]".
        import glob
        mime_type_list = glob.glob('wine-extension-*.desktop')
        for mime in mime_type_list:
            with open(mime) as mime_file:
                self.desktop_files[mime] = dict()
                for line in mime_file:
                    striped_line = line.strip()
                    if striped_line and striped_line[0] != '[' and striped_line[-1] != ']':
                        key, value = striped_line[:striped_line.find('=')], striped_line[striped_line.find('=') + 1:]
                        self.desktop_files[mime][key] = value

    # Not needed any more, but kept here as an example.
    @staticmethod
    def getpass(prompt="Password: "):
        import termios
        import sys
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)
        new = termios.tcgetattr(fd)
        new[3] = new[3] & ~termios.ECHO  # lflags
        try:
            termios.tcsetattr(fd, termios.TCSADRAIN, new)
            passwd = input(prompt)
        finally:
            # Always restore the terminal's echo setting.
            termios.tcsetattr(fd, termios.TCSADRAIN, old)
            print()
        return passwd


installer_file_name = 'installer.sh'
if os.access(installer_file_name, os.F_OK):
    office_installer = MSOfficeInstaller(installer_file_name)
    # TODO run installer from python file
    # office_installer.set_exec_permission()
    # office_installer.run_installer()
    office_installer.fix_application_list()
else:
    print('No installer file.\nPlease check installer.sh file.')
{ "content_hash": "70e934c60ab64a44624a2b80f070e04d", "timestamp": "", "source": "github", "line_count": 138, "max_line_length": 122, "avg_line_length": 41.67391304347826, "alnum_prop": 0.5513823682837767, "repo_name": "sweetcolor/ms_office_installer_for_ubuntu", "id": "16631803e6778eb15a406b2f68772e108d9bec06", "size": "5751", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ms_office_installer.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "5751" }, { "name": "Shell", "bytes": "2987" } ], "symlink_target": "" }
import os

import dataset
import metrics
from losses import load_myloss
from keras.optimizers import Adam

''' contains utilities related to model training process'''


def train_model(model, model_name, num_epochs, class_weights=None):
    """Compile and fit `model` on the baseline training generator, save it
    under saved_models/<model_name>.h5 and return that path.

    :param model: a compiled-or-not Keras model to train.
    :param model_name: basename (without extension) for the saved .h5 file.
    :param num_epochs: number of training epochs.
    :param class_weights: optional class weights forwarded to load_myloss.
    :return: filesystem path of the saved model file.
    """
    training_gen = dataset.training(os.path.join('.', 'baseline'))
    optimizer = Adam(lr=0.001)
    model.compile(
        loss=load_myloss(class_weights),
        optimizer=optimizer,
        metrics=[metrics.fmeasure, metrics.precision, metrics.recall])
    # TODO add precision and recall, also extra: accuracy?
    #
    # Train the model
    #
    model.fit_generator(
        training_gen,
        epochs=num_epochs,  # run this many epochs
        steps_per_epoch=20,  # run this many mini batches per epoch
    )
    model_path = "saved_models/" + model_name + ".h5"
    model.save(model_path)
    return model_path
{ "content_hash": "2cb2d9beb70edbfc29de7232484af8b1", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 89, "avg_line_length": 24.52777777777778, "alnum_prop": 0.6579841449603624, "repo_name": "williamdjones/cv_assignment_5", "id": "2eaa7bdd7cd9d89bdead5ef4942e024859a58c2d", "size": "883", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "train.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "18789" }, { "name": "TeX", "bytes": "14204" } ], "symlink_target": "" }
import unittest import IECore import Gaffer import GafferTest class NodeTest( GafferTest.TestCase ) : def testParenting( self ) : c = Gaffer.GraphComponent() n = Gaffer.Node() self.assertEqual( n.acceptsParent( c ), False ) self.assertRaises( RuntimeError, c.addChild, n ) n2 = Gaffer.Node() self.assertEqual( n.acceptsParent( n2 ), True ) n2.addChild( n ) p = Gaffer.Plug() self.assert_( n.acceptsChild( p ) ) self.assert_( not n.acceptsParent( p ) ) n.addChild( p ) self.assert_( p.parent().isSame( n ) ) def testNaming( self ) : n = Gaffer.Node() self.assertEqual( n.getName(), "Node" ) def testScriptNode( self ) : n = Gaffer.Node() n2 = Gaffer.Node() self.assertEqual( n.scriptNode(), None ) self.assertEqual( n2.scriptNode(), None ) sn = Gaffer.ScriptNode() sn.addChild( n ) n.addChild( n2 ) self.assertTrue( sn.scriptNode().isSame( sn ) ) self.assertTrue( n.scriptNode().isSame( sn ) ) self.assertTrue( n2.scriptNode().isSame( sn ) ) def testExtendedConstructor( self ) : n = Gaffer.Node() self.assertEqual( n.getName(), "Node" ) n = Gaffer.Node( "a" ) self.assertEqual( n.getName(), "a" ) self.assertRaises( Exception, Gaffer.Node, "too", "many" ) def testDynamicPlugSerialisationOrder( self ) : n = Gaffer.Node() n["p1"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) n["p2"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) n["p3"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) self.assertEqual( n.children()[0].getName(), "user" ) self.assertEqual( n.children()[1].getName(), "p1" ) self.assertEqual( n.children()[2].getName(), "p2" ) self.assertEqual( n.children()[3].getName(), "p3" ) s = Gaffer.ScriptNode() s["n"] = n ss = s.serialise() s = Gaffer.ScriptNode() s.execute( ss ) self.assertEqual( s["n"].children()[0].getName(), "user" ) self.assertEqual( s["n"].children()[1].getName(), "p1" ) self.assertEqual( s["n"].children()[2].getName(), "p2" ) self.assertEqual( 
s["n"].children()[3].getName(), "p3" ) def testSerialiseDynamicStringPlugs( self ) : n = Gaffer.Node() n["p1"] = Gaffer.StringPlug( defaultValue = "default", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) n["p1"].setValue( "value" ) self.assertEqual( n["p1"].getValue(), "value" ) s = Gaffer.ScriptNode() s["n"] = n ss = s.serialise() s = Gaffer.ScriptNode() s.execute( ss ) self.assertEqual( s["n"]["p1"].defaultValue(), "default" ) self.assertEqual( s["n"]["p1"].getValue(), "value" ) def testSerialiseDynamicBoolPlugs( self ) : n = Gaffer.Node() n["p1"] = Gaffer.BoolPlug( defaultValue = True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) n["p1"].setValue( False ) s = Gaffer.ScriptNode() s["n"] = n ss = s.serialise() s = Gaffer.ScriptNode() s.execute( ss ) self.assertEqual( s["n"]["p1"].defaultValue(), True ) self.assertEqual( s["n"]["p1"].getValue(), False ) def testUnparentingRemovesConnections( self ) : s = Gaffer.ScriptNode() n1 = GafferTest.AddNode( "n1" ) n2 = GafferTest.AddNode( "n2" ) s.addChild( n1 ) s.addChild( n2 ) n2["op1"].setInput( n1["sum"] ) self.failUnless( n2["op1"].getInput().isSame( n1["sum"] ) ) del s["n2"] self.assertEqual( n2["op1"].getInput(), None ) s.addChild( n2 ) n2["op1"].setInput( n1["sum"] ) self.failUnless( n2["op1"].getInput().isSame( n1["sum"] ) ) del s["n1"] self.assertEqual( n2["op1"].getInput(), None ) def testUnparentingRemovesUserConnections( self ) : s = Gaffer.ScriptNode() n1 = GafferTest.AddNode() n2 = GafferTest.AddNode() s["n1"] = n1 s["n2"] = n2 s["n1"]["user"]["i1"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) s["n1"]["user"]["i2"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) s["n1"]["user"]["v"] = Gaffer.V2iPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) s["n1"]["user"]["i1"].setInput( n2["sum"] ) s["n2"]["op1"].setInput( s["n1"]["user"]["i2"] ) s["n1"]["user"]["v"][0].setInput( s["n2"]["sum"] 
) s["n2"]["op2"].setInput( s["n1"]["user"]["v"][1] ) del s["n1"] self.assertTrue( n1.parent() is None ) self.assertTrue( n1["user"]["i1"].getInput() is None ) self.assertTrue( n2["op1"].getInput() is None ) self.assertTrue( n1["user"]["v"][0].getInput() is None ) self.assertTrue( n2["op2"].getInput() is None ) def testOverrideAcceptsInput( self ) : class AcceptsInputTestNode( Gaffer.Node ) : def __init__( self, name = "AcceptsInputTestNode" ) : Gaffer.Node.__init__( self, name ) self.addChild( Gaffer.IntPlug( "in" ) ) self.addChild( Gaffer.IntPlug( "out", Gaffer.Plug.Direction.Out ) ) def acceptsInput( self, plug, inputPlug ) : if plug.isSame( self["in"] ) : return isinstance( inputPlug.source().node(), AcceptsInputTestNode ) return True n1 = AcceptsInputTestNode() n2 = AcceptsInputTestNode() n3 = GafferTest.AddNode() self.assertEqual( n1["in"].acceptsInput( n2["out"] ), True ) self.assertEqual( n1["in"].acceptsInput( n3["sum"] ), False ) n1["in"].setInput( n2["out"] ) self.assertRaises( RuntimeError, n1["in"].setInput, n3["sum"] ) # check that we can't use a pass-through connection as # a loophole. n1["in"].setInput( None ) # this particular connection makes no sense but breaks # no rules - we're just using it to test the loophole. 
n2["out"].setInput( n3["sum"] ) self.assertEqual( n1["in"].acceptsInput( n2["out"] ), False ) self.assertRaises( RuntimeError, n1["in"].setInput, n2["out"] ) def testPlugFlagsChangedSignal( self ) : n = Gaffer.Node() n["p"] = Gaffer.Plug() cs = GafferTest.CapturingSlot( n.plugFlagsChangedSignal() ) self.assertEqual( len( cs ), 0 ) n["p"].setFlags( Gaffer.Plug.Flags.Dynamic, True ) self.assertEqual( len( cs ), 1 ) self.failUnless( cs[0][0].isSame( n["p"] ) ) # second time should have no effect because they're the same n["p"].setFlags( Gaffer.Plug.Flags.Dynamic, True ) self.assertEqual( len( cs ), 1 ) n["p"].setFlags( Gaffer.Plug.Flags.Dynamic, False ) self.assertEqual( len( cs ), 2 ) self.failUnless( cs[1][0].isSame( n["p"] ) ) def testUserPlugs( self ) : s = Gaffer.ScriptNode() s["n"] = Gaffer.Node() s["n"]["user"]["test"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) s["n"]["user"]["test"].setValue( 10 ) s2 = Gaffer.ScriptNode() s2.execute( s.serialise() ) self.assertEqual( s2["n"]["user"]["test"].getValue(), 10 ) def testNodesConstructWithDefaultValues( self ) : self.assertNodesConstructWithDefaultValues( Gaffer ) self.assertNodesConstructWithDefaultValues( GafferTest ) def testUserPlugDoesntTrackChildConnections( self ) : s = Gaffer.ScriptNode() s["n1"] = Gaffer.Node() s["n1"]["user"]["p"] = Gaffer.IntPlug() s["n2"] = Gaffer.Node() s["n2"]["user"]["p"] = Gaffer.IntPlug() s["n2"]["user"]["p"].setInput( s["n1"]["user"]["p"] ) self.assertTrue( s["n2"]["user"]["p"].getInput().isSame( s["n1"]["user"]["p"] ) ) self.assertTrue( s["n2"]["user"].getInput() is None ) s["n1"]["user"]["p2"] = Gaffer.IntPlug() self.assertEqual( len( s["n2"]["user"] ), 1 ) def testInternalConnectionsSurviveUnparenting( self ) : class InternalConnectionsNode( Gaffer.Node ) : def __init__( self, name = "InternalConnectionsNode" ) : Gaffer.Node.__init__( self, name ) self["in1"] = Gaffer.IntPlug() self["in2"] = Gaffer.IntPlug() self["__in"] = Gaffer.IntPlug() 
self["out1"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out ) self["out2"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out ) self["__out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out ) self["out1"].setInput( self["in1"] ) self["__add"] = GafferTest.AddNode() self["__add"]["op1"].setInput( self["in2"] ) self["__add"]["op2"].setInput( self["__out"] ) self["__in"].setInput( self["__add"]["sum"] ) self["out2"].setInput( self["__add"]["sum"] ) IECore.registerRunTimeTyped( InternalConnectionsNode ) s = Gaffer.ScriptNode() n = InternalConnectionsNode() s["n"] = n def assertConnections() : self.assertTrue( n["out1"].getInput().isSame( n["in1"] ) ) self.assertTrue( n["__add"]["op1"].getInput().isSame( n["in2"] ) ) self.assertTrue( n["__add"]["op2"].getInput().isSame( n["__out"] ) ) self.assertTrue( n["out2"].getInput().isSame( n["__add"]["sum"] ) ) self.assertTrue( n["__in"].getInput().isSame( n["__add"]["sum"] ) ) assertConnections() s.removeChild( n ) assertConnections() s.addChild( n ) assertConnections() def testRanges( self ) : n = Gaffer.Node() n["c1"] = Gaffer.Node() n["c2"] = GafferTest.AddNode() n["c2"]["gc1"] = Gaffer.Node() n["c3"] = Gaffer.Node() n["c3"]["gc2"] = GafferTest.AddNode() n["c3"]["gc3"] = GafferTest.AddNode() self.assertEqual( list( Gaffer.Node.Range( n ) ), [ n["c1"], n["c2"], n["c3"] ], ) self.assertEqual( list( Gaffer.Node.RecursiveRange( n ) ), [ n["c1"], n["c2"], n["c2"]["gc1"], n["c3"], n["c3"]["gc2"], n["c3"]["gc3"] ], ) self.assertEqual( list( GafferTest.AddNode.Range( n ) ), [ n["c2"] ], ) self.assertEqual( list( GafferTest.AddNode.RecursiveRange( n ) ), [ n["c2"], n["c3"]["gc2"], n["c3"]["gc3"] ], ) def testRangesForPythonTypes( self ) : n = Gaffer.Node() n["a"] = GafferTest.AddNode() n["b"] = Gaffer.Node() n["c"] = GafferTest.AddNode() n["d"] = Gaffer.Node() n["d"]["e"] = GafferTest.AddNode() self.assertEqual( list( Gaffer.Node.Range( n ) ), [ n["a"], n["b"], n["c"], n["d"] ], ) self.assertEqual( list( 
GafferTest.AddNode.Range( n ) ), [ n["a"], n["c"] ], ) self.assertEqual( list( Gaffer.Node.RecursiveRange( n ) ), [ n["a"], n["b"], n["c"], n["d"], n["d"]["e"] ], ) self.assertEqual( list( GafferTest.AddNode.RecursiveRange( n ) ), [ n["a"], n["c"], n["d"]["e"] ], ) if __name__ == "__main__" : unittest.main()
{ "content_hash": "24cc2d80a411e3f29f67372c7ca7ce91", "timestamp": "", "source": "github", "line_count": 375, "max_line_length": 120, "avg_line_length": 27.218666666666667, "alnum_prop": 0.6168315861663565, "repo_name": "appleseedhq/gaffer", "id": "480cb554efcea1939d6ff1c69447bc12431a9436", "size": "12077", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/GafferTest/NodeTest.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "39910" }, { "name": "C++", "bytes": "7337901" }, { "name": "CMake", "bytes": "85201" }, { "name": "GLSL", "bytes": "6236" }, { "name": "Python", "bytes": "7531988" }, { "name": "Shell", "bytes": "15031" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function, unicode_literals import copy import threading import warnings from django.core.exceptions import ImproperlyConfigured from django.utils.encoding import force_text from django.utils.six import with_metaclass from haystack import connection_router, connections from haystack.constants import DEFAULT_ALIAS, DJANGO_CT, DJANGO_ID, ID, Indexable from haystack.fields import * from haystack.manager import SearchIndexManager from haystack.utils import get_facet_field_name, get_identifier, get_model_ct class DeclarativeMetaclass(type): def __new__(cls, name, bases, attrs): attrs['fields'] = {} # Inherit any fields from parent(s). try: parents = [b for b in bases if issubclass(b, SearchIndex)] # Simulate the MRO. parents.reverse() for p in parents: fields = getattr(p, 'fields', None) if fields: attrs['fields'].update(fields) except NameError: pass # Build a dictionary of faceted fields for cross-referencing. facet_fields = {} for field_name, obj in attrs.items(): # Only need to check the FacetFields. if hasattr(obj, 'facet_for'): if not obj.facet_for in facet_fields: facet_fields[obj.facet_for] = [] facet_fields[obj.facet_for].append(field_name) built_fields = {} for field_name, obj in attrs.items(): if isinstance(obj, SearchField): field = attrs[field_name] field.set_instance_name(field_name) built_fields[field_name] = field # Only check non-faceted fields for the following info. if not hasattr(field, 'facet_for'): if field.faceted == True: # If no other field is claiming this field as # ``facet_for``, create a shadow ``FacetField``. 
if not field_name in facet_fields: shadow_facet_name = get_facet_field_name(field_name) shadow_facet_field = field.facet_class(facet_for=field_name) shadow_facet_field.set_instance_name(shadow_facet_name) built_fields[shadow_facet_name] = shadow_facet_field attrs['fields'].update(built_fields) # Assigning default 'objects' query manager if it does not already exist if not 'objects' in attrs: try: attrs['objects'] = SearchIndexManager(attrs['Meta'].index_label) except (KeyError, AttributeError): attrs['objects'] = SearchIndexManager(DEFAULT_ALIAS) return super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs) class SearchIndex(with_metaclass(DeclarativeMetaclass, threading.local)): """ Base class for building indexes. An example might look like this:: import datetime from haystack import indexes from myapp.models import Note class NoteIndex(indexes.SearchIndex, indexes.Indexable): text = indexes.CharField(document=True, use_template=True) author = indexes.CharField(model_attr='user') pub_date = indexes.DateTimeField(model_attr='pub_date') def get_model(self): return Note def index_queryset(self, using=None): return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now()) """ def __init__(self): self.prepared_data = None content_fields = [] self.field_map = dict() for field_name, field in self.fields.items(): #form field map self.field_map[field.index_fieldname] = field_name if field.document is True: content_fields.append(field_name) if not len(content_fields) == 1: raise SearchFieldError("The index '%s' must have one (and only one) SearchField with document=True." % self.__class__.__name__) def get_model(self): """ Should return the ``Model`` class (not an instance) that the rest of the ``SearchIndex`` should use. This method is required & you must override it to return the correct class. """ raise NotImplementedError("You must provide a 'get_model' method for the '%r' index." 
% self) def index_queryset(self, using=None): """ Get the default QuerySet to index when doing a full update. Subclasses can override this method to avoid indexing certain objects. """ return self.get_model()._default_manager.all() def read_queryset(self, using=None): """ Get the default QuerySet for read actions. Subclasses can override this method to work with other managers. Useful when working with default managers that filter some objects. """ return self.index_queryset(using=using) def build_queryset(self, using=None, start_date=None, end_date=None): """ Get the default QuerySet to index when doing an index update. Subclasses can override this method to take into account related model modification times. The default is to use ``SearchIndex.index_queryset`` and filter based on ``SearchIndex.get_updated_field`` """ extra_lookup_kwargs = {} model = self.get_model() updated_field = self.get_updated_field() update_field_msg = ("No updated date field found for '%s' " "- not restricting by age.") % model.__name__ if start_date: if updated_field: extra_lookup_kwargs['%s__gte' % updated_field] = start_date else: warnings.warn(update_field_msg) if end_date: if updated_field: extra_lookup_kwargs['%s__lte' % updated_field] = end_date else: warnings.warn(update_field_msg) index_qs = None if hasattr(self, 'get_queryset'): warnings.warn("'SearchIndex.get_queryset' was deprecated in Haystack v2. Please rename the method 'index_queryset'.") index_qs = self.get_queryset() else: index_qs = self.index_queryset(using=using) if not hasattr(index_qs, 'filter'): raise ImproperlyConfigured("The '%r' class must return a 'QuerySet' in the 'index_queryset' method." % self) # `.select_related()` seems like a good idea here but can fail on # nullable `ForeignKey` as well as what seems like other cases. return index_qs.filter(**extra_lookup_kwargs).order_by(model._meta.pk.name) def prepare(self, obj): """ Fetches and adds/alters data before indexing. 
""" self.prepared_data = { ID: get_identifier(obj), DJANGO_CT: get_model_ct(obj), DJANGO_ID: force_text(obj.pk), } for field_name, field in self.fields.items(): # Use the possibly overridden name, which will default to the # variable name of the field. self.prepared_data[field.index_fieldname] = field.prepare(obj) if hasattr(self, "prepare_%s" % field_name): value = getattr(self, "prepare_%s" % field_name)(obj) self.prepared_data[field.index_fieldname] = value return self.prepared_data def full_prepare(self, obj): self.prepared_data = self.prepare(obj) for field_name, field in self.fields.items(): # Duplicate data for faceted fields. if getattr(field, 'facet_for', None): source_field_name = self.fields[field.facet_for].index_fieldname # If there's data there, leave it alone. Otherwise, populate it # with whatever the related field has. if self.prepared_data[field_name] is None and source_field_name in self.prepared_data: self.prepared_data[field.index_fieldname] = self.prepared_data[source_field_name] # Remove any fields that lack a value and are ``null=True``. if field.null is True: if self.prepared_data[field.index_fieldname] is None: del(self.prepared_data[field.index_fieldname]) return self.prepared_data def get_content_field(self): """Returns the field that supplies the primary document to be indexed.""" for field_name, field in self.fields.items(): if field.document is True: return field.index_fieldname def get_field_weights(self): """Returns a dict of fields with weight values""" weights = {} for field_name, field in self.fields.items(): if field.boost: weights[field_name] = field.boost return weights def _get_backend(self, using): warnings.warn('SearchIndex._get_backend is deprecated; use SearchIndex.get_backend instead', DeprecationWarning) return self.get_backend(using) def get_backend(self, using=None): if using is None: try: using = connection_router.for_write(index=self)[0] except IndexError: # There's no backend to handle it. Bomb out. 
return None return connections[using].get_backend() def update(self, using=None): """ Updates the entire index. If ``using`` is provided, it specifies which connection should be used. Default relies on the routers to decide which backend should be used. """ backend = self.get_backend(using) if backend is not None: backend.update(self, self.index_queryset(using=using)) def update_object(self, instance, using=None, **kwargs): """ Update the index for a single object. Attached to the class's post-save hook. If ``using`` is provided, it specifies which connection should be used. Default relies on the routers to decide which backend should be used. """ # Check to make sure we want to index this first. if self.should_update(instance, **kwargs): backend = self.get_backend(using) if backend is not None: backend.update(self, [instance]) def remove_object(self, instance, using=None, **kwargs): """ Remove an object from the index. Attached to the class's post-delete hook. If ``using`` is provided, it specifies which connection should be used. Default relies on the routers to decide which backend should be used. """ backend = self.get_backend(using) if backend is not None: backend.remove(instance, **kwargs) def clear(self, using=None): """ Clears the entire index. If ``using`` is provided, it specifies which connection should be used. Default relies on the routers to decide which backend should be used. """ backend = self.get_backend(using) if backend is not None: backend.clear(models=[self.get_model()]) def reindex(self, using=None): """ Completely clear the index for this model and rebuild it. If ``using`` is provided, it specifies which connection should be used. Default relies on the routers to decide which backend should be used. """ self.clear(using=using) self.update(using=using) def get_updated_field(self): """ Get the field name that represents the updated date for the model. 
If specified, this is used by the reindex command to filter out results from the QuerySet, enabling you to reindex only recent records. This method should either return None (reindex everything always) or a string of the Model's DateField/DateTimeField name. """ return None def should_update(self, instance, **kwargs): """ Determine if an object should be updated in the index. It's useful to override this when an object may save frequently and cause excessive reindexing. You should check conditions on the instance and return False if it is not to be indexed. By default, returns True (always reindex). """ return True def load_all_queryset(self): """ Provides the ability to override how objects get loaded in conjunction with ``SearchQuerySet.load_all``. This is useful for post-processing the results from the query, enabling things like adding ``select_related`` or filtering certain data. By default, returns ``all()`` on the model's default manager. """ return self.get_model()._default_manager.all() class BasicSearchIndex(SearchIndex): text = CharField(document=True, use_template=True) # End SearchIndexes # Begin ModelSearchIndexes def index_field_from_django_field(f, default=CharField): """ Returns the Haystack field type that would likely be associated with each Django type. """ result = default if f.get_internal_type() in ('DateField', 'DateTimeField'): result = DateTimeField elif f.get_internal_type() in ('BooleanField', 'NullBooleanField'): result = BooleanField elif f.get_internal_type() in ('CommaSeparatedIntegerField',): result = MultiValueField elif f.get_internal_type() in ('DecimalField', 'FloatField'): result = FloatField elif f.get_internal_type() in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField'): result = IntegerField return result class ModelSearchIndex(SearchIndex): """ Introspects the model assigned to it and generates a `SearchIndex` based on the fields of that model. 
In addition, it adds a `text` field that is the `document=True` field and has `use_template=True` option set, just like the `BasicSearchIndex`. Usage of this class might result in inferior `SearchIndex` objects, which can directly affect your search results. Use this to establish basic functionality and move to custom `SearchIndex` objects for better control. At this time, it does not handle related fields. """ text = CharField(document=True, use_template=True) # list of reserved field names fields_to_skip = (ID, DJANGO_CT, DJANGO_ID, 'content', 'text') def __init__(self, extra_field_kwargs=None): super(ModelSearchIndex, self).__init__() self.model = None self.prepared_data = None content_fields = [] self.extra_field_kwargs = extra_field_kwargs or {} # Introspect the model, adding/removing fields as needed. # Adds/Excludes should happen only if the fields are not already # defined in `self.fields`. self._meta = getattr(self, 'Meta', None) if self._meta: self.model = getattr(self._meta, 'model', None) fields = getattr(self._meta, 'fields', []) excludes = getattr(self._meta, 'excludes', []) # Add in the new fields. self.fields.update(self.get_fields(fields, excludes)) for field_name, field in self.fields.items(): if field.document is True: content_fields.append(field_name) if not len(content_fields) == 1: raise SearchFieldError("The index '%s' must have one (and only one) SearchField with document=True." % self.__class__.__name__) def should_skip_field(self, field): """ Given a Django model field, return if it should be included in the contributed SearchFields. """ # Skip fields in skip list if field.name in self.fields_to_skip: return True # Ignore certain fields (AutoField, related fields). if field.primary_key or field.is_relation: return True return False def get_model(self): return self.model def get_index_fieldname(self, f): """ Given a Django field, return the appropriate index fieldname. 
""" return f.name def get_fields(self, fields=None, excludes=None): """ Given any explicit fields to include and fields to exclude, add additional fields based on the associated model. """ final_fields = {} fields = fields or [] excludes = excludes or [] for f in self.model._meta.fields: # If the field name is already present, skip if f.name in self.fields: continue # If field is not present in explicit field listing, skip if fields and f.name not in fields: continue # If field is in exclude list, skip if excludes and f.name in excludes: continue if self.should_skip_field(f): continue index_field_class = index_field_from_django_field(f) kwargs = copy.copy(self.extra_field_kwargs) kwargs.update({ 'model_attr': f.name, }) if f.null is True: kwargs['null'] = True if f.has_default(): kwargs['default'] = f.default final_fields[f.name] = index_field_class(**kwargs) final_fields[f.name].set_instance_name(self.get_index_fieldname(f)) return final_fields
{ "content_hash": "577bf35d54efe09d9645928317b3a6f0", "timestamp": "", "source": "github", "line_count": 499, "max_line_length": 139, "avg_line_length": 35.807615230460925, "alnum_prop": 0.605216028654578, "repo_name": "celerityweb/django-haystack", "id": "3d233619656106bca9f56681b386445401fdd453", "size": "17887", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "haystack/indexes.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "1431" }, { "name": "Python", "bytes": "878213" }, { "name": "Shell", "bytes": "1961" } ], "symlink_target": "" }
import json from urllib import parse import h_pyramid_sentry from elasticsearch import exceptions from pyramid import httpexceptions, i18n, view from pyramid.httpexceptions import HTTPNoContent from bouncer import util from bouncer.embed_detector import url_embeds_client _ = i18n.TranslationStringFactory(__package__) class FailedHealthcheck(Exception): """An exception raised when the healthcheck fails.""" @view.view_defaults(renderer="bouncer:templates/annotation.html.jinja2") class AnnotationController(object): def __init__(self, request): self.request = request @view.view_config(route_name="annotation_with_url") @view.view_config(route_name="annotation_without_url") def annotation(self): settings = self.request.registry.settings try: document = self.request.es.get( index=settings["elasticsearch_index"], doc_type="annotation", id=self.request.matchdict["id"], ) except exceptions.NotFoundError: raise httpexceptions.HTTPNotFound(_("Annotation not found")) try: parsed_document = util.parse_document(document) authority = parsed_document["authority"] annotation_id = parsed_document["annotation_id"] document_uri = parsed_document["document_uri"] show_metadata = parsed_document["show_metadata"] quote = parsed_document["quote"] text = parsed_document["text"] except util.DeletedAnnotationError: raise httpexceptions.HTTPNotFound(_("Annotation not found")) except util.InvalidAnnotationError as exc: raise httpexceptions.HTTPUnprocessableEntity(str(exc)) # Remove any existing #fragment identifier from the URI before we # append our own. document_uri = parse.urldefrag(document_uri)[0] if not _is_valid_http_url(document_uri): raise httpexceptions.HTTPUnprocessableEntity( _( "Sorry, but it looks like this annotation was made on a " "document that is not publicly available." 
) ) via_url = None if _can_use_proxy(settings, authority=authority) and not url_embeds_client( document_uri ): via_url = "{via_base_url}/{uri}#annotations:{id}".format( via_base_url=settings["via_base_url"], uri=document_uri, id=annotation_id, ) extension_url = "{uri}#annotations:{id}".format( uri=document_uri, id=annotation_id ) pretty_url = util.get_pretty_url(document_uri) title = util.get_boilerplate_quote(document_uri) return { "data": json.dumps( { # Warning: variable names change from python_style to # javaScriptStyle here! "chromeExtensionId": settings["chrome_extension_id"], "viaUrl": via_url, "extensionUrl": extension_url, } ), "show_metadata": show_metadata, "pretty_url": pretty_url, "quote": quote, "text": text, "title": title, } @view.view_config(renderer="bouncer:templates/index.html.jinja2", route_name="index") def index(request): raise httpexceptions.HTTPFound(location=request.registry.settings["hypothesis_url"]) @view.view_config( renderer="bouncer:templates/annotation.html.jinja2", route_name="goto_url" ) def goto_url(request): """ Redirect the user to a specified URL with the annotation client layer activated. This provides a URL-sharing mechanism. Optional querystring parameters can refine the behavior of the annotation client at the target url by identifying: * "group" - a group to focus; OR * "q" a query to populate the client search with """ settings = request.registry.settings url = request.params.get("url") if url is None: raise httpexceptions.HTTPBadRequest('"url" parameter is missing') if not _is_valid_http_url(url): raise httpexceptions.HTTPBadRequest( _( "Sorry, but this service can only show annotations on " "valid HTTP or HTTPs URLs." ) ) # Remove any existing #fragment identifier from the URI before we # append our own. 
url = parse.urldefrag(url)[0] group = request.params.get("group", "") query = parse.quote(request.params.get("q", "")) # Translate any refining querystring parameters into a URL fragment # syntax understood by the client fragment = "annotations:" # group will supersede query (q) if both are present if group: # focus a specific group in the client fragment = fragment + "group:{group}".format(group=group) else: # populate the client search with a query fragment = fragment + "query:{query}".format(query=query) if not url_embeds_client(url): via_url = "{via_base_url}/{url}#{fragment}".format( via_base_url=settings["via_base_url"], url=url, fragment=fragment ) else: via_url = None extension_url = "{url}#{fragment}".format(url=url, fragment=fragment) pretty_url = util.get_pretty_url(url) return { "data": json.dumps( { "chromeExtensionId": settings["chrome_extension_id"], "viaUrl": via_url, "extensionUrl": extension_url, } ), "pretty_url": pretty_url, } @view.view_config(route_name="crash") def crash(request): # pragma: nocover """Crash if requested to for testing purposes.""" # Ensure that no conceivable accident could cause this to be triggered if request.params.get("cid", "") == "a751bb01": raise ValueError("Something has gone wrong") return HTTPNoContent() @view.view_defaults(renderer="bouncer:templates/error.html.jinja2") class ErrorController(object): def __init__(self, exc, request): self.exc = exc self.request = request @view.view_config(context=httpexceptions.HTTPError) @view.view_config(context=httpexceptions.HTTPServerError) def httperror(self): self.request.response.status_int = self.exc.status_int # If code raises an HTTPError or HTTPServerError we assume this was # deliberately raised and: # 1. Show the user an error page including specific error message # 2. _Do not_ report the error to Sentry. 
return {"message": str(self.exc)} @view.view_config(context=Exception) def error(self): # If code raises a non-HTTPException exception we assume it was a bug # and: # 1. Show the user a generic error page # 2. Report the details of the error to Sentry. self.request.response.status_int = 500 h_pyramid_sentry.report_exception() # In debug mode re-raise exceptions so that they get printed in the # terminal. if self.request.registry.settings["debug"]: raise return { "message": _( "Sorry, but something went wrong with the link. " "The issue has been reported and we'll try to " "fix it." ) } @view.view_config(route_name="healthcheck", renderer="json", http_cache=0) def healthcheck(request): index = request.registry.settings["elasticsearch_index"] try: status = request.es.cluster.health(index=index)["status"] except exceptions.ElasticsearchException as exc: raise FailedHealthcheck("elasticsearch exception") from exc if status not in ("yellow", "green"): raise FailedHealthcheck("cluster status was {!r}".format(status)) return {"status": "okay"} def _is_valid_http_url(url): """ Return `True` if `url` is a valid HTTP or HTTPS URL. Parsing is currently very lenient as the URL only has to be accepted by `urlparse()`. """ try: parsed_url = parse.urlparse(url) return parsed_url.scheme == "http" or parsed_url.scheme == "https" except Exception: return False def _can_use_proxy(settings, authority): """ Return `True` if an annotation can be shown via the proxy service. This currently only considers the authority but in future it could also incorporate checks for whether the target page embeds Hypothesis. :param settings: App settings dict :param authority: Authority of annotation's user """ # The proxy service can only be used with pages that use first party # accounts, because third-party accounts require the host page to supply # login information to the client, which in turn relies on the user's cookie # session and therefore does not work properly through the proxy. 
return settings["hypothesis_authority"] == authority def includeme(config): # pragma: nocover config.add_route("index", "/") config.add_route("healthcheck", "/_status") config.add_route("crash", "/_crash") config.add_route("goto_url", "/go") config.add_route("annotation_with_url", "/{id}/*url") config.add_route("annotation_without_url", "/{id}") config.scan(__name__)
{ "content_hash": "c014d2b1040b1d989ccfb6e094a97f58", "timestamp": "", "source": "github", "line_count": 280, "max_line_length": 88, "avg_line_length": 33.7, "alnum_prop": 0.6251589656634167, "repo_name": "hypothesis/bouncer", "id": "ca51fe563916b9c665c305f9aa0cdbf8fecd10e2", "size": "9436", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "bouncer/views.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "1115" }, { "name": "Dockerfile", "bytes": "722" }, { "name": "JavaScript", "bytes": "8639" }, { "name": "Jinja", "bytes": "2084" }, { "name": "Makefile", "bytes": "4320" }, { "name": "Python", "bytes": "46360" }, { "name": "Shell", "bytes": "1466" } ], "symlink_target": "" }
import cStringIO
import csv
import datetime
import functools
import hashlib
import itertools
import json
import logging
import time

from funcy import project
import xlsxwriter
from flask_login import AnonymousUserMixin, UserMixin
from flask_sqlalchemy import SQLAlchemy
from passlib.apps import custom_app_context as pwd_context
from redash import settings, redis_connection, utils
from redash.destinations import (get_configuration_schema_for_destination_type,
                                 get_destination)
from redash.metrics import database  # noqa: F401
from redash.permissions import has_access, view_only
from redash.query_runner import (get_configuration_schema_for_query_runner_type,
                                 get_query_runner)
from redash.utils import generate_token, json_dumps
from redash.utils.comparators import CaseInsensitiveComparator
from redash.utils.configuration import ConfigurationContainer
from sqlalchemy import distinct, or_
from sqlalchemy.dialects import postgresql
from sqlalchemy.event import listens_for
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import backref, joinedload, object_session, subqueryload
from sqlalchemy.orm.exc import NoResultFound  # noqa: F401
from sqlalchemy.types import TypeDecorator
from functools import reduce


class SQLAlchemyExt(SQLAlchemy):
    """Flask-SQLAlchemy subclass that can disable connection pooling.

    When settings.SQLALCHEMY_DISABLE_POOL is set, NullPool is used, which
    opens and closes a connection per checkout instead of pooling.
    """

    def apply_pool_defaults(self, app, options):
        if settings.SQLALCHEMY_DISABLE_POOL:
            from sqlalchemy.pool import NullPool
            options['poolclass'] = NullPool
        else:
            return super(SQLAlchemyExt, self).apply_pool_defaults(app, options)


db = SQLAlchemyExt(session_options={
    'expire_on_commit': False
})

# All model columns are NOT NULL unless a definition explicitly passes
# nullable=True.
Column = functools.partial(db.Column, nullable=False)


class ScheduledQueriesExecutions(object):
    """Tracks last-execution timestamps of scheduled queries in a Redis hash."""

    KEY_NAME = 'sq:executed_at'

    def __init__(self):
        # Local snapshot of the Redis hash; populated by refresh().
        self.executions = {}

    def refresh(self):
        # Re-read the whole hash from Redis into the local snapshot.
        self.executions = redis_connection.hgetall(self.KEY_NAME)

    def update(self, query_id):
        # Record "now" as the latest execution time for query_id.
        redis_connection.hmset(self.KEY_NAME, {
            query_id: time.time()
        })

    def get(self, query_id):
        # Return a datetime of the last recorded execution, or None.
        # Reads the local snapshot only, so callers should refresh() first
        # when they need fresh data.
        timestamp = self.executions.get(str(query_id))
        if timestamp:
            timestamp = utils.dt_from_timestamp(timestamp)

        return timestamp


scheduled_queries_executions = ScheduledQueriesExecutions()

# AccessPermission and Change use a 'generic foreign key' approach to refer to
# either queries or dashboards.
# TODO replace this with association tables.
_gfk_types = {}


class GFKBase(object):
    """
    Compatibility with 'generic foreign key' approach Peewee used.
    """
    # XXX Replace this with table-per-association.
    object_type = Column(db.String(255))
    object_id = Column(db.Integer)

    # Cached instance resolved from (object_type, object_id).
    _object = None

    @property
    def object(self):
        # Lazily resolve and cache the referenced row.
        # NOTE(review): _gfk_types is empty at this point in the module, so it
        # is presumably populated elsewhere with tablename -> class — confirm.
        session = object_session(self)
        if self._object or not session:
            return self._object
        else:
            object_class = _gfk_types[self.object_type]
            self._object = session.query(object_class).filter(
                object_class.id == self.object_id).first()
            return self._object

    @object.setter
    def object(self, value):
        self._object = value
        self.object_type = value.__class__.__tablename__
        self.object_id = value.id


# XXX replace PseudoJSON and MutableDict with real JSON field
class PseudoJSON(TypeDecorator):
    """Stores a JSON-serializable value as text in a TEXT column."""

    impl = db.Text

    def process_bind_param(self, value, dialect):
        return json_dumps(value)

    def process_result_value(self, value, dialect):
        # Empty/NULL column values pass through unchanged.
        if not value:
            return value
        return json.loads(value)


class MutableDict(Mutable, dict):
    """Dict that notifies SQLAlchemy of in-place set/del mutations."""

    @classmethod
    def coerce(cls, key, value):
        "Convert plain dictionaries to MutableDict."
        if not isinstance(value, MutableDict):
            if isinstance(value, dict):
                return MutableDict(value)

            # this call will raise ValueError
            return Mutable.coerce(key, value)
        else:
            return value

    def __setitem__(self, key, value):
        "Detect dictionary set events and emit change events."
        dict.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, key):
        "Detect dictionary del events and emit change events."
        dict.__delitem__(self, key)
        self.changed()


class MutableList(Mutable, list):
    """List that notifies SQLAlchemy of in-place append/remove mutations."""

    def append(self, value):
        list.append(self, value)
        self.changed()

    def remove(self, value):
        list.remove(self, value)
        self.changed()

    @classmethod
    def coerce(cls, key, value):
        # Convert plain lists to MutableList; anything else is handed to
        # Mutable.coerce, which raises ValueError.
        if not isinstance(value, MutableList):
            if isinstance(value, list):
                return MutableList(value)
            return Mutable.coerce(key, value)
        else:
            return value


class TimestampMixin(object):
    """Adds created_at/updated_at columns maintained by the database clock."""

    # updated_at is bumped automatically on every UPDATE via onupdate.
    updated_at = Column(db.DateTime(True), default=db.func.now(),
                        onupdate=db.func.now(), nullable=False)
    created_at = Column(db.DateTime(True), default=db.func.now(),
                        nullable=False)


class ChangeTrackingMixin(object):
    """Records before/after values of changed columns as Change rows."""

    # Bookkeeping columns that are never reported as changes.
    skipped_fields = ('id', 'created_at', 'updated_at', 'version')
    _clean_values = None

    def __init__(self, *a, **kw):
        super(ChangeTrackingMixin, self).__init__(*a, **kw)
        # NOTE(review): assumes self.user has been set by the constructor
        # kwargs before this runs — confirm against model definitions.
        self.record_changes(self.user)

    def prep_cleanvalues(self):
        # Write through __dict__ to avoid triggering our own __setattr__.
        self.__dict__['_clean_values'] = {}
        for attr in inspect(self.__class__).column_attrs:
            col, = attr.columns
            # 'query' is col name but not attr name
            self._clean_values[col.name] = None

    def __setattr__(self, key, value):
        if self._clean_values is None:
            self.prep_cleanvalues()
        # Snapshot the current value of every column before applying the
        # write, so record_changes() can report previous vs current.
        for attr in inspect(self.__class__).column_attrs:
            col, = attr.columns
            previous = getattr(self, attr.key, None)
            self._clean_values[col.name] = previous

        super(ChangeTrackingMixin, self).__setattr__(key, value)

    def record_changes(self, changed_by):
        # Flush first so self has an id to reference from the Change row.
        db.session.add(self)
        db.session.flush()
        changes = {}
        for attr in inspect(self.__class__).column_attrs:
            col, = attr.columns
            if attr.key not in self.skipped_fields:
                changes[col.name] = {'previous': self._clean_values[col.name],
                                     'current': getattr(self, attr.key)}

        db.session.add(Change(object=self, object_version=self.version,
                              user=changed_by, change=changes))


class BelongsToOrgMixin(object):
    @classmethod
    def get_by_id_and_org(cls, object_id, org):
        # .one() raises NoResultFound unless exactly one row matches.
        return db.session.query(cls).filter(cls.id == object_id,
                                            cls.org == org).one()


class PermissionsCheckMixin(object):
    def has_permission(self, permission):
        return self.has_permissions((permission,))

    def has_permissions(self, permissions):
        # True only if every requested permission is in self.permissions.
        has_permissions = reduce(lambda a, b: a and b,
                                 map(lambda permission: permission in self.permissions,
                                     permissions),
                                 True)

        return has_permissions


class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
    """Unauthenticated user: no permissions at all."""

    @property
    def permissions(self):
        return []

    def is_api_user(self):
        return False


class ApiUser(UserMixin, PermissionsCheckMixin):
    """A pseudo-user authenticated by API key rather than a login session."""

    def __init__(self, api_key, org, groups, name=None):
        self.object = None
        # api_key may be either a raw key string or an ApiKey-like model
        # object carrying .api_key/.id/.object attributes.
        if isinstance(api_key, basestring):
            self.id = api_key
            self.name = name
        else:
            self.id = api_key.api_key
            self.name = "ApiKey: {}".format(api_key.id)
            self.object = api_key.object
        self.group_ids = groups
        self.org = org

    def __repr__(self):
        return u"<{}>".format(self.name)

    def is_api_user(self):
        return True

    @property
    def permissions(self):
        # API-key access is read-only: viewing queries only.
        return ['view_query']

    def has_access(self, obj, access_type):
        return False


class Organization(TimestampMixin, db.Model):
    """Tenant: owns users, groups, data sources, queries and dashboards."""

    SETTING_GOOGLE_APPS_DOMAINS = 'google_apps_domains'
    SETTING_IS_PUBLIC = "is_public"

    id = Column(db.Integer, primary_key=True)
    name = Column(db.String(255))
    slug = Column(db.String(255), unique=True)
    # Free-form per-organization settings stored as JSON text.
    settings = Column(MutableDict.as_mutable(PseudoJSON))
    groups = db.relationship("Group", lazy="dynamic")

    __tablename__ = 'organizations'

    def __repr__(self):
        return u"<Organization: {}, {}>".format(self.id, self.name)

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.id)

    @classmethod
    def get_by_slug(cls, slug):
        return cls.query.filter(cls.slug == slug).first()

    @property
    def default_group(self):
        return self.groups.filter(Group.name == 'default',
                                  Group.type == Group.BUILTIN_GROUP).first()

    @property
    def google_apps_domains(self):
        return self.settings.get(self.SETTING_GOOGLE_APPS_DOMAINS, [])

    @property
    def is_public(self):
        return self.settings.get(self.SETTING_IS_PUBLIC, False)

    @property
    def is_disabled(self):
        return self.settings.get('is_disabled', False)

    def disable(self):
        self.settings['is_disabled'] = True

    def enable(self):
        self.settings['is_disabled'] = False

    @property
    def admin_group(self):
        return self.groups.filter(Group.name == 'admin',
                                  Group.type == Group.BUILTIN_GROUP).first()

    def has_user(self, email):
        # 'users' is the backref from the User model's org relationship.
        return self.users.filter(User.email == email).count() == 1


class Group(db.Model, BelongsToOrgMixin):
    """A named permission set within an organization."""

    DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard',
                           'edit_query', 'view_query', 'view_source',
                           'execute_query', 'list_users', 'schedule_query',
                           'list_dashboards', 'list_alerts',
                           'list_data_sources']

    # Built-in groups ('default'/'admin') vs user-created groups.
    BUILTIN_GROUP = 'builtin'
    REGULAR_GROUP = 'regular'

    id = Column(db.Integer, primary_key=True)
    data_sources = db.relationship("DataSourceGroup", back_populates="group",
                                   cascade="all")
    org_id = Column(db.Integer, db.ForeignKey('organizations.id'))
    org = db.relationship(Organization, back_populates="groups")
    type = Column(db.String(255), default=REGULAR_GROUP)
    name = Column(db.String(100))
    permissions = Column(postgresql.ARRAY(db.String(255)),
                         default=DEFAULT_PERMISSIONS)
    created_at = Column(db.DateTime(True), default=db.func.now())

    __tablename__ = 'groups'

    def to_dict(self):
        return {
            'id': self.id,
            'name': self.name,
            'permissions': self.permissions,
            'type': self.type,
            'created_at': self.created_at
        }

    @classmethod
    def all(cls, org):
        return cls.query.filter(cls.org == org)

    @classmethod
    def members(cls, group_id):
        # Users store group membership as an integer array column.
        return User.query.filter(User.group_ids.any(group_id))

    @classmethod
    def find_by_name(cls, org, group_names):
        result = cls.query.filter(cls.org == org, cls.name.in_(group_names))
        return list(result)

    def __unicode__(self):
        return unicode(self.id)


class LowercasedString(TypeDecorator):
    """
    A lowercased string
    """
    impl = db.String
    comparator_factory = CaseInsensitiveComparator

    def __init__(self, length=320, *args, **kwargs):
        # 320 = 64 (local part) + 1 ('@') + 255 (domain): fits any email.
        super(LowercasedString, self).__init__(length=length, *args, **kwargs)

    def process_bind_param(self, value, dialect):
        # Normalize to lowercase on the way into the database.
        if value is not None:
            return value.lower()
        return value

    @property
    def python_type(self):
        return self.impl.type.python_type
class User(TimestampMixin, db.Model, BelongsToOrgMixin, UserMixin, PermissionsCheckMixin): id = Column(db.Integer, primary_key=True) org_id = Column(db.Integer, db.ForeignKey('organizations.id')) org = db.relationship(Organization, backref=db.backref("users", lazy="dynamic")) name = Column(db.String(320)) email = Column(LowercasedString) password_hash = Column(db.String(128), nullable=True) # XXX replace with association table group_ids = Column('groups', MutableList.as_mutable(postgresql.ARRAY(db.Integer)), nullable=True) api_key = Column(db.String(40), default=lambda: generate_token(40), unique=True) __tablename__ = 'users' __table_args__ = (db.Index('users_org_id_email', 'org_id', 'email', unique=True),) def __init__(self, *args, **kwargs): if kwargs.get('email') is not None: kwargs['email'] = kwargs['email'].lower() super(User, self).__init__(*args, **kwargs) def to_dict(self, with_api_key=False): d = { 'id': self.id, 'name': self.name, 'email': self.email, 'gravatar_url': self.gravatar_url, 'groups': self.group_ids, 'updated_at': self.updated_at, 'created_at': self.created_at } if self.password_hash is None: d['auth_type'] = 'external' else: d['auth_type'] = 'password' if with_api_key: d['api_key'] = self.api_key return d def is_api_user(self): return False @property def gravatar_url(self): email_md5 = hashlib.md5(self.email.lower()).hexdigest() return "https://www.gravatar.com/avatar/%s?s=40" % email_md5 @property def permissions(self): # TODO: this should be cached. 
return list(itertools.chain(*[g.permissions for g in Group.query.filter(Group.id.in_(self.group_ids))])) @classmethod def get_by_email_and_org(cls, email, org): return cls.query.filter(cls.email == email, cls.org == org).one() @classmethod def get_by_api_key_and_org(cls, api_key, org): return cls.query.filter(cls.api_key == api_key, cls.org == org).one() @classmethod def all(cls, org): return cls.query.filter(cls.org == org) @classmethod def find_by_email(cls, email): return cls.query.filter(cls.email == email) def __unicode__(self): return u'%s (%s)' % (self.name, self.email) def hash_password(self, password): self.password_hash = pwd_context.encrypt(password) def verify_password(self, password): return self.password_hash and pwd_context.verify(password, self.password_hash) def update_group_assignments(self, group_names): groups = Group.find_by_name(self.org, group_names) groups.append(self.org.default_group) self.group_ids = [g.id for g in groups] db.session.add(self) def has_access(self, obj, access_type): return AccessPermission.exists(obj, access_type, grantee=self) class Configuration(TypeDecorator): impl = db.Text def process_bind_param(self, value, dialect): return value.to_json() def process_result_value(self, value, dialect): return ConfigurationContainer.from_json(value) class DataSource(BelongsToOrgMixin, db.Model): id = Column(db.Integer, primary_key=True) org_id = Column(db.Integer, db.ForeignKey('organizations.id')) org = db.relationship(Organization, backref="data_sources") name = Column(db.String(255)) type = Column(db.String(255)) options = Column(ConfigurationContainer.as_mutable(Configuration)) queue_name = Column(db.String(255), default="queries") scheduled_queue_name = Column(db.String(255), default="scheduled_queries") created_at = Column(db.DateTime(True), default=db.func.now()) data_source_groups = db.relationship("DataSourceGroup", back_populates="data_source", cascade="all") __tablename__ = 'data_sources' __table_args__ = 
(db.Index('data_sources_org_id_name', 'org_id', 'name'),) def __eq__(self, other): return self.id == other.id def to_dict(self, all=False, with_permissions_for=None): d = { 'id': self.id, 'name': self.name, 'type': self.type, 'syntax': self.query_runner.syntax, 'paused': self.paused, 'pause_reason': self.pause_reason } if all: schema = get_configuration_schema_for_query_runner_type(self.type) self.options.set_schema(schema) d['options'] = self.options.to_dict(mask_secrets=True) d['queue_name'] = self.queue_name d['scheduled_queue_name'] = self.scheduled_queue_name d['groups'] = self.groups if with_permissions_for is not None: d['view_only'] = db.session.query(DataSourceGroup.view_only).filter( DataSourceGroup.group == with_permissions_for, DataSourceGroup.data_source == self).one()[0] return d def __unicode__(self): return self.name @classmethod def create_with_group(cls, *args, **kwargs): data_source = cls(*args, **kwargs) data_source_group = DataSourceGroup( data_source=data_source, group=data_source.org.default_group) db.session.add_all([data_source, data_source_group]) return data_source @classmethod def all(cls, org, group_ids=None): data_sources = cls.query.filter(cls.org == org).order_by(cls.id.asc()) if group_ids: data_sources = data_sources.join(DataSourceGroup).filter( DataSourceGroup.group_id.in_(group_ids)) return data_sources @classmethod def get_by_id(cls, _id): return cls.query.filter(cls.id == _id).one() def delete(self): Query.query.filter(Query.data_source == self).update(dict(data_source_id=None, latest_query_data_id=None)) QueryResult.query.filter(QueryResult.data_source == self).delete() res = db.session.delete(self) db.session.commit() return res def get_schema(self, refresh=False): key = "data_source:schema:{}".format(self.id) cache = None if not refresh: cache = redis_connection.get(key) if cache is None: query_runner = self.query_runner schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name']) 
redis_connection.set(key, json.dumps(schema)) else: schema = json.loads(cache) return schema def _pause_key(self): return 'ds:{}:pause'.format(self.id) @property def paused(self): return redis_connection.exists(self._pause_key()) @property def pause_reason(self): return redis_connection.get(self._pause_key()) def pause(self, reason=None): redis_connection.set(self._pause_key(), reason) def resume(self): redis_connection.delete(self._pause_key()) def add_group(self, group, view_only=False): dsg = DataSourceGroup(group=group, data_source=self, view_only=view_only) db.session.add(dsg) return dsg def remove_group(self, group): db.session.query(DataSourceGroup).filter( DataSourceGroup.group == group, DataSourceGroup.data_source == self).delete() db.session.commit() def update_group_permission(self, group, view_only): dsg = DataSourceGroup.query.filter( DataSourceGroup.group == group, DataSourceGroup.data_source == self).one() dsg.view_only = view_only db.session.add(dsg) return dsg @property def query_runner(self): return get_query_runner(self.type, self.options) @classmethod def get_by_name(cls, name): return cls.query.filter(cls.name == name).one() # XXX examine call sites to see if a regular SQLA collection would work better @property def groups(self): groups = db.session.query(DataSourceGroup).filter( DataSourceGroup.data_source == self) return dict(map(lambda g: (g.group_id, g.view_only), groups)) class DataSourceGroup(db.Model): # XXX drop id, use datasource/group as PK id = Column(db.Integer, primary_key=True) data_source_id = Column(db.Integer, db.ForeignKey("data_sources.id")) data_source = db.relationship(DataSource, back_populates="data_source_groups") group_id = Column(db.Integer, db.ForeignKey("groups.id")) group = db.relationship(Group, back_populates="data_sources") view_only = Column(db.Boolean, default=False) __tablename__ = "data_source_groups" class QueryResult(db.Model, BelongsToOrgMixin): id = Column(db.Integer, primary_key=True) org_id = 
Column(db.Integer, db.ForeignKey('organizations.id')) org = db.relationship(Organization) data_source_id = Column(db.Integer, db.ForeignKey("data_sources.id")) data_source = db.relationship(DataSource, backref=backref('query_results')) query_hash = Column(db.String(32), index=True) query_text = Column('query', db.Text) data = Column(db.Text) runtime = Column(postgresql.DOUBLE_PRECISION) retrieved_at = Column(db.DateTime(True)) __tablename__ = 'query_results' def to_dict(self): return { 'id': self.id, 'query_hash': self.query_hash, 'query': self.query_text, 'data': json.loads(self.data), 'data_source_id': self.data_source_id, 'runtime': self.runtime, 'retrieved_at': self.retrieved_at } @classmethod def unused(cls, days=7): age_threshold = datetime.datetime.now() - datetime.timedelta(days=days) unused_results = (db.session.query(QueryResult.id).filter( Query.id == None, QueryResult.retrieved_at < age_threshold) .outerjoin(Query)) return unused_results @classmethod def get_latest(cls, data_source, query, max_age=0): query_hash = utils.gen_query_hash(query) if max_age == -1: q = db.session.query(QueryResult).filter( cls.query_hash == query_hash, cls.data_source == data_source).order_by( QueryResult.retrieved_at.desc()) else: q = db.session.query(QueryResult).filter( QueryResult.query_hash == query_hash, QueryResult.data_source == data_source, db.func.timezone('utc', QueryResult.retrieved_at) + datetime.timedelta(seconds=max_age) >= db.func.timezone('utc', db.func.now()) ).order_by(QueryResult.retrieved_at.desc()) return q.first() @classmethod def store_result(cls, org, data_source, query_hash, query, data, run_time, retrieved_at): query_result = cls(org=org, query_hash=query_hash, query_text=query, runtime=run_time, data_source=data_source, retrieved_at=retrieved_at, data=data) db.session.add(query_result) logging.info("Inserted query (%s) data; id=%s", query_hash, query_result.id) # TODO: Investigate how big an impact this select-before-update makes. 
queries = db.session.query(Query).filter( Query.query_hash == query_hash, Query.data_source == data_source) for q in queries: q.latest_query_data = query_result db.session.add(q) query_ids = [q.id for q in queries] logging.info("Updated %s queries with result (%s).", len(query_ids), query_hash) return query_result, query_ids def __unicode__(self): return u"%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at) @property def groups(self): return self.data_source.groups def make_csv_content(self): s = cStringIO.StringIO() query_data = json.loads(self.data) writer = csv.DictWriter(s, extrasaction="ignore", fieldnames=[col['name'] for col in query_data['columns']]) writer.writer = utils.UnicodeWriter(s) writer.writeheader() for row in query_data['rows']: writer.writerow(row) return s.getvalue() def make_excel_content(self): s = cStringIO.StringIO() query_data = json.loads(self.data) book = xlsxwriter.Workbook(s, {'constant_memory': True}) sheet = book.add_worksheet("result") column_names = [] for (c, col) in enumerate(query_data['columns']): sheet.write(0, c, col['name']) column_names.append(col['name']) for (r, row) in enumerate(query_data['rows']): for (c, name) in enumerate(column_names): v = row.get(name) if isinstance(v, list): v = str(v).encode('utf-8') sheet.write(r + 1, c, v) book.close() return s.getvalue() def should_schedule_next(previous_iteration, now, schedule, failures): if schedule.isdigit(): ttl = int(schedule) next_iteration = previous_iteration + datetime.timedelta(seconds=ttl) else: hour, minute = schedule.split(':') hour, minute = int(hour), int(minute) # The following logic is needed for cases like the following: # - The query scheduled to run at 23:59. # - The scheduler wakes up at 00:01. # - Using naive implementation of comparing timestamps, it will skip the execution. 
normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute) if normalized_previous_iteration > previous_iteration: previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1) next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute) if failures: next_iteration += datetime.timedelta(minutes=2**failures) return now > next_iteration class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model): id = Column(db.Integer, primary_key=True) version = Column(db.Integer, default=1) org_id = Column(db.Integer, db.ForeignKey('organizations.id')) org = db.relationship(Organization, backref="queries") data_source_id = Column(db.Integer, db.ForeignKey("data_sources.id"), nullable=True) data_source = db.relationship(DataSource, backref='queries') latest_query_data_id = Column(db.Integer, db.ForeignKey("query_results.id"), nullable=True) latest_query_data = db.relationship(QueryResult) name = Column(db.String(255)) description = Column(db.String(4096), nullable=True) query_text = Column("query", db.Text) query_hash = Column(db.String(32)) api_key = Column(db.String(40), default=lambda: generate_token(40)) user_id = Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship(User, foreign_keys=[user_id]) last_modified_by_id = Column(db.Integer, db.ForeignKey('users.id'), nullable=True) last_modified_by = db.relationship(User, backref="modified_queries", foreign_keys=[last_modified_by_id]) is_archived = Column(db.Boolean, default=False, index=True) is_draft = Column(db.Boolean, default=True, index=True) schedule = Column(db.String(10), nullable=True) schedule_failures = Column(db.Integer, default=0) visualizations = db.relationship("Visualization", cascade="all, delete-orphan") options = Column(MutableDict.as_mutable(PseudoJSON), default={}) __tablename__ = 'queries' __mapper_args__ = { "version_id_col": version, 'version_id_generator': False } def to_dict(self, 
with_stats=False, with_visualizations=False, with_user=True, with_last_modified_by=True): d = { 'id': self.id, 'latest_query_data_id': self.latest_query_data_id, 'name': self.name, 'description': self.description, 'query': self.query_text, 'query_hash': self.query_hash, 'schedule': self.schedule, 'api_key': self.api_key, 'is_archived': self.is_archived, 'is_draft': self.is_draft, 'updated_at': self.updated_at, 'created_at': self.created_at, 'data_source_id': self.data_source_id, 'options': self.options, 'version': self.version } if with_user: d['user'] = self.user.to_dict() else: d['user_id'] = self.user_id if with_last_modified_by: d['last_modified_by'] = self.last_modified_by.to_dict() if self.last_modified_by is not None else None else: d['last_modified_by_id'] = self.last_modified_by_id if with_stats: if self.latest_query_data is not None: d['retrieved_at'] = self.retrieved_at d['runtime'] = self.runtime else: d['retrieved_at'] = None d['runtime'] = None if with_visualizations: d['visualizations'] = [vis.to_dict(with_query=False) for vis in self.visualizations] return d def archive(self, user=None): db.session.add(self) self.is_archived = True self.schedule = None for vis in self.visualizations: for w in vis.widgets: db.session.delete(w) for a in self.alerts: db.session.delete(a) if user: self.record_changes(user) @classmethod def create(cls, **kwargs): query = cls(**kwargs) db.session.add(Visualization(query_rel=query, name="Table", description='', type="TABLE", options="{}")) return query @classmethod def all_queries(cls, group_ids, user_id=None, drafts=False): query_ids = (db.session.query(distinct(cls.id)) .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id) .filter(Query.is_archived == False) .filter(DataSourceGroup.group_id.in_(group_ids))) q = (cls.query .options(joinedload(Query.user), joinedload(Query.latest_query_data).load_only('runtime', 'retrieved_at')) .filter(cls.id.in_(query_ids)) .order_by(Query.created_at.desc())) if 
not drafts: q = q.filter(or_(Query.is_draft == False, Query.user_id == user_id)) return q @classmethod def by_user(cls, user): return cls.all_queries(user.group_ids, user.id).filter(Query.user == user) @classmethod def outdated_queries(cls): queries = (db.session.query(Query) .options(joinedload(Query.latest_query_data).load_only('retrieved_at')) .filter(Query.schedule != None) .order_by(Query.id)) now = utils.utcnow() outdated_queries = {} scheduled_queries_executions.refresh() for query in queries: if query.latest_query_data: retrieved_at = query.latest_query_data.retrieved_at else: retrieved_at = now retrieved_at = scheduled_queries_executions.get(query.id) or retrieved_at if should_schedule_next(retrieved_at, now, query.schedule, query.schedule_failures): key = "{}:{}".format(query.query_hash, query.data_source_id) outdated_queries[key] = query return outdated_queries.values() @classmethod def search(cls, term, group_ids, include_drafts=False): # TODO: This is very naive implementation of search, to be replaced with PostgreSQL full-text-search solution. 
where = (Query.name.ilike(u"%{}%".format(term)) | Query.description.ilike(u"%{}%".format(term))) if term.isdigit(): where |= Query.id == term where &= Query.is_archived == False if not include_drafts: where &= Query.is_draft == False where &= DataSourceGroup.group_id.in_(group_ids) query_ids = ( db.session.query(Query.id).join( DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id) .filter(where)).distinct() return Query.query.options(joinedload(Query.user)).filter(Query.id.in_(query_ids)) @classmethod def recent(cls, group_ids, user_id=None, limit=20): query = (cls.query .filter(Event.created_at > (db.func.current_date() - 7)) .join(Event, Query.id == Event.object_id.cast(db.Integer)) .join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id) .filter( Event.action.in_(['edit', 'execute', 'edit_name', 'edit_description', 'view_source']), Event.object_id != None, Event.object_type == 'query', DataSourceGroup.group_id.in_(group_ids), or_(Query.is_draft == False, Query.user_id == user_id), Query.is_archived == False) .group_by(Event.object_id, Query.id) .order_by(db.desc(db.func.count(0)))) if user_id: query = query.filter(Event.user_id == user_id) query = query.limit(limit) return query @classmethod def get_by_id(cls, _id): return cls.query.filter(cls.id == _id).one() def fork(self, user): forked_list = ['org', 'data_source', 'latest_query_data', 'description', 'query_text', 'query_hash', 'options'] kwargs = {a: getattr(self, a) for a in forked_list} forked_query = Query.create(name=u'Copy of (#{}) {}'.format(self.id, self.name), user=user, **kwargs) for v in self.visualizations: if v.type == 'TABLE': continue forked_v = v.to_dict() forked_v['options'] = v.options forked_v['query_rel'] = forked_query forked_v.pop('id') forked_query.visualizations.append(Visualization(**forked_v)) db.session.add(forked_query) return forked_query @property def runtime(self): return self.latest_query_data.runtime @property def retrieved_at(self): 
return self.latest_query_data.retrieved_at @property def groups(self): if self.data_source is None: return {} return self.data_source.groups def __unicode__(self): return unicode(self.id) @listens_for(Query.query_text, 'set') def gen_query_hash(target, val, oldval, initiator): target.query_hash = utils.gen_query_hash(val) target.schedule_failures = 0 @listens_for(Query.user_id, 'set') def query_last_modified_by(target, val, oldval, initiator): target.last_modified_by_id = val class AccessPermission(GFKBase, db.Model): id = Column(db.Integer, primary_key=True) # 'object' defined in GFKBase access_type = Column(db.String(255)) grantor_id = Column(db.Integer, db.ForeignKey("users.id")) grantor = db.relationship(User, backref='grantor', foreign_keys=[grantor_id]) grantee_id = Column(db.Integer, db.ForeignKey("users.id")) grantee = db.relationship(User, backref='grantee', foreign_keys=[grantee_id]) __tablename__ = 'access_permissions' @classmethod def grant(cls, obj, access_type, grantee, grantor): grant = cls.query.filter(cls.object_type == obj.__tablename__, cls.object_id == obj.id, cls.access_type == access_type, cls.grantee == grantee, cls.grantor == grantor).one_or_none() if not grant: grant = cls(object_type=obj.__tablename__, object_id=obj.id, access_type=access_type, grantee=grantee, grantor=grantor) db.session.add(grant) return grant @classmethod def revoke(cls, obj, grantee, access_type=None): permissions = cls._query(obj, access_type, grantee) return permissions.delete() @classmethod def find(cls, obj, access_type=None, grantee=None, grantor=None): return cls._query(obj, access_type, grantee, grantor) @classmethod def exists(cls, obj, access_type, grantee): return cls.find(obj, access_type, grantee).count() > 0 @classmethod def _query(cls, obj, access_type=None, grantee=None, grantor=None): q = cls.query.filter(cls.object_id == obj.id, cls.object_type == obj.__tablename__) if access_type: q = q.filter(AccessPermission.access_type == access_type) if grantee: q 
= q.filter(AccessPermission.grantee == grantee) if grantor: q = q.filter(AccessPermission.grantor == grantor) return q def to_dict(self): d = { 'id': self.id, 'object_id': self.object_id, 'object_type': self.object_type, 'access_type': self.access_type, 'grantor': self.grantor_id, 'grantee': self.grantee_id } return d class Change(GFKBase, db.Model): id = Column(db.Integer, primary_key=True) # 'object' defined in GFKBase object_version = Column(db.Integer, default=0) user_id = Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship(User, backref='changes') change = Column(PseudoJSON) created_at = Column(db.DateTime(True), default=db.func.now()) __tablename__ = 'changes' def to_dict(self, full=True): d = { 'id': self.id, 'object_id': self.object_id, 'object_type': self.object_type, 'change_type': self.change_type, 'object_version': self.object_version, 'change': self.change, 'created_at': self.created_at } if full: d['user'] = self.user.to_dict() else: d['user_id'] = self.user_id return d @classmethod def last_change(cls, obj): return db.session.query(cls).filter( cls.object_id == obj.id, cls.object_type == obj.__class__.__tablename__).order_by( cls.object_version.desc()).first() class Alert(TimestampMixin, db.Model): UNKNOWN_STATE = 'unknown' OK_STATE = 'ok' TRIGGERED_STATE = 'triggered' id = Column(db.Integer, primary_key=True) name = Column(db.String(255)) query_id = Column(db.Integer, db.ForeignKey("queries.id")) query_rel = db.relationship(Query, backref=backref('alerts', cascade="all")) user_id = Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship(User, backref='alerts') options = Column(MutableDict.as_mutable(PseudoJSON)) state = Column(db.String(255), default=UNKNOWN_STATE) subscriptions = db.relationship("AlertSubscription", cascade="all, delete-orphan") last_triggered_at = Column(db.DateTime(True), nullable=True) rearm = Column(db.Integer, nullable=True) __tablename__ = 'alerts' @classmethod def all(cls, group_ids): return 
db.session.query(Alert)\ .options(joinedload(Alert.user), joinedload(Alert.query_rel))\ .join(Query)\ .join(DataSourceGroup, DataSourceGroup.data_source_id == Query.data_source_id)\ .filter(DataSourceGroup.group_id.in_(group_ids)) @classmethod def get_by_id_and_org(cls, id, org): return db.session.query(Alert).join(Query).filter(Alert.id == id, Query.org == org).one() def to_dict(self, full=True): d = { 'id': self.id, 'name': self.name, 'options': self.options, 'state': self.state, 'last_triggered_at': self.last_triggered_at, 'updated_at': self.updated_at, 'created_at': self.created_at, 'rearm': self.rearm } if full: d['query'] = self.query_rel.to_dict() d['user'] = self.user.to_dict() else: d['query_id'] = self.query_id d['user_id'] = self.user_id return d def value(self): data = json.loads(self.query_rel.latest_query_data.data) if data['rows']: value = data['rows'][0][self.options['column']] op = self.options['op'] if op == 'greater than' and value > self.options['value']: new_state = self.TRIGGERED_STATE elif op == 'less than' and value < self.options['value']: new_state = self.TRIGGERED_STATE elif op == 'equals' and value == self.options['value']: new_state = self.TRIGGERED_STATE else: new_state = self.OK_STATE # todo: safe guard for empty return data['rows'][0][self.options['column']] def evaluate(self): value = self.value() op = self.options['op'] if op == 'greater than' and value > self.options['value']: new_state = self.TRIGGERED_STATE elif op == 'less than' and value < self.options['value']: new_state = self.TRIGGERED_STATE elif op == 'equals' and value == self.options['value']: new_state = self.TRIGGERED_STATE else: new_state = self.UNKNOWN_STATE return new_state def subscribers(self): return User.query.join(AlertSubscription).filter(AlertSubscription.alert == self) @property def groups(self): return self.query_rel.groups def generate_slug(ctx): slug = utils.slugify(ctx.current_parameters['name']) tries = 1 while Dashboard.query.filter(Dashboard.slug == 
slug).first() is not None: slug = utils.slugify(ctx.current_parameters['name']) + "_" + str(tries) tries += 1 return slug class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model): id = Column(db.Integer, primary_key=True) version = Column(db.Integer) org_id = Column(db.Integer, db.ForeignKey("organizations.id")) org = db.relationship(Organization, backref="dashboards") slug = Column(db.String(140), index=True, default=generate_slug) name = Column(db.String(100)) user_id = Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship(User) # TODO: The layout should dynamically be built from position and size information on each widget. # Will require update in the frontend code to support this. layout = Column(db.Text) dashboard_filters_enabled = Column(db.Boolean, default=False) is_archived = Column(db.Boolean, default=False, index=True) is_draft = Column(db.Boolean, default=True, index=True) widgets = db.relationship('Widget', backref='dashboard', lazy='dynamic') __tablename__ = 'dashboards' __mapper_args__ = { "version_id_col": version } def to_dict(self, with_widgets=False, user=None): layout = json.loads(self.layout) if with_widgets: widget_list = Widget.query.filter(Widget.dashboard == self) widgets = {} for w in widget_list: if w.visualization_id is None: widgets[w.id] = w.to_dict() elif user and has_access(w.visualization.query_rel.groups, user, view_only): widgets[w.id] = w.to_dict() else: widgets[w.id] = project(w.to_dict(), ('id', 'width', 'dashboard_id', 'options', 'created_at', 'updated_at')) widgets[w.id]['restricted'] = True # The following is a workaround for cases when the widget object gets deleted without the dashboard layout # updated. This happens for users with old databases that didn't have a foreign key relationship between # visualizations and widgets. # It's temporary until better solution is implemented (we probably should move the position information # to the widget). 
widgets_layout = [] for row in layout: if not row: continue new_row = [] for widget_id in row: widget = widgets.get(widget_id, None) if widget: new_row.append(widget) widgets_layout.append(new_row) else: widgets_layout = None return { 'id': self.id, 'slug': self.slug, 'name': self.name, 'user_id': self.user_id, 'layout': layout, 'dashboard_filters_enabled': self.dashboard_filters_enabled, 'widgets': widgets_layout, 'is_archived': self.is_archived, 'is_draft': self.is_draft, 'updated_at': self.updated_at, 'created_at': self.created_at, 'version': self.version } @classmethod def all(cls, org, group_ids, user_id): query = ( Dashboard.query .outerjoin(Widget) .outerjoin(Visualization) .outerjoin(Query) .outerjoin(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id) .filter( Dashboard.is_archived == False, (DataSourceGroup.group_id.in_(group_ids) | (Dashboard.user_id == user_id) | ((Widget.dashboard != None) & (Widget.visualization == None))), Dashboard.org == org) .group_by(Dashboard.id)) query = query.filter(or_(Dashboard.user_id == user_id, Dashboard.is_draft == False)) return query @classmethod def recent(cls, org, group_ids, user_id, for_user=False, limit=20): query = (Dashboard.query .outerjoin(Event, Dashboard.id == Event.object_id.cast(db.Integer)) .outerjoin(Widget) .outerjoin(Visualization) .outerjoin(Query) .outerjoin(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id) .filter( Event.created_at > (db.func.current_date() - 7), Event.action.in_(['edit', 'view']), Event.object_id != None, Event.object_type == 'dashboard', Dashboard.org == org, Dashboard.is_archived == False, or_(Dashboard.is_draft == False, Dashboard.user_id == user_id), DataSourceGroup.group_id.in_(group_ids) | (Dashboard.user_id == user_id) | ((Widget.dashboard != None) & (Widget.visualization == None))) .group_by(Event.object_id, Dashboard.id) .order_by(db.desc(db.func.count(0)))) if for_user: query = query.filter(Event.user_id == user_id) query = 
query.limit(limit) return query @classmethod def get_by_slug_and_org(cls, slug, org): return cls.query.filter(cls.slug == slug, cls.org == org).one() def __unicode__(self): return u"%s=%s" % (self.id, self.name) class Visualization(TimestampMixin, db.Model): id = Column(db.Integer, primary_key=True) type = Column(db.String(100)) query_id = Column(db.Integer, db.ForeignKey("queries.id")) # query_rel and not query, because db.Model already has query defined. query_rel = db.relationship(Query, back_populates='visualizations') name = Column(db.String(255)) description = Column(db.String(4096), nullable=True) options = Column(db.Text) __tablename__ = 'visualizations' def to_dict(self, with_query=True): d = { 'id': self.id, 'type': self.type, 'name': self.name, 'description': self.description, 'options': json.loads(self.options), 'updated_at': self.updated_at, 'created_at': self.created_at } if with_query: d['query'] = self.query_rel.to_dict() return d @classmethod def get_by_id_and_org(cls, visualization_id, org): return db.session.query(Visualization).join(Query).filter( cls.id == visualization_id, Query.org == org).one() def __unicode__(self): return u"%s %s" % (self.id, self.type) class Widget(TimestampMixin, db.Model): id = Column(db.Integer, primary_key=True) visualization_id = Column(db.Integer, db.ForeignKey('visualizations.id'), nullable=True) visualization = db.relationship(Visualization, backref='widgets') text = Column(db.Text, nullable=True) width = Column(db.Integer) options = Column(db.Text) dashboard_id = Column(db.Integer, db.ForeignKey("dashboards.id"), index=True) # unused; kept for backward compatability: type = Column(db.String(100), nullable=True) query_id = Column(db.Integer, nullable=True) __tablename__ = 'widgets' def to_dict(self): d = { 'id': self.id, 'width': self.width, 'options': json.loads(self.options), 'dashboard_id': self.dashboard_id, 'text': self.text, 'updated_at': self.updated_at, 'created_at': self.created_at } if self.visualization 
and self.visualization.id: d['visualization'] = self.visualization.to_dict() return d def delete(self): layout = json.loads(self.dashboard.layout) layout = map(lambda row: filter(lambda w: w != self.id, row), layout) layout = filter(lambda row: len(row) > 0, layout) self.dashboard.layout = json.dumps(layout) db.session.add(self.dashboard) db.session.delete(self) def __unicode__(self): return u"%s" % self.id @classmethod def get_by_id_and_org(cls, widget_id, org): return db.session.query(cls).join(Dashboard).filter(cls.id == widget_id, Dashboard.org == org).one() class Event(db.Model): id = Column(db.Integer, primary_key=True) org_id = Column(db.Integer, db.ForeignKey("organizations.id")) org = db.relationship(Organization, backref="events") user_id = Column(db.Integer, db.ForeignKey("users.id"), nullable=True) user = db.relationship(User, backref="events") action = Column(db.String(255)) object_type = Column(db.String(255)) object_id = Column(db.String(255), nullable=True) additional_properties = Column(MutableDict.as_mutable(PseudoJSON), nullable=True, default={}) created_at = Column(db.DateTime(True), default=db.func.now()) __tablename__ = 'events' def __unicode__(self): return u"%s,%s,%s,%s" % (self.user_id, self.action, self.object_type, self.object_id) def to_dict(self): return { 'org_id': self.org_id, 'user_id': self.user_id, 'action': self.action, 'object_type': self.object_type, 'object_id': self.object_id, 'additional_properties': self.additional_properties, 'created_at': self.created_at.isoformat() } @classmethod def record(cls, event): org_id = event.pop('org_id') user_id = event.pop('user_id', None) action = event.pop('action') object_type = event.pop('object_type') object_id = event.pop('object_id', None) created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp')) event = cls(org_id=org_id, user_id=user_id, action=action, object_type=object_type, object_id=object_id, additional_properties=event, created_at=created_at) db.session.add(event) 
return event class ApiKey(TimestampMixin, GFKBase, db.Model): id = Column(db.Integer, primary_key=True) org_id = Column(db.Integer, db.ForeignKey("organizations.id")) org = db.relationship(Organization) api_key = Column(db.String(255), index=True, default=lambda: generate_token(40)) active = Column(db.Boolean, default=True) # 'object' provided by GFKBase created_by_id = Column(db.Integer, db.ForeignKey("users.id"), nullable=True) created_by = db.relationship(User) __tablename__ = 'api_keys' __table_args__ = (db.Index('api_keys_object_type_object_id', 'object_type', 'object_id'),) @classmethod def get_by_api_key(cls, api_key): return cls.query.filter(cls.api_key == api_key, cls.active == True).one() @classmethod def get_by_object(cls, object): return cls.query.filter(cls.object_type == object.__class__.__tablename__, cls.object_id == object.id, cls.active == True).first() @classmethod def create_for_object(cls, object, user): k = cls(org=user.org, object=object, created_by=user) db.session.add(k) return k class NotificationDestination(BelongsToOrgMixin, db.Model): id = Column(db.Integer, primary_key=True) org_id = Column(db.Integer, db.ForeignKey("organizations.id")) org = db.relationship(Organization, backref="notification_destinations") user_id = Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship(User, backref="notification_destinations") name = Column(db.String(255)) type = Column(db.String(255)) options = Column(ConfigurationContainer.as_mutable(Configuration)) created_at = Column(db.DateTime(True), default=db.func.now()) __tablename__ = 'notification_destinations' __table_args__ = (db.Index('notification_destinations_org_id_name', 'org_id', 'name', unique=True),) def to_dict(self, all=False): d = { 'id': self.id, 'name': self.name, 'type': self.type, 'icon': self.destination.icon() } if all: schema = get_configuration_schema_for_destination_type(self.type) self.options.set_schema(schema) d['options'] = self.options.to_dict(mask_secrets=True) 
return d def __unicode__(self): return self.name @property def destination(self): return get_destination(self.type, self.options) @classmethod def all(cls, org): notification_destinations = cls.query.filter(cls.org == org).order_by(cls.id.asc()) return notification_destinations def notify(self, alert, query, user, new_state, app, host): schema = get_configuration_schema_for_destination_type(self.type) self.options.set_schema(schema) return self.destination.notify(alert, query, user, new_state, app, host, self.options) class AlertSubscription(TimestampMixin, db.Model): id = Column(db.Integer, primary_key=True) user_id = Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship(User) destination_id = Column(db.Integer, db.ForeignKey("notification_destinations.id"), nullable=True) destination = db.relationship(NotificationDestination) alert_id = Column(db.Integer, db.ForeignKey("alerts.id")) alert = db.relationship(Alert, back_populates="subscriptions") __tablename__ = 'alert_subscriptions' __table_args__ = (db.Index('alert_subscriptions_destination_id_alert_id', 'destination_id', 'alert_id', unique=True),) def to_dict(self): d = { 'id': self.id, 'user': self.user.to_dict(), 'alert_id': self.alert_id } if self.destination: d['destination'] = self.destination.to_dict() return d @classmethod def all(cls, alert_id): return AlertSubscription.query.join(User).filter(AlertSubscription.alert_id == alert_id) def notify(self, alert, query, user, new_state, app, host): if self.destination: return self.destination.notify(alert, query, user, new_state, app, host) else: # User email subscription, so create an email destination object config = {'addresses': self.user.email} schema = get_configuration_schema_for_destination_type('email') options = ConfigurationContainer(config, schema) destination = get_destination('email', options) return destination.notify(alert, query, user, new_state, app, host, options) class QuerySnippet(TimestampMixin, db.Model, BelongsToOrgMixin): 
id = Column(db.Integer, primary_key=True) org_id = Column(db.Integer, db.ForeignKey("organizations.id")) org = db.relationship(Organization, backref="query_snippets") trigger = Column(db.String(255), unique=True) description = Column(db.Text) user_id = Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship(User, backref="query_snippets") snippet = Column(db.Text) __tablename__ = 'query_snippets' @classmethod def all(cls, org): return cls.query.filter(cls.org == org) def to_dict(self): d = { 'id': self.id, 'trigger': self.trigger, 'description': self.description, 'snippet': self.snippet, 'user': self.user.to_dict(), 'updated_at': self.updated_at, 'created_at': self.created_at } return d _gfk_types = {'queries': Query, 'dashboards': Dashboard} def init_db(): default_org = Organization(name="Default", slug='default', settings={}) admin_group = Group(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP) default_group = Group(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP) db.session.add_all([default_org, admin_group, default_group]) # XXX remove after fixing User.group_ids db.session.commit() return default_org, admin_group, default_group
{ "content_hash": "f568103a78c7ef09620db2ad307d1e29", "timestamp": "", "source": "github", "line_count": 1674, "max_line_length": 138, "avg_line_length": 35.24970131421745, "alnum_prop": 0.5933602223427332, "repo_name": "crowdworks/redash", "id": "502f2521b79b2fab8c8f0d0d4aa7946ee7f01381", "size": "59008", "binary": false, "copies": "1", "ref": "refs/heads/crowdworks", "path": "redash/models.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "363388" }, { "name": "HTML", "bytes": "133346" }, { "name": "JavaScript", "bytes": "280524" }, { "name": "Makefile", "bytes": "807" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "750251" }, { "name": "Shell", "bytes": "26661" } ], "symlink_target": "" }
import os from neutron.common import config # noqa from neutron.db import model_base import sqlalchemy as sa from gbpservice.neutron.services.grouppolicy import ( group_policy_driver_api as api) from gbpservice.neutron.services.grouppolicy import config from gbpservice.neutron.tests.unit import common as cm from gbpservice.neutron.tests.unit.services.grouppolicy import ( extensions as test_ext) from gbpservice.neutron.tests.unit.services.grouppolicy import ( test_grouppolicy_plugin as test_plugin) from gbpservice.neutron.tests.unit.services.grouppolicy.extensions import ( test_extension as test_extension) class ExtensionDriverTestBase(test_plugin.GroupPolicyPluginTestCase): _extension_drivers = ['test'] _extension_path = os.path.dirname(os.path.abspath(test_ext.__file__)) def setUp(self): config.cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='group_policy') if self._extension_path: config.cfg.CONF.set_override( 'api_extensions_path', self._extension_path) super(ExtensionDriverTestBase, self).setUp() class ExtensionDriverTestCase(ExtensionDriverTestBase): def test_pt_attr(self): # Test create with default value. pt = self.create_policy_target() policy_target_id = pt['policy_target']['id'] val = pt['policy_target']['pt_extension'] self.assertIsNone(val) req = self.new_show_request('policy_targets', policy_target_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_target']['pt_extension'] self.assertIsNone(val) # Test list. res = self._list('policy_targets') val = res['policy_targets'][0]['pt_extension'] self.assertIsNone(val) # Test create with explict value. 
pt = self.create_policy_target(pt_extension="abc") policy_target_id = pt['policy_target']['id'] val = pt['policy_target']['pt_extension'] self.assertEqual("abc", val) req = self.new_show_request('policy_targets', policy_target_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_target']['pt_extension'] self.assertEqual("abc", val) # Test update. data = {'policy_target': {'pt_extension': "def"}} req = self.new_update_request('policy_targets', data, policy_target_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_target']['pt_extension'] self.assertEqual("def", val) req = self.new_show_request('policy_targets', policy_target_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_target']['pt_extension'] self.assertEqual("def", val) def test_ptg_attr(self): # Test create with default value. ptg = self.create_policy_target_group() policy_target_group_id = ptg['policy_target_group']['id'] val = ptg['policy_target_group']['ptg_extension'] self.assertIsNone(val) req = self.new_show_request('policy_target_groups', policy_target_group_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_target_group']['ptg_extension'] self.assertIsNone(val) # Test list. res = self._list('policy_target_groups') val = res['policy_target_groups'][0]['ptg_extension'] self.assertIsNone(val) # Test create with explict value. ptg = self.create_policy_target_group(ptg_extension="abc") policy_target_group_id = ptg['policy_target_group']['id'] val = ptg['policy_target_group']['ptg_extension'] self.assertEqual("abc", val) req = self.new_show_request('policy_target_groups', policy_target_group_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_target_group']['ptg_extension'] self.assertEqual("abc", val) # Test update. 
data = {'policy_target_group': {'ptg_extension': "def"}} req = self.new_update_request('policy_target_groups', data, policy_target_group_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_target_group']['ptg_extension'] self.assertEqual("def", val) req = self.new_show_request('policy_target_groups', policy_target_group_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_target_group']['ptg_extension'] self.assertEqual("def", val) def test_l2p_attr(self): # Test create with default value. l2p = self.create_l2_policy() l2_policy_id = l2p['l2_policy']['id'] val = l2p['l2_policy']['l2p_extension'] self.assertIsNone(val) req = self.new_show_request('l2_policies', l2_policy_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['l2_policy']['l2p_extension'] self.assertIsNone(val) # Test list. res = self._list('l2_policies') val = res['l2_policies'][0]['l2p_extension'] self.assertIsNone(val) # Test create with explict value. l2p = self.create_l2_policy(l2p_extension="abc") l2_policy_id = l2p['l2_policy']['id'] val = l2p['l2_policy']['l2p_extension'] self.assertEqual("abc", val) req = self.new_show_request('l2_policies', l2_policy_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['l2_policy']['l2p_extension'] self.assertEqual("abc", val) # Test update. data = {'l2_policy': {'l2p_extension': "def"}} req = self.new_update_request('l2_policies', data, l2_policy_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['l2_policy']['l2p_extension'] self.assertEqual("def", val) req = self.new_show_request('l2_policies', l2_policy_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['l2_policy']['l2p_extension'] self.assertEqual("def", val) def test_l3p_attr(self): # Test create with default value. 
l3p = self.create_l3_policy() l3_policy_id = l3p['l3_policy']['id'] val = l3p['l3_policy']['l3p_extension'] self.assertIsNone(val) req = self.new_show_request('l3_policies', l3_policy_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['l3_policy']['l3p_extension'] self.assertIsNone(val) # Test list. res = self._list('l3_policies') val = res['l3_policies'][0]['l3p_extension'] self.assertIsNone(val) # Test create with explict value. l3p = self.create_l3_policy(l3p_extension="abc") l3_policy_id = l3p['l3_policy']['id'] val = l3p['l3_policy']['l3p_extension'] self.assertEqual("abc", val) req = self.new_show_request('l3_policies', l3_policy_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['l3_policy']['l3p_extension'] self.assertEqual("abc", val) # Test update. data = {'l3_policy': {'l3p_extension': "def"}} req = self.new_update_request('l3_policies', data, l3_policy_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['l3_policy']['l3p_extension'] self.assertEqual("def", val) req = self.new_show_request('l3_policies', l3_policy_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['l3_policy']['l3p_extension'] self.assertEqual("def", val) def test_pc_attr(self): # Test create with default value. pc = self.create_policy_classifier() policy_classifier_id = pc['policy_classifier']['id'] val = pc['policy_classifier']['pc_extension'] self.assertIsNone(val) req = self.new_show_request('policy_classifiers', policy_classifier_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_classifier']['pc_extension'] self.assertIsNone(val) # Test list. res = self._list('policy_classifiers') val = res['policy_classifiers'][0]['pc_extension'] self.assertIsNone(val) # Test create with explict value. 
pc = self.create_policy_classifier(pc_extension="abc") policy_classifier_id = pc['policy_classifier']['id'] val = pc['policy_classifier']['pc_extension'] self.assertEqual("abc", val) req = self.new_show_request('policy_classifiers', policy_classifier_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_classifier']['pc_extension'] self.assertEqual("abc", val) # Test update. data = {'policy_classifier': {'pc_extension': "def"}} req = self.new_update_request('policy_classifiers', data, policy_classifier_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_classifier']['pc_extension'] self.assertEqual("def", val) req = self.new_show_request('policy_classifiers', policy_classifier_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_classifier']['pc_extension'] self.assertEqual("def", val) def test_pa_attr(self): # Test create with default value. pa = self.create_policy_action() policy_action_id = pa['policy_action']['id'] val = pa['policy_action']['pa_extension'] self.assertIsNone(val) req = self.new_show_request('policy_actions', policy_action_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_action']['pa_extension'] self.assertIsNone(val) # Test list. res = self._list('policy_actions') val = res['policy_actions'][0]['pa_extension'] self.assertIsNone(val) # Test create with explict value. pa = self.create_policy_action(pa_extension="abc") policy_action_id = pa['policy_action']['id'] val = pa['policy_action']['pa_extension'] self.assertEqual("abc", val) req = self.new_show_request('policy_actions', policy_action_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_action']['pa_extension'] self.assertEqual("abc", val) # Test update. 
data = {'policy_action': {'pa_extension': "def"}} req = self.new_update_request('policy_actions', data, policy_action_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_action']['pa_extension'] self.assertEqual("def", val) req = self.new_show_request('policy_actions', policy_action_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_action']['pa_extension'] self.assertEqual("def", val) def test_pr_attr(self): # Create necessary parameters. classifier = self.create_policy_classifier( name="class1", protocol="tcp", direction="out", port_range="50:100") classifier_id = classifier['policy_classifier']['id'] # Test create with default value. pr = self.create_policy_rule(policy_classifier_id=classifier_id) policy_rule_id = pr['policy_rule']['id'] val = pr['policy_rule']['pr_extension'] self.assertIsNone(val) req = self.new_show_request('policy_rules', policy_rule_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_rule']['pr_extension'] self.assertIsNone(val) # Test list. res = self._list('policy_rules') val = res['policy_rules'][0]['pr_extension'] self.assertIsNone(val) # Test create with explict value. pr = self.create_policy_rule(policy_classifier_id=classifier_id, pr_extension="abc") policy_rule_id = pr['policy_rule']['id'] val = pr['policy_rule']['pr_extension'] self.assertEqual("abc", val) req = self.new_show_request('policy_rules', policy_rule_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_rule']['pr_extension'] self.assertEqual("abc", val) # Test update. 
data = {'policy_rule': {'pr_extension': "def"}} req = self.new_update_request('policy_rules', data, policy_rule_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_rule']['pr_extension'] self.assertEqual("def", val) req = self.new_show_request('policy_rules', policy_rule_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_rule']['pr_extension'] self.assertEqual("def", val) def test_prs_attr(self): # Test create with default value. prs = self.create_policy_rule_set(policy_rules=[]) policy_rule_set_id = prs['policy_rule_set']['id'] val = prs['policy_rule_set']['prs_extension'] self.assertIsNone(val) req = self.new_show_request('policy_rule_sets', policy_rule_set_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_rule_set']['prs_extension'] self.assertIsNone(val) # Test list. res = self._list('policy_rule_sets') val = res['policy_rule_sets'][0]['prs_extension'] self.assertIsNone(val) # Test create with explict value. prs = self.create_policy_rule_set(policy_rules=[], prs_extension="abc") policy_rule_set_id = prs['policy_rule_set']['id'] val = prs['policy_rule_set']['prs_extension'] self.assertEqual("abc", val) req = self.new_show_request('policy_rule_sets', policy_rule_set_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_rule_set']['prs_extension'] self.assertEqual("abc", val) # Test update. data = {'policy_rule_set': {'prs_extension': "def"}} req = self.new_update_request('policy_rule_sets', data, policy_rule_set_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_rule_set']['prs_extension'] self.assertEqual("def", val) req = self.new_show_request('policy_rule_sets', policy_rule_set_id) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) val = res['policy_rule_set']['prs_extension'] self.assertEqual("def", val) def test_nsp_attr(self): # Test create with default value. 
        # --- Tail of a test method whose ``def`` line precedes this chunk:
        # round-trips the nsp_extension attribute of a network service
        # policy through create / show / list / update.
        nsp = self.create_network_service_policy()
        network_service_policy_id = nsp['network_service_policy']['id']
        val = nsp['network_service_policy']['nsp_extension']
        self.assertIsNone(val)
        req = self.new_show_request('network_service_policies',
                                    network_service_policy_id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        val = res['network_service_policy']['nsp_extension']
        self.assertIsNone(val)
        # Test list.
        res = self._list('network_service_policies')
        val = res['network_service_policies'][0]['nsp_extension']
        self.assertIsNone(val)
        # Test create with explicit value.
        nsp = self.create_network_service_policy(nsp_extension="abc")
        network_service_policy_id = nsp['network_service_policy']['id']
        val = nsp['network_service_policy']['nsp_extension']
        self.assertEqual("abc", val)
        req = self.new_show_request('network_service_policies',
                                    network_service_policy_id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        val = res['network_service_policy']['nsp_extension']
        self.assertEqual("abc", val)
        # Test update.
        data = {'network_service_policy': {'nsp_extension': "def"}}
        req = self.new_update_request('network_service_policies', data,
                                      network_service_policy_id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        val = res['network_service_policy']['nsp_extension']
        self.assertEqual("def", val)
        req = self.new_show_request('network_service_policies',
                                    network_service_policy_id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        val = res['network_service_policy']['nsp_extension']
        self.assertEqual("def", val)

    def test_es_attr(self):
        # External segment round-trip via the generic helper below.
        self._test_attr('external_segment')

    def test_ep_attr(self):
        # External policy round-trip via the generic helper below.
        self._test_attr('external_policy')

    def test_np_attr(self):
        # NAT pool round-trip via the generic helper below.
        self._test_attr('nat_pool')

    def _test_attr(self, type):
        # Generic round-trip of the '<acronym>_extension' attribute for the
        # given resource type: default on create/show/list, explicit value
        # on create, then update.  NOTE: the parameter name shadows the
        # ``type`` builtin (kept unchanged for compatibility).
        # Test create with default value.
        acronim = _acronim(type)
        plural = cm.get_resource_plural(type)
        obj = getattr(self, 'create_%s' % type)()
        id = obj[type]['id']
        val = obj[type][acronim + '_extension']
        self.assertIsNone(val)
        req = self.new_show_request(plural, id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        val = res[type][acronim + '_extension']
        self.assertIsNone(val)
        # Test list.
        res = self._list(plural)
        val = res[plural][0][acronim + '_extension']
        self.assertIsNone(val)
        # Test create with explicit value.
        kwargs = {acronim + '_extension': "abc"}
        obj = getattr(self, 'create_%s' % type)(**kwargs)
        id = obj[type]['id']
        val = obj[type][acronim + '_extension']
        self.assertEqual("abc", val)
        req = self.new_show_request(plural, id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        val = res[type][acronim + '_extension']
        self.assertEqual("abc", val)
        # Test update.
        data = {type: {acronim + '_extension': "def"}}
        req = self.new_update_request(plural, data, id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        val = res[type][acronim + '_extension']
        self.assertEqual("def", val)
        req = self.new_show_request(plural, id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        val = res[type][acronim + '_extension']
        self.assertEqual("def", val)


# One test-only table per resource.  Each holds a single
# '<acronym>_extension' string column keyed by the owning resource's id
# and deleted together with it (ondelete="CASCADE").

class TestPolicyTargetExtension(model_base.BASEV2):
    __tablename__ = 'test_policy_target_extension'
    policy_target_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('gp_policy_targets.id',
                                               ondelete="CASCADE"),
                                 primary_key=True)
    pt_extension = sa.Column(sa.String(64))


class TestPolicyTargetGroupExtension(model_base.BASEV2):
    __tablename__ = 'test_policy_target_group_extension'
    policy_target_group_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('gp_policy_target_groups.id', ondelete="CASCADE"),
        primary_key=True)
    ptg_extension = sa.Column(sa.String(64))


class TestL2PolicyExtension(model_base.BASEV2):
    __tablename__ = 'test_l2_policy_extension'
    l2_policy_id = sa.Column(sa.String(36),
                             sa.ForeignKey('gp_l2_policies.id',
                                           ondelete="CASCADE"),
                             primary_key=True)
    l2p_extension = sa.Column(sa.String(64))


class TestL3PolicyExtension(model_base.BASEV2):
    __tablename__ = 'test_l3_policy_extension'
    l3_policy_id = sa.Column(sa.String(36),
                             sa.ForeignKey('gp_l3_policies.id',
                                           ondelete="CASCADE"),
                             primary_key=True)
    l3p_extension = sa.Column(sa.String(64))


class TestPolicyClassifierExtension(model_base.BASEV2):
    __tablename__ = 'test_policy_classifier_extension'
    policy_classifier_id = sa.Column(sa.String(36),
                                     sa.ForeignKey('gp_policy_classifiers.id',
                                                   ondelete="CASCADE"),
                                     primary_key=True)
    pc_extension = sa.Column(sa.String(64))


class TestPolicyActionExtension(model_base.BASEV2):
    __tablename__ = 'test_policy_action_extension'
    policy_action_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('gp_policy_actions.id',
                                               ondelete="CASCADE"),
                                 primary_key=True)
    pa_extension = sa.Column(sa.String(64))


class TestPolicyRuleExtension(model_base.BASEV2):
    __tablename__ = 'test_policy_rule_extension'
    policy_rule_id = sa.Column(sa.String(36),
                               sa.ForeignKey('gp_policy_rules.id',
                                             ondelete="CASCADE"),
                               primary_key=True)
    pr_extension = sa.Column(sa.String(64))


class TestPolicyRuleSetExtension(model_base.BASEV2):
    __tablename__ = 'test_policy_rule_set_extension'
    policy_rule_set_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('gp_policy_rule_sets.id',
                                                 ondelete="CASCADE"),
                                   primary_key=True)
    prs_extension = sa.Column(sa.String(64))


class TestNetworkServicePolicyExtension(model_base.BASEV2):
    __tablename__ = 'test_network_service_policy_extension'
    network_service_policy_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('gp_network_service_policies.id', ondelete="CASCADE"),
        primary_key=True)
    nsp_extension = sa.Column(sa.String(64))


class TestExternalSegmentExtension(model_base.BASEV2):
    __tablename__ = 'test_external_segment_extension'
    external_segment_id = sa.Column(sa.String(36),
                                    sa.ForeignKey('gp_external_segments.id',
                                                  ondelete="CASCADE"),
                                    primary_key=True)
    es_extension = sa.Column(sa.String(64))


class TestExternalPolicyExtension(model_base.BASEV2):
    __tablename__ = 'test_external_policy_extension'
    external_policy_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('gp_external_policies.id',
                                                 ondelete="CASCADE"),
                                   primary_key=True)
    ep_extension = sa.Column(sa.String(64))


class TestNatPoolExtension(model_base.BASEV2):
    __tablename__ = 'test_nat_pool_extension'
    nat_pool_id = sa.Column(sa.String(36),
                            sa.ForeignKey('gp_nat_pools.id',
                                          ondelete="CASCADE"),
                            primary_key=True)
    np_extension = sa.Column(sa.String(64))


class TestExtensionDriver(api.ExtensionDriver):
    # Minimal extension driver: for every resource the three hooks
    # (process_create_*, process_update_*, extend_*_dict) delegate
    # entirely to api.default_extension_behavior with the matching
    # model class defined above.
    _supported_extension_alias = 'test_extension'
    _extension_dict = test_extension.EXTENDED_ATTRIBUTES_2_0

    def initialize(self):
        pass

    @property
    def extension_alias(self):
        return self._supported_extension_alias

    @api.default_extension_behavior(TestPolicyTargetExtension)
    def process_create_policy_target(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyTargetExtension)
    def process_update_policy_target(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyTargetExtension)
    def extend_policy_target_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestPolicyTargetGroupExtension)
    def process_create_policy_target_group(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyTargetGroupExtension)
    def process_update_policy_target_group(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyTargetGroupExtension)
    def extend_policy_target_group_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestL2PolicyExtension)
    def process_create_l2_policy(self, session, data, result):
        pass

    @api.default_extension_behavior(TestL2PolicyExtension)
    def process_update_l2_policy(self, session, data, result):
        pass

    @api.default_extension_behavior(TestL2PolicyExtension)
    def extend_l2_policy_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestL3PolicyExtension)
    def process_create_l3_policy(self, session, data, result):
        pass

    @api.default_extension_behavior(TestL3PolicyExtension)
    def process_update_l3_policy(self, session, data, result):
        pass

    @api.default_extension_behavior(TestL3PolicyExtension)
    def extend_l3_policy_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestPolicyClassifierExtension)
    def process_create_policy_classifier(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyClassifierExtension)
    def process_update_policy_classifier(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyClassifierExtension)
    def extend_policy_classifier_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestPolicyActionExtension)
    def process_create_policy_action(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyActionExtension)
    def process_update_policy_action(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyActionExtension)
    def extend_policy_action_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestPolicyRuleExtension)
    def process_create_policy_rule(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyRuleExtension)
    def process_update_policy_rule(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyRuleExtension)
    def extend_policy_rule_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestPolicyRuleSetExtension)
    def process_create_policy_rule_set(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyRuleSetExtension)
    def process_update_policy_rule_set(self, session, data, result):
        pass

    @api.default_extension_behavior(TestPolicyRuleSetExtension)
    def extend_policy_rule_set_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestNetworkServicePolicyExtension)
    def process_create_network_service_policy(self, session, data, result):
        pass

    @api.default_extension_behavior(TestNetworkServicePolicyExtension)
    def process_update_network_service_policy(self, session, data, result):
        pass

    @api.default_extension_behavior(TestNetworkServicePolicyExtension)
    def extend_network_service_policy_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestExternalSegmentExtension)
    def process_create_external_segment(self, session, data, result):
        pass

    @api.default_extension_behavior(TestExternalSegmentExtension)
    def process_update_external_segment(self, session, data, result):
        pass

    @api.default_extension_behavior(TestExternalSegmentExtension)
    def extend_external_segment_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestExternalPolicyExtension)
    def process_create_external_policy(self, session, data, result):
        pass

    @api.default_extension_behavior(TestExternalPolicyExtension)
    def process_update_external_policy(self, session, data, result):
        pass

    @api.default_extension_behavior(TestExternalPolicyExtension)
    def extend_external_policy_dict(self, session, result):
        pass

    @api.default_extension_behavior(TestNatPoolExtension)
    def process_create_nat_pool(self, session, data, result):
        pass

    @api.default_extension_behavior(TestNatPoolExtension)
    def process_update_nat_pool(self, session, data, result):
        pass

    @api.default_extension_behavior(TestNatPoolExtension)
    def extend_nat_pool_dict(self, session, result):
        pass


def _acronim(type):
    # Initial letter of each '_'-separated word, e.g.
    # 'policy_target_group' -> 'ptg'.  (Misspelling of 'acronym' kept:
    # the name is referenced throughout this module and possibly above
    # this chunk.)
    return ''.join([x[0] for x in type.split('_')])
{ "content_hash": "a681e147b39f71bd1aa86e49537d2fd5", "timestamp": "", "source": "github", "line_count": 699, "max_line_length": 79, "avg_line_length": 41.31902718168813, "alnum_prop": 0.619036077833945, "repo_name": "tbachman/group-based-policy", "id": "c9e1d217ebc1b3ddc24e0b0d07f83dbd24ae3252", "size": "29455", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "gbpservice/neutron/tests/unit/services/grouppolicy/test_extension_driver_api.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "2130911" }, { "name": "Shell", "bytes": "28973" } ], "symlink_target": "" }
from modeltranslation.translator import translator
from mezzanine.core.translation import (TranslatedSlugged,
                                        TranslatedDisplayable,
                                        TranslatedRichText)
from mezzanine.blog.models import BlogCategory, BlogPost


class TranslatedBlogPost(TranslatedDisplayable, TranslatedRichText):
    """Translation options for ``BlogPost``.

    No model-specific fields are added here; everything translatable
    comes from the inherited translation option classes.
    """
    fields = ()


class TranslatedBlogCategory(TranslatedSlugged):
    """Translation options for ``BlogCategory``.

    No model-specific fields are added here; everything translatable
    comes from the inherited translation option class.
    """
    fields = ()


# Register both blog models with django-modeltranslation.
translator.register(BlogCategory, TranslatedBlogCategory)
translator.register(BlogPost, TranslatedBlogPost)
{ "content_hash": "c5c88b3bdb5ec6b421e3d329f0506de0", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 68, "avg_line_length": 34.5, "alnum_prop": 0.7228260869565217, "repo_name": "dekomote/mezzanine-modeltranslation-backport", "id": "90773e7b27b0a73834be3e1ccfe5d58590e1aa18", "size": "552", "binary": false, "copies": "1", "ref": "refs/heads/translation_in_3.1.10", "path": "mezzanine/blog/translation.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "104823" }, { "name": "HTML", "bytes": "88707" }, { "name": "JavaScript", "bytes": "249888" }, { "name": "Nginx", "bytes": "2261" }, { "name": "Python", "bytes": "640391" } ], "symlink_target": "" }
from .resource import Resource


class ServiceDiagnosticSettingsResource(Resource):
    """Description of a service diagnostic setting.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Azure resource Id
    :vartype id: str
    :param name: Azure resource name
    :type name: str
    :ivar type: Azure resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param storage_account_id: The resource ID of the storage account to
     which you would like to send Diagnostic Logs.
    :type storage_account_id: str
    :param service_bus_rule_id: The service bus rule ID of the service bus
     namespace in which you would like to have Event Hubs created for
     streaming Diagnostic Logs. The rule ID is of the format: '{service bus
     resource ID}/authorizationrules/{key name}'.
    :type service_bus_rule_id: str
    :param metrics: the list of metric settings.
    :type metrics: list of :class:`MetricSettings
     <azure.mgmt.monitor.models.MetricSettings>`
    :param logs: the list of logs settings.
    :type logs: list of :class:`LogSettings
     <azure.mgmt.monitor.models.LogSettings>`
    :param workspace_id: The workspace ID (resource ID of a Log Analytics
     workspace) for a Log Analytics workspace to which you would like to
     send Diagnostic Logs. Example:
     /subscriptions/4b9e8510-67ab-4e9a-95a9-e2f1e570ea9c/resourceGroups/insights-integration/providers/Microsoft.OperationalInsights/workspaces/viruela2
    :type workspace_id: str
    """

    # Serialization constraints: 'id' and 'type' are server-populated
    # (read-only); 'location' must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # Maps each Python attribute to its JSON wire key (setting-specific
    # attributes are nested under 'properties.*') and its serializer
    # type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
        'service_bus_rule_id': {'key': 'properties.serviceBusRuleId', 'type': 'str'},
        'metrics': {'key': 'properties.metrics', 'type': '[MetricSettings]'},
        'logs': {'key': 'properties.logs', 'type': '[LogSettings]'},
        'workspace_id': {'key': 'properties.workspaceId', 'type': 'str'},
    }

    def __init__(self, location, name=None, tags=None, storage_account_id=None, service_bus_rule_id=None, metrics=None, logs=None, workspace_id=None):
        # The common ARM fields (name/location/tags) are handled by the
        # Resource base class; only 'location' is required (see
        # _validation above).
        super(ServiceDiagnosticSettingsResource, self).__init__(name=name, location=location, tags=tags)
        self.storage_account_id = storage_account_id
        self.service_bus_rule_id = service_bus_rule_id
        self.metrics = metrics
        self.logs = logs
        self.workspace_id = workspace_id
{ "content_hash": "975bdea1e2719561bffa26b44e5d198e", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 152, "avg_line_length": 43.984848484848484, "alnum_prop": 0.65346193592835, "repo_name": "rjschwei/azure-sdk-for-python", "id": "74b1788a3ef438cab062875250297499795d0e3a", "size": "3377", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "azure-mgmt-monitor/azure/mgmt/monitor/models/service_diagnostic_settings_resource.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "8317911" } ], "symlink_target": "" }
""" Created on Wed Jul 23 10:32:51 2014 """ import transportdata as transdat import numpy as np import logging logging.basicConfig() l = logging.getLogger(__name__) l.setLevel(logging.DEBUG) class DataObject(): """ Creates a data object containing x and y data. Data can be processed by adding operations to a queue by simply calling the processing member functions. TODO: Add list of supported operations to the documentation here. Process the data by calling self.processData(). The processed data then is returned and stored in self.xCalc() and self.yCalc() Parameters ------- x : np.array x-channel data y : np.array y-channel data Class Members ---------- self.x : np.array original x-channel data self.y : np.array original y-channel data self.xCalc : np.array recalculated x-channel data (raw data until first process data was run) self.yCalc : np.array recalculated y-channel data (raw data until first process data was run) """ def __init__(self,x,y, label = None, path = None, group = None, paramChannel = None, param = None, xChannel = None, yChannel = None): self.x = x self.y = y self.xCalc = np.array(x) self.yCalc = np.array(y) self.label = label self.path = unicode(path) self.group = group self.paramChannel = paramChannel self.param = param self.xChannel = xChannel self.yChannel = yChannel self.operations = [] self.operationParameters = [] self.isUpDownData = True # whether the currently calculated data consists of an up and down sweep def __str__(self): return """Data Object "%s" for data in file '%s' Group: '%s' Parametrized according to '%s' (selected '%s') xChannel: '%s' (%d long) yChannel: '%s' (%d long) #Operations: %d"""%(self.label, self.path, self.group, self.paramChannel, self.param, self.xChannel, len(self.x), self.yChannel, len(self.y), len(self.operations)) def _deltaMethod(self, method): """ method : int(0-4) 0: no delta method [n] (default) 1: uneven indexed raw data [2n-1] 2: even indexed raw data [2n] 3: difference ([2n-1]-[2n])/2 4: sum 
([2n-1]+[2n])/2 """ x = self.xCalc y = self.yCalc if method == 0: # plain raw data pass elif method == 1: # odd raw data values x = transdat.separateAlternatingSignal(x)[0] y = transdat.separateAlternatingSignal(y)[0] elif method == 2: # even raw data values x = transdat.separateAlternatingSignal(x)[1] y = transdat.separateAlternatingSignal(y)[1] elif method == 3: # difference of odd - even values x = transdat.separateAlternatingSignal(x)[0] y = transdat.separateAlternatingSignal(y)[0] - transdat.separateAlternatingSignal(y)[1] elif method == 4: # difference of odd - even values x = transdat.separateAlternatingSignal(x)[0] y = transdat.separateAlternatingSignal(y)[0] + transdat.separateAlternatingSignal(y)[1] self.xCalc = x self.yCalc = y def deltaMethod(self, method): """ Queue delta method processing. Parameters ---------- method : int(0-4) 0: no delta method [n] (default) 1: uneven indexed raw data [2n-1] 2: even indexed raw data [2n] 3: difference ([2n-1]-[2n])/2 4: sum ([2n-1]+[2n])/2 """ if method: self.operations.append(self._deltaMethod) self.operationParameters.append({'method': method}) def _averageUpDown(self): """ Average up and down sweep and mark data as being averaged (for e.g. symmetrization) """ if not self.isUpDownData: raise Exception("Averaging up-down-sweep only makes sense if there's an up- and down-sweep. 
The function can only be called once.") self.xCalc = transdat.averageUpDownSweep(self.xCalc) self.yCalc = transdat.averageUpDownSweep(self.yCalc) self.isUpDownData = False def averageUpDown(self): """ Queue averaging an up and down sweep (queue this only once) """ self.operations.append(self._averageUpDown) self.operationParameters.append({}) # add empty to maintain index sync # w/ self.operations def _normalize(self, method): """ method : int(0-2) 0: no normalization (default) 1: normalize y to min(y) 2: normalize y to max(y) """ if 0 == method: pass elif 1 == method: # normalize by min(y) self.yCalc = self.yCalc/np.min(self.yCalc) elif 2 == method: # normalize by max(y) self.yCalc = self.yCalc/np.max(self.yCalc) def normalize(self, method): """ Queue normalizing the y-data according to method Parameters ---------- method : int(0-2) 0: no normalization (default) 1: normalize y to min(y) 2: normalize y to max(y) """ if method: self.operations.append(self._normalize) self.operationParameters.append({'method': method}) def _symmetrize(self, method, symm_step = None, symm_center = None): """ method : int(0-2) 0: no symmetrization (default) 1: symmetrization 2: antisymmetrization """ if ((not symm_step == None and not symm_center == None) or (symm_step == None and symm_center == None)): raise Exception("Provide either a center of symmetry (symm_center) or a symmetry step (symm_step).") x = self.xCalc y = self.yCalc if method and symm_step != None and self.isUpDownData: #admr data # only regard one half of the data for finding the period stepIdx = int(np.abs((np.abs(x[0:int(len(x))+1/2]-0)).argmin() - (np.abs(x[0:int(len(x)/2+1)]-symm_step)).argmin())) stepWidth = (x[(np.abs(x[0:int(len(x)/2+1)]-0)).argmin()] - x[np.abs(x[1:int(len(x)/2+1)]-symm_step).argmin()+1]) l.debug("(Anti-)Symmetrizing admr data with period %d (val:%f)"%(stepIdx,np.abs(stepWidth))) if 1 == method: # symmetrize y = transdat.symmetrizeSignalUpDown(y,stepIdx) x = x[0:len(y)] elif 2 == method: 
#antisymmetrize y = transdat.antiSymmetrizeSignalUpDown(y,stepIdx) x = x[0:len(y)] elif method and symm_step != None and not self.isUpDownData: #admr data where up and down sweep are already averaged stepIdx = int(np.abs((np.abs(x-0)).argmin() - (np.abs(x-symm_step)).argmin())) stepWidth = (x[(np.abs(x-0)).argmin()] - x[np.abs(x-symm_step).argmin()+1]) l.debug("(Anti-)Symmetrizing admr data with period %d (val:%f)"%(stepIdx,np.abs(stepWidth))) if 1 == method: # symmetrize y = transdat.symmetrizeSignal(y,stepIdx) x = x[0:len(y)] elif 2 == method: #antisymmetrize y = transdat.antiSymmetrizeSignal(y,stepIdx) x = x[0:len(y)] elif method and symm_center != None: centerIdx = (np.abs(x-symm_center)).argmin() l.debug("(Anti-)Symmetrizing data of len %d around index %d (val: %f)"%(len(x),centerIdx, x[centerIdx])) # R(H) data if 1 == method: # symmetrize y = transdat.symmetrizeSignalZero(y,centerIdx) x = x[0:len(y)][::-1] elif 2 == method: # symmetrize y = transdat.antiSymmetrizeSignalZero(y,centerIdx) x = x[0:len(y)][::-1] self.xCalc = x self.yCalc = y def symmetrize(self, method, symm_step = None, symm_center = None): """ Queue symmetrizing data see doc/symmetrizing for conventions and algorithm (FIXME) Parameters ---------- method : int(0-2) 0: no symmetrization (default) 1: symmetrization 2: antisymmetrization """ if method: if ((not symm_step == None and not symm_center == None) or (symm_step == None and symm_center == None)): raise Exception("Provide either a center of symmetry (symm_center) or a symmetry step (symm_step).") self.operations.append(self._symmetrize) self.operationParameters.append({'method': method, 'symm_step': symm_step, 'symm_center': symm_center}) def _offsetCorrection(self, method, offset = None): """ switchOffset : int(0-4) 0 -> no offset correction (default) 1 -> subtracts min(y) 2 -> subtracts max(y) 3 -> subtracts mean(y) 4 -> subtracts value defined in valueOffset offset : double custom value to subtract from the data (if switchOffset = 4) 
(default = None) """ if 0 == method: pass elif 1 == method: # subtract min(y) offset = np.min(self.yCalc) elif 2 == method: # subtract max(y) offset = np.max(self.yCalc) elif 3 == method: # subtract mean(y) offset = np.mean(self.yCalc) self.yCalc = self.yCalc-offset def offsetCorrection(self, method, offset = None): """ Queue substracting the offset Parameters ---------- switchOffset : int(0-4) 0 -> no offset correction 1 -> subtracts min(y) 2 -> subtracts max(y) 3 -> subtracts mean(y) 4 -> subtracts value defined in valueOffset offset : double custom value to subtract from the data (if switchOffset = 4) (default = None) """ if method: self.operations.append(self._offsetCorrection) self.operationParameters.append({'method': method, 'offset': offset}) def processData(self): """ Apply queued operations Returns ---------- xCalc : np.ndarray() x-channel of the processed data yCalc : np.ndarray() y-channel of the processed data """ self.xCalc = np.array(self.x) self.yCalc = np.array(self.y) for idx, operation in enumerate(self.operations): operation(**self.operationParameters[idx]) return self.xCalc, self.yCalc def operationsToString(self): opString = "" for idx, operation in enumerate(self.operations): opString += str(operation.__name__) + ":\n" opString += " %s"%(self.operationParameters[idx]) return opString def saveASCII(self, fname): header = str(self) + self.operationsToString() np.savetxt(fname, np.transpose((self.xCalc, self.yCalc)), header = header)
{ "content_hash": "fb2bce0499012d78d3c8295c4c365f11", "timestamp": "", "source": "github", "line_count": 323, "max_line_length": 175, "avg_line_length": 36.39009287925697, "alnum_prop": 0.5371788327377914, "repo_name": "transportWMI/previewTransportData", "id": "ed1b35cb527600c274e27a1c25650b71f546689f", "size": "11778", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/DataObject.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "58643" } ], "symlink_target": "" }
from absl.testing import absltest
from absl.testing import parameterized

from clif.testing.python import return_value_policy


class ReturnValuePolicyTestCase(parameterized.TestCase):
  """Checks constructor usage for each C++ return value policy.

  Each wrapped function's result carries an ``mtxt`` string; judging by
  the expected patterns, it appears to accumulate a ``_CpCtor`` /
  ``_MvCtor`` suffix per copy/move construction, so each regex encodes
  which constructor sequences are acceptable for that return path.
  """

  @parameterized.parameters(
      ('return_value', '^return_value_MvCtor(_MvCtor)*$'),
      ('return_reference', r'^return_reference(_CpCtor)*(_MvCtor)*$'),
      ('return_const_reference', '^return_const_reference_CpCtor(_MvCtor)*$'),
      ('return_pointer', '^return_pointer$'),
      ('return_value_policy_copy', '^return_pointer(_CpCtor)*$'),
      ('return_value_policy_move', '^return_pointer(_MvCtor)*$'),
      ('return_value_policy_reference', '^return_pointer$'),
      ('return_const_pointer', '^return_const_pointer_CpCtor$'),
      ('return_shared_pointer', '^return_shared_pointer$'),
      ('return_unique_pointer', '^return_unique_pointer$'),
      ('return_value_nocopy', '^return_value_nocopy_MvCtor(_MvCtor)*$'),
      ('return_reference_nocopy', '^return_reference_nocopy_MvCtor$'),
      ('return_pointer_nocopy', '^return_pointer_nocopy$'),
      ('return_shared_pointer_nocopy', '^return_shared_pointer_nocopy$'),
      ('return_unique_pointer_nocopy', '^return_unique_pointer_nocopy$'),
      ('return_value_nomove', '^return_value_nomove_CpCtor(_CpCtor)*$'),
      ('return_reference_nomove', '^return_reference_nomove_CpCtor(_CpCtor)*$'),
      ('return_pointer_nomove', '^return_pointer_nomove$'),
      ('return_const_reference_nomove',
       '^return_const_reference_nomove_CpCtor(_CpCtor)*$'),
      ('return_const_pointer_nomove', '^return_const_pointer_nomove_CpCtor$'),
      ('return_shared_pointer_nomove', '^return_shared_pointer_nomove$'),
      ('return_unique_pointer_nomove', '^return_unique_pointer_nomove$'),
      ('return_pointer_nocopy_nomove', '^return_pointer_nocopy_nomove$'),
      ('return_shared_pointer_nocopy_nomove',
       '^return_shared_pointer_nocopy_nomove$'),
      ('return_unique_pointer_nocopy_nomove',
       '^return_unique_pointer_nocopy_nomove$')
  )
  def testReturnValue(self, return_function, expected):
    # Call the named wrapped function and match its ctor trace.
    ret = getattr(return_value_policy, return_function)()
    self.assertRegex(ret.mtxt, expected)

  @absltest.skipIf(
      'pybind11' not in return_value_policy.__doc__,
      'Legacy PyCLIF does not use return value policy')
  def testReturnAsBytes(self):
    # pybind11 builds return the C++ string as Python bytes.
    ret = return_value_policy.return_string()
    self.assertEqual(ret, b'return_string')

  @absltest.skipIf(
      'pybind11' not in return_value_policy.__doc__,
      'These return value policies cause memory leak of legacy PyCLIF')
  @parameterized.parameters(
      ('return_value_policy_take_ownership', '^return_pointer_unowned$'),
      ('return_value_policy_automatic', '^return_pointer_unowned$'),
  )
  def testTakeOwnership(self, return_function, expected):
    ret = getattr(return_value_policy, return_function)()
    self.assertRegex(ret.mtxt, expected)


if __name__ == '__main__':
  absltest.main()
{ "content_hash": "9442b6f2c2716d94bf1122cad55a24a5", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 80, "avg_line_length": 46.666666666666664, "alnum_prop": 0.6738095238095239, "repo_name": "google/clif", "id": "21d25851a9ad33cb80bd39915595c0c9d4943e16", "size": "3516", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "clif/testing/python/return_value_policy_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "4035" }, { "name": "C++", "bytes": "685973" }, { "name": "CMake", "bytes": "29813" }, { "name": "Dockerfile", "bytes": "4053" }, { "name": "Python", "bytes": "742833" }, { "name": "Starlark", "bytes": "28337" } ], "symlink_target": "" }
# NOTE: this file follows nipype's auto-generated interface-test
# convention: the maps below mirror the trait metadata declared on
# BlurToFWHM's input/output specs and are compared field by field.
from ....testing import assert_equal
from ..preprocess import BlurToFWHM


def test_BlurToFWHM_inputs():
    # Expected trait metadata for every input of BlurToFWHM.
    input_map = dict(args=dict(argstr='%s',
    ),
    automask=dict(argstr='-automask',
    ),
    blurmaster=dict(argstr='-blurmaster %s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    fwhm=dict(argstr='-FWHM %f',
    ),
    fwhmxy=dict(argstr='-FWHMxy %f',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='-input %s',
    mandatory=True,
    ),
    mask=dict(argstr='-blurmaster %s',
    ),
    out_file=dict(argstr='-prefix %s',
    name_source=[u'in_file'],
    name_template='%s_afni',
    ),
    outputtype=dict(),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = BlurToFWHM.input_spec()

    # Yield one equality check per (trait, metadata key) pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_BlurToFWHM_outputs():
    # Expected trait metadata for every output of BlurToFWHM.
    output_map = dict(out_file=dict(),
    )
    outputs = BlurToFWHM.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
{ "content_hash": "bbf87ce77b413882814b9cd3690500c1", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 78, "avg_line_length": 25.979591836734695, "alnum_prop": 0.6135113904163394, "repo_name": "carolFrohlich/nipype", "id": "b0c965dc070d21f9e0538e61c4e8171ee9991112", "size": "1327", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "9823" }, { "name": "KiCad", "bytes": "3797" }, { "name": "Makefile", "bytes": "2320" }, { "name": "Matlab", "bytes": "1717" }, { "name": "Python", "bytes": "5451077" }, { "name": "Shell", "bytes": "3302" }, { "name": "Tcl", "bytes": "43408" } ], "symlink_target": "" }
import threading
import unittest

from trac.util.concurrency import ThreadLocal


class ThreadLocalTestCase(unittest.TestCase):

    def test_thread_local(self):
        """Attributes set in one thread are invisible in another, while
        the constructor defaults are shared by every thread."""
        local = ThreadLocal(a=1, b=2)
        local.b = 3
        local.c = 4
        snapshots = [local.__dict__.copy()]

        def worker():
            # Runs in a second thread: the defaults (a=1) are visible,
            # but not the main thread's b=3 / c=4.
            local.b = 5
            local.d = 6
            snapshots.append(local.__dict__.copy())

        helper = threading.Thread(target=worker)
        helper.start()
        helper.join()

        self.assertEqual({'a': 1, 'b': 3, 'c': 4}, snapshots[0])
        self.assertEqual({'a': 1, 'b': 5, 'd': 6}, snapshots[1])


def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ThreadLocalTestCase))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
{ "content_hash": "3fab353b53371b6df36676e1a1f03836", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 60, "avg_line_length": 26.29032258064516, "alnum_prop": 0.5975460122699386, "repo_name": "rbaumg/trac", "id": "9928463dcfb42a2b015008b786b5780bab5301f3", "size": "1312", "binary": false, "copies": "1", "ref": "refs/heads/trunk", "path": "trac/util/tests/concurrency.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "1085" }, { "name": "C#", "bytes": "114293" }, { "name": "CSS", "bytes": "40666" }, { "name": "Groff", "bytes": "1497" }, { "name": "JavaScript", "bytes": "16747" }, { "name": "Python", "bytes": "1287818" }, { "name": "Shell", "bytes": "481" }, { "name": "Smalltalk", "bytes": "11753" } ], "symlink_target": "" }
from __future__ import (
    absolute_import, division, print_function, unicode_literals)

import os

import pandas as pd

from Bio import SeqIO
from io import open
from logging import getLogger

from pdb.lib.create_delimiter import create_delimiter
from pdb.lib.data_paths import ProjectFolders, build_abs_path
from pdb.lib.progress_bar import ProgressBar


def second_filtering(dirs):
    """Compare FASTA with PDB_SEQ and remove rows that aren't matches.

    Open a UniProt fasta file for each line and compare with PDB_SEQ.
    Removes rows if the PDB sequence section is not a 100% match with the
    corresponding UniProt section, then writes the result to
    'pdb_seq_uni_filtered.tsv'.  If that file already exists, the whole
    step is skipped and the existing file is used.

    Args:
        dirs (ProjectFolders): A named tuple of directory paths.

    Returns:
        None
    """
    msg = getLogger('root')
    msg.info('START: Second filtering.')
    uni_filtered_name = 'pdb_seq_uni_filtered.tsv'
    uni_filtered_path = os.path.join(
        dirs.working,
        uni_filtered_name
    )
    delimiter = create_delimiter('\t')
    if not os.path.exists(uni_filtered_path):
        pdb_seq_path = os.path.join(dirs.working, 'pdb_seq.tsv')
        # keep_default_na=False: sequence-like strings must never be
        # coerced to NaN; only explicit 'NULL'/'N/A' count as missing.
        df = pd.read_csv(
            pdb_seq_path,
            sep=delimiter,
            index_col=0,
            keep_default_na=False,
            na_values=['NULL', 'N/A'])
        print(
            "Comparing the PDB peptide to the corresponding "
            "section of the UniProt entry. "
            "Starting with {0} rows.".format(len(df.index))
        )
        df = compare_to_uni(df, dirs.uni_data)
        print(
            'Function "compare_to_uni" complete. '
            'There are now {} rows'.format(len(df.index))
        )
        df.to_csv(uni_filtered_path, sep=delimiter, encoding='utf-8')
        print("\npdb_seq_uni_filtered.tsv written")
        print(
            "Wrote {} file; second "
            "filtering complete.".format(uni_filtered_name)
        )
        print('\t"{}"'.format(uni_filtered_path))
    else:
        print(
            "Found {}. Using local file:\n"
            "\t{}".format(
                uni_filtered_name,
                uni_filtered_path
            )
        )
    msg.info('COMPLETE: Second filtering.')
    return None


def compare_to_uni(df, uni_folder):
    """Compare PDB seq to uniprot sequence; remove row if not exact match.

    Opens a UniProt fasta file for each line and compares with PDB_SEQ
    (using the 1-based SP_BEG/SP_END coordinates to slice the UniProt
    sequence).  Removes rows if the PDB sequence section is not a 100%
    match with the corresponding UniProt section.

    Notes:
        Do this after UniProt files are downloaded. The input DataFrame
        should at this point only have the rows that are in ss_dis and
        there should be a value under PDB_SEQ.

        NOTE(review): rows are dropped from ``df`` while iterating it
        with ``iterrows()``; pandas does not guarantee this is safe --
        consider collecting indices and dropping afterwards.

        NOTE(review): only ``ValueError`` (unparseable fasta) is caught;
        a missing file raises ``IOError`` uncaught -- confirm that is
        intended.

    Args:
        df (DataFrame): A pre-filtered DataFrame from
            pdb_chain_uniprot.tsv
        uni_folder (Unicode): A directory path to the folder that has
            single UniProt fasta files.

    Returns:
        DataFrame: the filtered DataFrame with the PDB_SEQ column
        removed.  (Despite the original description, no list of missing
        UniProt files is returned.)
    """
    progress = ProgressBar(
        len(df.index),
        start_msg=("Comparing PDB uniprot sequences and "
                   "removing non-matching rows."),
        end_msg="Finished comparing PDB uniprot sequences.",
        approx_percentage=1
    )
    for i, row in df.iterrows():
        uni_id = row.SP_PRIMARY
        uni_fp = build_abs_path(uni_folder, uni_id)
        try:
            uni_seq = (SeqIO.read(open(uni_fp), "fasta")).seq
        except ValueError:
            print(
                "The UniProt folder must have UniProt files for all "
                "lines in the DataFrame. {0} cannot be opened".format(uni_id))
        else:
            # SP_BEG is 1-based inclusive, hence the -1 for Python slicing.
            uni_peptide = uni_seq[row.SP_BEG-1:row.SP_END]
            if uni_peptide != row.PDB_SEQ:
                df.drop(i, inplace=True)
        progress.inc()
    df.drop('PDB_SEQ', axis=1, inplace=True)
    return df
{ "content_hash": "db37173328771eec3687cdf0f0c34403", "timestamp": "", "source": "github", "line_count": 123, "max_line_length": 78, "avg_line_length": 31.910569105691057, "alnum_prop": 0.6048407643312101, "repo_name": "shellydeforte/PDB", "id": "c9df35d1f1f59488eee57b578f1e192dd09c9878", "size": "3949", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pdb/filtering_step_two.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "207570" } ], "symlink_target": "" }