code
stringlengths
3
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
3
1.05M
################################################################################
#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
################################################################################
from pyflink.table import Expression
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase


class ExpressionCompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
    """
    Tests whether the Python :class:`Expression` is consistent with
    Java `org.apache.flink.table.api.ApiExpression`.
    """

    @classmethod
    def python_class(cls):
        # Python counterpart whose public API is checked against the Java class.
        return Expression

    @classmethod
    def java_class(cls):
        return "org.apache.flink.table.api.ApiExpression"

    @classmethod
    def excluded_methods(cls):
        # Java-side plumbing with no meaningful Python equivalent.
        internal = {'asSummaryString', 'accept', 'toExpr', 'getChildren'}
        # Operators exposed in Python via the corresponding dunder methods
        # (e.g. __and__ for 'and') to be more Pythonic.
        operators = {
            'and', 'or', 'not',
            'isGreater', 'isGreaterOrEqual', 'isLess', 'isLessOrEqual',
            'isEqual', 'isNotEqual',
            'plus', 'minus', 'dividedBy', 'times', 'mod', 'power',
        }
        return internal | operators

    @classmethod
    def java_method_name(cls, python_method_name):
        # 'as' and 'in' are Python keywords, hence the renamed Python methods.
        renames = {'alias': 'as', 'in_': 'in'}
        return renames.get(python_method_name, python_method_name)


if __name__ == '__main__':
    import unittest

    try:
        import xmlrunner
        test_runner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        test_runner = None
    unittest.main(testRunner=test_runner, verbosity=2)
apache/flink
flink-python/pyflink/table/tests/test_expression_completeness.py
Python
apache-2.0
2,569
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """Schema migration: adds the ``PaymentResponse`` model for the hiicart app."""

    def forwards(self, orm):
        # Adding model 'PaymentResponse'
        db.create_table('hiicart_paymentresponse', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cart', self.gf('django.db.models.fields.related.ForeignKey')(related_name='payment_results', to=orm['hiicart.HiiCart'])),
            ('response_code', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('response_text', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('hiicart', ['PaymentResponse'])

    def backwards(self, orm):
        # Deleting model 'PaymentResponse'
        db.delete_table('hiicart_paymentresponse')

    # Frozen ORM snapshot used by South to construct the ``orm`` argument above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'hiicart.hiicart': {
            'Meta': {'object_name': 'HiiCart'},
            '_cart_state': ('django.db.models.fields.CharField', [], {'default': "'OPEN'", 'max_length': '16', 'db_index': 'True'}),
            '_cart_uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
            '_sub_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '2', 'blank': 'True'}),
            '_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '2'}),
            'bill_city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'bill_country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2'}),
            'bill_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '255'}),
            'bill_first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'bill_last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'bill_phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'bill_postal_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'bill_state': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'bill_street1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80'}),
            'bill_street2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'custom_id': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '2', 'blank': 'True'}),
            'failure_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'fulfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'gateway': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'ship_city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'ship_country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2'}),
            'ship_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '255'}),
            'ship_first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'ship_last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'ship_phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'ship_postal_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'ship_state': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'ship_street1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80'}),
            'ship_street2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80'}),
            'shipping': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '2', 'blank': 'True'}),
            'shipping_option_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'success_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '2', 'blank': 'True'}),
            'tax_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'tax_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '5', 'blank': 'True'}),
            'tax_region': ('django.db.models.fields.CharField', [], {'max_length': '127', 'null': 'True', 'blank': 'True'}),
            'thankyou': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'hiicart.lineitem': {
            'Meta': {'object_name': 'LineItem'},
            '_sub_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '10'}),
            '_total': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '2'}),
            'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hiicart.HiiCart']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'digital_description': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '10'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'sku': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '255', 'db_index': 'True'}),
            'unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '10'})
        },
        'hiicart.note': {
            'Meta': {'object_name': 'Note'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'hiicart.payment': {
            'Meta': {'object_name': 'Payment'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '2'}),
            'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': "orm['hiicart.HiiCart']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'gateway': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'transaction_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '45', 'null': 'True', 'blank': 'True'})
        },
        'hiicart.paymentresponse': {
            'Meta': {'object_name': 'PaymentResponse'},
            'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_results'", 'to': "orm['hiicart.HiiCart']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response_code': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'response_text': ('django.db.models.fields.TextField', [], {})
        },
        'hiicart.recurringlineitem': {
            'Meta': {'object_name': 'RecurringLineItem'},
            '_sub_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '10'}),
            '_total': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '2'}),
            'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hiicart.HiiCart']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'digital_description': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '10'}),
            'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'duration_unit': ('django.db.models.fields.CharField', [], {'default': "'DAY'", 'max_length': '5'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'payment_token': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'recurring_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '2'}),
            'recurring_shipping': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '2'}),
            'recurring_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'recurring_times': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'sku': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '255', 'db_index': 'True'}),
            'trial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'trial_length': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'trial_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '2'}),
            'trial_times': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        }
    }

    complete_apps = ['hiicart']
hiidef/hiicart
hiicart/migrations/0018_auto__add_paymentresponse.py
Python
mit
15,003
#!/usr/bin/env python
"""Setup script for fbchat."""
import os

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

with open('README.rst') as f:
    readme_content = f.read().strip()

# Pull package metadata out of fbchat/__init__.py by scanning the file,
# so we never import the package (which would require its dependencies).
version = None
author = None
email = None
source = None
with open(os.path.join('fbchat', '__init__.py')) as f:
    for line in f:
        stripped = line.strip()
        if stripped.startswith('__version__'):
            version = line.split('=')[1].strip().replace('"', '').replace("'", '')
        elif stripped.startswith('__author__'):
            author = line.split('=')[1].strip().replace('"', '').replace("'", '')
        elif stripped.startswith('__email__'):
            email = line.split('=')[1].strip().replace('"', '').replace("'", '')
        elif stripped.startswith('__source__'):
            source = line.split('=')[1].strip().replace('"', '').replace("'", '')
        # Stop as soon as all four values are known.  The original code only
        # checked this on a line matching none of the prefixes, so it kept
        # scanning longer than necessary; the parsed values are unchanged.
        if None not in (version, author, email, source):
            break

setup(
    name='fbchat',
    author=author,
    author_email=email,
    license='BSD License',
    keywords=["facebook chat fbchat"],
    description="Facebook Chat (Messenger) for Python",
    long_description=readme_content,
    # Duplicated classifier entries removed (Developers, BSD License,
    # Python 2.7 and Python 3.3 each appeared twice).
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Communications :: Chat',
    ],
    include_package_data=True,
    packages=['fbchat'],
    install_requires=[
        'requests',
        'beautifulsoup4'
    ],
    url=source,
    version=version,
    zip_safe=True,
)
wannaphongcom/fbchat
setup.py
Python
bsd-3-clause
2,501
# -*- coding: utf-8 -*-
from urlparse import urlparse

from nose.tools import *  # flake8: noqa
from dateutil.parser import parse as parse_date

from tests.base import DbTestCase, assert_datetime_equal
from tests.utils import make_drf_request_with_version
from osf_tests.factories import UserFactory, NodeFactory, RegistrationFactory, ProjectFactory
from framework.auth import Auth
from api.nodes.serializers import NodeSerializer
from api.registrations.serializers import RegistrationSerializer
from api.base.settings.defaults import API_BASE


class TestNodeSerializer(DbTestCase):
    """Checks the JSON-API payload produced by ``NodeSerializer``."""

    def setUp(self):
        super(TestNodeSerializer, self).setUp()
        self.user = UserFactory()

    def test_node_serialization(self):
        parent = ProjectFactory(creator=self.user)
        node = NodeFactory(creator=self.user, parent=parent)
        request = make_drf_request_with_version(version='2.0')
        serialized = NodeSerializer(node, context={'request': request}).data
        data = serialized['data']
        assert_equal(data['id'], node._id)
        assert_equal(data['type'], 'nodes')

        # Attributes
        attributes = data['attributes']
        assert_equal(attributes['title'], node.title)
        assert_equal(attributes['description'], node.description)
        assert_equal(attributes['public'], node.is_public)
        assert_equal(attributes['tags'], [str(each.name) for each in node.tags.all()])
        assert_equal(attributes['current_user_can_comment'], False)
        assert_equal(attributes['category'], node.category)
        assert_equal(attributes['registration'], node.is_registration)
        assert_equal(attributes['fork'], node.is_fork)
        assert_equal(attributes['collection'], node.is_collection)

        # Relationships
        relationships = data['relationships']
        assert_in('children', relationships)
        assert_in('contributors', relationships)
        assert_in('files', relationships)
        assert_in('parent', relationships)
        assert_in('affiliated_institutions', relationships)
        parent_link = relationships['parent']['links']['related']['href']
        assert_equal(
            urlparse(parent_link).path,
            '/{}nodes/{}/'.format(API_BASE, parent._id)
        )
        assert_in('registrations', relationships)
        # Not a fork, so forked_from is removed entirely
        assert_not_in('forked_from', relationships)

    def test_fork_serialization(self):
        node = NodeFactory(creator=self.user)
        fork = node.fork_node(auth=Auth(user=node.creator))
        request = make_drf_request_with_version(version='2.0')
        serialized = NodeSerializer(fork, context={'request': request}).data
        data = serialized['data']

        # A fork must point back at its origin node.
        relationships = data['relationships']
        forked_from = relationships['forked_from']['links']['related']['href']
        assert_equal(
            urlparse(forked_from).path,
            '/{}nodes/{}/'.format(API_BASE, node._id)
        )

    def test_template_serialization(self):
        node = NodeFactory(creator=self.user)
        fork = node.use_as_template(auth=Auth(user=node.creator))
        request = make_drf_request_with_version(version='2.0')
        serialized = NodeSerializer(fork, context={'request': request}).data
        data = serialized['data']

        # A templated node must point back at the template it came from.
        relationships = data['relationships']
        templated_from = relationships['template_node']['links']['related']['href']
        assert_equal(
            urlparse(templated_from).path,
            '/{}nodes/{}/'.format(API_BASE, node._id)
        )


class TestNodeRegistrationSerializer(DbTestCase):
    """Checks the JSON-API payload produced by ``RegistrationSerializer``."""

    def test_serialization(self):
        user = UserFactory()
        request = make_drf_request_with_version(version='2.2')
        registration = RegistrationFactory(creator=user)
        serialized = RegistrationSerializer(registration, context={'request': request}).data
        data = serialized['data']
        assert_equal(data['id'], registration._id)
        assert_equal(data['type'], 'registrations')
        should_not_relate_to_registrations = [
            'registered_from',
            'registered_by',
            'registration_schema'
        ]

        # Attributes
        attributes = data['attributes']
        assert_datetime_equal(
            parse_date(attributes['date_registered']),
            registration.registered_date
        )
        assert_equal(attributes['withdrawn'], registration.is_retracted)

        # Relationships
        relationships = data['relationships']
        relationship_urls = {
            name: relationships[name]['links']['related']['href']
            for name in relationships
        }
        assert_in('registered_by', relationships)
        registered_by = relationships['registered_by']['links']['related']['href']
        assert_equal(
            urlparse(registered_by).path,
            '/{}users/{}/'.format(API_BASE, user._id)
        )
        assert_in('registered_from', relationships)
        registered_from = relationships['registered_from']['links']['related']['href']
        assert_equal(
            urlparse(registered_from).path,
            '/{}nodes/{}/'.format(API_BASE, registration.registered_from._id)
        )
        # Every relationship except the listed ones must live under the
        # /registrations/ namespace.
        for name in relationship_urls:
            if name in should_not_relate_to_registrations:
                assert_not_in('/{}registrations/'.format(API_BASE), relationship_urls[name])
            else:
                assert_in('/{}registrations/'.format(API_BASE), relationship_urls[name],
                          'For key {}'.format(name))
mluo613/osf.io
api_tests/nodes/serializers/test_serializers.py
Python
apache-2.0
5,592
# coding: utf-8
from django import forms
from django.conf import settings
from django.core.serializers.json import json

# Asset locations and widget options, overridable via Django settings.
css = getattr(settings, 'CODEMIRROR_CSS', ['css/codemirror.min.css'])
js = getattr(settings, 'CODEMIRROR_JS', ['js/codemirror.min.js'])
default_config = getattr(settings, 'CODEMIRROR_CONFIG', {})


class CodeMirror(forms.Textarea):
    """Textarea widget that attaches a CodeMirror editor to itself."""

    class Media:
        js = js
        css = {
            'all': css
        }

    def __init__(self, **kwargs):
        # Per-instance options: settings-level defaults overlaid with
        # whatever keyword arguments the caller supplies.
        self.config = dict(default_config)
        self.config.update(kwargs)
        super(CodeMirror, self).__init__()

    def render(self, name, value, attrs=None):
        # Render the plain textarea, then append the script that upgrades it.
        # NOTE(review): assumes ``attrs`` contains an 'id' key - a bare
        # ``render(name, value)`` call would raise here; confirm with callers.
        rendered = super(CodeMirror, self).render(name, value, attrs)
        return """%s
<script type="text/javascript">
    CodeMirror.fromTextArea(
        document.getElementById('%s'),
        %s
    );
</script>
""" % (rendered, attrs['id'], json.dumps(self.config))
onrik/django-codemirror
codemirror/widgets.py
Python
mit
900
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function

import collections
import logging
import os
import platform
import re
import subprocess
import types

import util
import json
from ebstall.versions import Version
from ebstall.util import normalize_string

logger = logging.getLogger(__name__)

# Per-distribution CLI defaults (currently just the package manager to use).
CLI_DEFAULTS_DEFAULT = dict(packager='source')
CLI_DEFAULTS_DEBIAN = dict(packager='apt-get')
CLI_DEFAULTS_CENTOS = dict(packager='yum')
CLI_DEFAULTS_DARWIN = dict(packager='source')

# Distribution ID -> "like" family mapping.
FLAVORS = {
    'debian': 'debian',
    'ubuntu': 'debian',
    'kubuntu': 'debian',
    'kali': 'debian',

    'centos': 'redhat',
    'centos linux': 'redhat',
    'fedora': 'redhat',
    'red hat enterprise linux server': 'redhat',
    'rhel': 'redhat',
    'amazon': 'redhat',
    'amzn': 'redhat',

    'gentoo': 'gentoo',
    'gentoo base system': 'gentoo',

    'darwin': 'darwin',

    'opensuse': 'suse',
    'suse': 'suse',
}

CLI_DEFAULTS = {
    "default": CLI_DEFAULTS_DEFAULT,
    "debian": CLI_DEFAULTS_DEBIAN,
    "ubuntu": CLI_DEFAULTS_DEBIAN,
    "centos": CLI_DEFAULTS_CENTOS,
    "centos linux": CLI_DEFAULTS_CENTOS,
    "fedora": CLI_DEFAULTS_CENTOS,
    "red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
    "rhel": CLI_DEFAULTS_CENTOS,
    "amazon": CLI_DEFAULTS_CENTOS,
    "amzn": CLI_DEFAULTS_CENTOS,
    "gentoo": CLI_DEFAULTS_DEFAULT,
    "gentoo base system": CLI_DEFAULTS_DEFAULT,
    "darwin": CLI_DEFAULTS_DARWIN,
    "opensuse": CLI_DEFAULTS_DEFAULT,
    "suse": CLI_DEFAULTS_DEFAULT,
}
"""CLI defaults."""

# Start system
START_INITD = 'init.d'
START_SYSTEMD = 'systemd'

# Pkg manager
PKG_YUM = 'yum'
PKG_APT = 'apt-get'

FAMILY_REDHAT = 'redhat'
FAMILY_DEBIAN = 'debian'

# redhat / debian
YUMS = ['redhat', 'fedora', 'centos', 'rhel', 'amzn', 'amazon']
DEBS = ['debian', 'ubuntu', 'kali']


class OSInfo(object):
    """OS information, name, version, like - similarity"""

    def __init__(self, name=None, version=None, version_major=None, like=None, family=None,
                 packager=None, start_system=None, has_os_release=False, fallback_detection=False,
                 long_name=None, *args, **kwargs):
        self.name = name
        self.long_name = long_name
        self.version_major = version_major
        self.version = version
        self.like = like
        self.family = family
        self.packager = packager
        self.start_system = start_system
        self.has_os_release = has_os_release
        self.fallback_detection = fallback_detection

    def __str__(self):
        return 'OSInfo(%r)' % json.dumps(self.to_json())

    def __repr__(self):
        return 'OSInfo(%r)' % json.dumps(self.to_json())

    def to_json(self):
        """
        Converts to the JSON
        :return:
        """
        js = collections.OrderedDict()
        js['name'] = self.name
        js['long_name'] = self.long_name
        js['version_major'] = self.version_major
        js['version'] = self.version
        js['like'] = self.like
        js['family'] = self.family
        js['packager'] = self.packager
        js['start_system'] = self.start_system
        js['has_os_release'] = self.has_os_release
        js['fallback_detection'] = self.fallback_detection
        return js


class PackageInfo(object):
    """
    Basic information about particular package
    """

    def __init__(self, name, version, arch, repo, size=None, section=None):
        self._version = None
        self.name = name
        self.version = version
        self.arch = arch
        self.repo = repo
        self.size = size
        self.section = section

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, val):
        # Stored wrapped in a Version object for comparisons.
        self._version = Version(val)

    def __str__(self):
        return '%s-%s.%s' % (self.name, self.version, self.arch)

    def __repr__(self):
        return 'PackageInfo(name=%r, version=%r, arch=%r, repo=%r, size=%r, section=%r)' \
               % (self.name, self.version, self.arch, self.repo, self.size, self.section)

    def to_json(self):
        """
        Converts to the JSON
        :return:
        """
        js = collections.OrderedDict()
        js['name'] = self.name
        js['version'] = str(self.version)
        js['arch'] = self.arch
        js['repo'] = self.repo
        if self.size is not None:
            js['size'] = self.size
        if self.section is not None:
            js['section'] = self.section
        return js

    @classmethod
    def from_json(cls, js):
        """
        Converts json dict to the object
        :param js:
        :return:
        """
        obj = cls(name=js['name'], version=js['version'], arch=js['arch'], repo=js['repo'])
        if 'size' in js:
            obj.size = js['size']
        if 'section' in js:
            obj.section = js['section']
        return obj


def get_os():
    """
    Returns basic information about the OS.
    :return: OSInfo
    """
    # At first - parse os-release
    ros = OSInfo()
    os_release_path = '/etc/os-release'
    if os.path.isfile(os_release_path):
        ros.name = _get_systemd_os_release_var("ID", filepath=os_release_path)
        ros.version = _get_systemd_os_release_var("VERSION_ID", filepath=os_release_path)
        ros.like = _get_systemd_os_release_var("ID_LIKE", os_release_path).split(" ")
        ros.long_name = _get_systemd_os_release_var("PRETTY_NAME", filepath=os_release_path)
        ros.has_os_release = True
        if not ros.long_name:
            ros.long_name = _get_systemd_os_release_var("NAME", filepath=os_release_path)

    # Try /etc/redhat-release and /etc/debian_version
    if not ros.has_os_release or ros.like is None or ros.version is None or ros.name is None:
        os_redhat_release(ros)
        os_debian_version(ros)
        os_issue(ros)

    # like detection
    os_like_detect(ros)
    os_family_detect(ros)

    # Major version
    os_major_version(ros)

    # Packager detection - yum / apt-get
    os_packager(ros)

    # Start system - init.d / systemd
    os_start_system(ros)

    return ros


def os_family_detect(ros):
    """
    OS Family (redhat, debian, ...)
    :param ros:
    :return:
    """
    if util.startswith(ros.like, YUMS):
        ros.family = FAMILY_REDHAT
    if util.startswith(ros.like, DEBS):
        ros.family = FAMILY_DEBIAN

    # NOTE(review): the guard below looks inverted - the name-based detection
    # only runs when a family was ALREADY found, so it never acts as a
    # fallback.  Behavior kept as-is; confirm whether ``is None`` was meant.
    if ros.family is not None:
        if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
            ros.family = FAMILY_REDHAT
        if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
            ros.family = FAMILY_DEBIAN
    return


def os_packager(ros):
    # Preference order: "like" field, distribution name, filesystem probes.
    if ros.like is not None:
        if util.startswith(ros.like, YUMS):
            ros.packager = PKG_YUM
        if util.startswith(ros.like, DEBS):
            ros.packager = PKG_APT
        return ros

    if ros.name is not None:
        if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
            ros.packager = PKG_YUM
        if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
            ros.packager = PKG_APT
        return

    if os.path.exists('/etc/yum'):
        ros.packager = PKG_YUM
    if os.path.exists('/etc/apt/sources.list'):
        ros.packager = PKG_APT


def os_start_system(ros):
    if os.path.exists('/etc/systemd'):
        ros.start_system = START_SYSTEMD
    else:
        ros.start_system = START_INITD
    return ros


def os_issue(ros):
    # Parse /etc/issue banner, e.g. "CentOS release 6.5" or "Ubuntu 16.04 LTS".
    if os.path.exists('/etc/issue'):
        with open('/etc/issue', 'r') as fh:
            issue = fh.readline().strip()
            issue = re.sub(r'\\[a-z]', '', issue).strip()

            match1 = re.match(r'^(.+?)\s+release\s+(.+?)$', issue, re.IGNORECASE)
            match2 = re.match(r'^(.+?)\s+([0-9.]+)\s*(LTS)?$', issue, re.IGNORECASE)
            if match1:
                ros.long_name = match1.group(1).strip()
                ros.version = match1.group(2).strip()
            elif match2:
                ros.long_name = match2.group(1).strip()
                ros.version = match2.group(2).strip()
            else:
                ros.long_name = issue
    return ros


def os_debian_version(ros):
    if os.path.exists('/etc/debian_version'):
        with open('/etc/debian_version', 'r') as fh:
            debver = fh.readline().strip()
            ros.like = 'debian'
            ros.family = FAMILY_DEBIAN
            if ros.version is None:
                ros.version = debver.strip()
    return ros


def os_redhat_release(ros):
    if os.path.exists('/etc/redhat-release'):
        with open('/etc/redhat-release', 'r') as fh:
            redhatrel = fh.readline().strip()
            ros.like = 'redhat'
            ros.family = FAMILY_REDHAT

            match = re.match(r'^(.+?)\s+release\s+(.+?)$', redhatrel, re.IGNORECASE)
            if match is not None:
                ros.long_name = match.group(1).strip()
                ros.version = match.group(2).strip()
            else:
                ros.long_name = redhatrel
    return ros


def os_like_detect(ros):
    # NOTE(review): bare except kept for behavior parity; KeyError is the
    # expected miss here.
    if not ros.like and ros.name is not None:
        try:
            ros.like = FLAVORS[ros.name.lower()]
        except:
            pass

    if not ros.like and ros.long_name is not None:
        try:
            ros.like = FLAVORS[ros.long_name.lower()]
        except:
            pass
    return ros


def os_major_version(ros):
    # NOTE(review): the class [/.] also splits on '/' - presumably intentional
    # for versions like "8/stable"; confirm.
    if ros.version is not None:
        match = re.match(r'(.+?)[/.]', ros.version)
        if match:
            ros.version_major = match.group(1)
    return ros


def get_os_info(filepath="/etc/os-release"):
    """
    Get OS name and version

    :param str filepath: File path of os-release file
    :returns: (os_name, os_version)
    :rtype: `tuple` of `str`
    """
    if os.path.isfile(filepath):
        # Systemd os-release parsing might be viable
        os_name, os_version = get_systemd_os_info(filepath=filepath)
        if os_name:
            return (os_name, os_version)

    # Fallback to platform module
    return get_python_os_info()


def get_os_info_ua(filepath="/etc/os-release"):
    """
    Get OS name and version string for User Agent

    :param str filepath: File path of os-release file
    :returns: os_ua
    :rtype: `str`
    """
    if os.path.isfile(filepath):
        os_ua = _get_systemd_os_release_var("PRETTY_NAME", filepath=filepath)
        if not os_ua:
            os_ua = _get_systemd_os_release_var("NAME", filepath=filepath)
        if os_ua:
            return os_ua

    # Fallback
    return " ".join(get_python_os_info())


def get_systemd_os_info(filepath="/etc/os-release"):
    """
    Parse systemd /etc/os-release for distribution information

    :param str filepath: File path of os-release file
    :returns: (os_name, os_version)
    :rtype: `tuple` of `str`
    """
    os_name = _get_systemd_os_release_var("ID", filepath=filepath)
    os_version = _get_systemd_os_release_var("VERSION_ID", filepath=filepath)
    return (os_name, os_version)


def get_systemd_os_like(filepath="/etc/os-release"):
    """
    Get a list of strings that indicate the distribution likeness to
    other distributions.

    :param str filepath: File path of os-release file
    :returns: List of distribution acronyms
    :rtype: `list` of `str`
    """
    return _get_systemd_os_release_var("ID_LIKE", filepath).split(" ")


def _get_systemd_os_release_var(varname, filepath="/etc/os-release"):
    """
    Get single value from systemd /etc/os-release

    :param str varname: Name of variable to fetch
    :param str filepath: File path of os-release file
    :returns: requested value
    :rtype: `str`
    """
    var_string = varname + "="
    if not os.path.isfile(filepath):
        return ""
    with open(filepath, 'r') as fh:
        contents = fh.readlines()

    for line in contents:
        if line.strip().startswith(var_string):
            # Return the value of var, normalized
            return normalize_string(line.strip()[len(var_string):])
    return ""


def get_python_os_info():
    """
    Get Operating System type/distribution and major version
    using python platform module

    :returns: (os_name, os_version)
    :rtype: `tuple` of `str`
    """
    info = platform.system_alias(
        platform.system(),
        platform.release(),
        platform.version()
    )
    os_type, os_ver, _ = info
    os_type = os_type.lower()
    if os_type.startswith('linux'):
        info = platform.linux_distribution()
        # On arch, platform.linux_distribution() is reportedly ('','',''),
        # so handle it defensively
        if info[0]:
            os_type = info[0]
        if info[1]:
            os_ver = info[1]
    elif os_type.startswith('darwin'):
        os_ver = subprocess.Popen(
            ["sw_vers", "-productVersion"],
            stdout=subprocess.PIPE
        ).communicate()[0].rstrip('\n')
    elif os_type.startswith('freebsd'):
        # eg "9.3-RC3-p1"
        os_ver = os_ver.partition("-")[0]
        os_ver = os_ver.partition(".")[0]
    elif platform.win32_ver()[1]:
        os_ver = platform.win32_ver()[1]
    else:
        # Cases known to fall here: Cygwin python
        os_ver = ''
    return os_type, os_ver


def os_like(key):
    """
    Tries to transform OS ID to LIKE_ID
    :param key:
    :return: string or None
    """
    try:
        return FLAVORS[key.lower()]
    except KeyError:
        return None


def os_constant(key):
    """
    Get a constant value for operating system
    :param key: name of cli constant
    :return: value of constant for active os
    """
    os_info = get_os_info()
    try:
        constants = CLI_DEFAULTS[os_info[0].lower()]
    except KeyError:
        constants = os_like_constants()
        if not constants:
            constants = CLI_DEFAULTS["default"]
    return constants[key]


def os_like_constants():
    """
    Try to get constants for distribution with
    similar layout and configuration, indicated by /etc/os-release
    variable "LIKE"

    :returns: Constants dictionary
    :rtype: `dict`
    """
    os_like = get_systemd_os_like()
    if os_like:
        for os_name in os_like:
            if os_name in CLI_DEFAULTS.keys():
                return CLI_DEFAULTS[os_name]
    return {}


def get_yum_packages(out):
    """
    List of all packages parsing
    :param out:
    :return:
    """
    ret = []
    lines = out if isinstance(out, types.ListType) else out.split('\n')
    for line in lines:
        line = line.strip()
        match = re.match(r'^([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.:\-_]+)[\s\t]+([@a-zA-Z0-9.\-_]+)$', line)
        if match is None:
            continue

        package = match.group(1).strip()
        version = match.group(2).strip()
        repo = match.group(3).strip()
        arch = None

        # Architecture extract
        match_arch = re.match(r'^(.+?)\.([^.]+)$', package)
        if match_arch:
            package = match_arch.group(1).strip()
            arch = match_arch.group(2).strip()

        pkg = PackageInfo(name=package, version=version, arch=arch, repo=repo)
        ret.append(pkg)
    return ret


def get_yum_packages_update(out):
    """
    List of packages to update parsing
    :param out:
    :return:
    """
    ret = []
    eqline = 0
    cur_section = None
    lines = out if isinstance(out, types.ListType) else out.split('\n')
    for line in lines:
        line = line.strip()
        if line.startswith('====='):
            eqline += 1
            continue
        # Process lines only after 2nd ====== line - should be the package list.
        # NOTE(review): the source excerpt is truncated here; the remainder of
        # this function lies outside the visible chunk and is left unchanged.
if eqline != 2: continue lmatch = re.match(r'^([a-zA-Z\s]+):$', line) if lmatch is not None: cur_section = lmatch.group(1) continue match = re.match(r'^([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.:\-_]+)' r'[\s\t]+([@a-zA-Z0-9.:\-_]+)[\s\t]+([a-zA-Z0-9.\-_\s]+?)$', line) if match is None: continue package = match.group(1).strip() version = match.group(3).strip() repo = match.group(4).strip() arch = match.group(2).strip() size = match.group(5).strip() pkg = PackageInfo(name=package, version=version, arch=arch, repo=repo, size=size, section=cur_section) ret.append(pkg) return ret def check_package_restrictions(yum_output_packages, allowed_packages): """ Checks list of the yum output pakcages vs. allowed packages :param yum_output_packages: :param check_packages: :return: (conflicting packages, new packages) """ new_packages = [] conflicting_packages = [] for out_package in yum_output_packages: allowed_list = [x for x in allowed_packages if x.name == out_package.name] if len(allowed_list) == 0: new_packages.append(out_package) continue # Sort packages based on the version, highest first. if len(allowed_list) > 1: allowed_list.sort(key=lambda x: x.version, reverse=True) allowed = allowed_list[0] if out_package.version > allowed.version: conflicting_packages.append(out_package) return conflicting_packages, new_packages def package_diff(a, b, only_in_b=False): """ Package diff a - b package x \in a is removed from a if the same package (or higher version) is in b. If there are more packages in b, the one with higher version is taken Used for removing already installed packages (b) from the packages to install (a). :param a: :param b: :param only_in_b: if True the element in a has to be in the b in the lower version. 
:return: """ res = [] for pkg in a: b_filtered = [x for x in b if x.name == pkg.name and x.arch == pkg.arch] # New package, not in b if len(b_filtered) == 0: if not only_in_b: res.append(pkg) continue # Sort packages based on the version, highest first. if len(b_filtered) > 1: b_filtered.sort(key=lambda x: x.version, reverse=True) # b contains smaller version of the package, add to the result if b_filtered[0].version < pkg.version: res.append(pkg) return res
EnigmaBridge/ebstall.py
ebstall/osutil.py
Python
mit
18,396
def asm_context(request): context_data = dict() context_data['current_url'] = request.build_absolute_uri() return context_data
Karolain/cms
assembly/context_processors.py
Python
mit
142
# -*- coding: utf-8 -*- """ Math Render Plugin for Pelican ============================== This plugin allows your site to render Math. It uses the MathJax JavaScript engine. For markdown, the plugin works by creating a Markdown extension which is used during the markdown compilation stage. Math therefore gets treated like a "first class citizen" in Pelican For reStructuredText, the plugin instructs the rst engine to output Mathjax for all math. The mathjax script is by default automatically inserted into the HTML. Typogrify Compatibility ----------------------- This plugin now plays nicely with Typogrify, but it requires Typogrify version 2.07 or above. User Settings ------------- Users are also able to pass a dictionary of settings in the settings file which will control how the MathJax library renders things. This could be very useful for template builders that want to adjust the look and feel of the math. See README for more details. """ import os import sys from pelican import signals, generators try: from bs4 import BeautifulSoup except ImportError as e: BeautifulSoup = None try: from . pelican_mathjax_markdown_extension import PelicanMathJaxExtension except ImportError as e: PelicanMathJaxExtension = None def process_settings(pelicanobj): """Sets user specified MathJax settings (see README for more details)""" mathjax_settings = {} # NOTE TO FUTURE DEVELOPERS: Look at the README and what is happening in # this function if any additional changes to the mathjax settings need to # be incorporated. Also, please inline comment what the variables # will be used for # Default settings mathjax_settings['auto_insert'] = True # if set to true, it will insert mathjax script automatically into content without needing to alter the template. 
mathjax_settings['align'] = 'center' # controls alignment of of displayed equations (values can be: left, right, center) mathjax_settings['indent'] = '0em' # if above is not set to 'center', then this setting acts as an indent mathjax_settings['show_menu'] = 'true' # controls whether to attach mathjax contextual menu mathjax_settings['process_escapes'] = 'true' # controls whether escapes are processed mathjax_settings['latex_preview'] = 'TeX' # controls what user sees while waiting for LaTex to render mathjax_settings['color'] = 'inherit' # controls color math is rendered in mathjax_settings['linebreak_automatic'] = 'false' # Set to false by default for performance reasons (see http://docs.mathjax.org/en/latest/output.html#automatic-line-breaking) mathjax_settings['tex_extensions'] = '' # latex extensions that can be embedded inside mathjax (see http://docs.mathjax.org/en/latest/tex.html#tex-and-latex-extensions) mathjax_settings['responsive'] = 'false' # Tries to make displayed math responsive mathjax_settings['responsive_break'] = '768' # The break point at which it math is responsively aligned (in pixels) mathjax_settings['mathjax_font'] = 'default' # forces mathjax to use the specified font. mathjax_settings['process_summary'] = BeautifulSoup is not None # will fix up summaries if math is cut off. Requires beautiful soup mathjax_settings['force_tls'] = 'false' # will force mathjax to be served by https - if set as False, it will only use https if site is served using https mathjax_settings['message_style'] = 'normal' # This value controls the verbosity of the messages in the lower left-hand corner. 
Set it to "none" to eliminate all messages # Source for MathJax mathjax_settings['source'] = "'//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'" # mathjax_settings['source'] = "'../theme/js/MathJax/MathJax.js'" # Get the user specified settings try: settings = pelicanobj.settings['MATH_JAX'] except: settings = None # If no settings have been specified, then return the defaults if not isinstance(settings, dict): return mathjax_settings # The following mathjax settings can be set via the settings dictionary for key, value in ((key, settings[key]) for key in settings): # Iterate over dictionary in a way that is compatible with both version 2 # and 3 of python if key == 'align': try: typeVal = isinstance(value, basestring) except NameError: typeVal = isinstance(value, str) if not typeVal: continue if value == 'left' or value == 'right' or value == 'center': mathjax_settings[key] = value else: mathjax_settings[key] = 'center' if key == 'indent': mathjax_settings[key] = value if key == 'show_menu' and isinstance(value, bool): mathjax_settings[key] = 'true' if value else 'false' if key == 'message_style': mathjax_settings[key] = value if value is not None else 'none' if key == 'auto_insert' and isinstance(value, bool): mathjax_settings[key] = value if key == 'process_escapes' and isinstance(value, bool): mathjax_settings[key] = 'true' if value else 'false' if key == 'latex_preview': try: typeVal = isinstance(value, basestring) except NameError: typeVal = isinstance(value, str) if not typeVal: continue mathjax_settings[key] = value if key == 'color': try: typeVal = isinstance(value, basestring) except NameError: typeVal = isinstance(value, str) if not typeVal: continue mathjax_settings[key] = value if key == 'linebreak_automatic' and isinstance(value, bool): mathjax_settings[key] = 'true' if value else 'false' if key == 'process_summary' and isinstance(value, bool): if value and BeautifulSoup is None: print("BeautifulSoup4 is needed for summaries 
to be processed by render_math\nPlease install it") value = False mathjax_settings[key] = value if key == 'responsive' and isinstance(value, bool): mathjax_settings[key] = 'true' if value else 'false' if key == 'force_tls' and isinstance(value, bool): mathjax_settings[key] = 'true' if value else 'false' if key == 'responsive_break' and isinstance(value, int): mathjax_settings[key] = str(value) if key == 'tex_extensions' and isinstance(value, list): # filter string values, then add '' to them try: value = filter(lambda string: isinstance(string, basestring), value) except NameError: value = filter(lambda string: isinstance(string, str), value) value = map(lambda string: "'%s'" % string, value) mathjax_settings[key] = ',' + ','.join(value) if key == 'mathjax_font': try: typeVal = isinstance(value, basestring) except NameError: typeVal = isinstance(value, str) if not typeVal: continue value = value.lower() if value == 'sanserif': value = 'SansSerif' elif value == 'fraktur': value = 'Fraktur' elif value == 'typewriter': value = 'Typewriter' else: value = 'default' mathjax_settings[key] = value return mathjax_settings def process_summary(article): """Ensures summaries are not cut off. Also inserts mathjax script so that math will be rendered""" summary = article._get_summary() summary_parsed = BeautifulSoup(summary, 'html.parser') math = summary_parsed.find_all(class_='math') if len(math) > 0: last_math_text = math[-1].get_text() if len(last_math_text) > 3 and last_math_text[-3:] == '...': content_parsed = BeautifulSoup(article._content, 'html.parser') full_text = content_parsed.find_all(class_='math')[len(math)-1].get_text() math[-1].string = "%s ..." 
% full_text summary = summary_parsed.decode() article._summary = "%s<script type='text/javascript'>%s</script>" % (summary, process_summary.mathjax_script) def configure_typogrify(pelicanobj, mathjax_settings): """Instructs Typogrify to ignore math tags - which allows Typogrify to play nicely with math related content""" # If Typogrify is not being used, then just exit if not pelicanobj.settings.get('TYPOGRIFY', False): return try: import typogrify from distutils.version import LooseVersion if LooseVersion(typogrify.__version__) < LooseVersion('2.0.7'): raise TypeError('Incorrect version of Typogrify') from typogrify.filters import typogrify # At this point, we are happy to use Typogrify, meaning # it is installed and it is a recent enough version # that can be used to ignore all math # Instantiate markdown extension and append it to the current extensions pelicanobj.settings['TYPOGRIFY_IGNORE_TAGS'].extend(['.math', 'script']) # ignore math class and script except (ImportError, TypeError) as e: pelicanobj.settings['TYPOGRIFY'] = False # disable Typogrify if isinstance(e, ImportError): print("\nTypogrify is not installed, so it is being ignored.\nIf you want to use it, please install via: pip install typogrify\n") if isinstance(e, TypeError): print("\nA more recent version of Typogrify is needed for the render_math module.\nPlease upgrade Typogrify to the latest version (anything equal or above version 2.0.7 is okay).\nTypogrify will be turned off due to this reason.\n") def process_mathjax_script(mathjax_settings): """Load the mathjax script template from file, and render with the settings""" # Read the mathjax javascript template from file with open (os.path.dirname(os.path.realpath(__file__)) + '/mathjax_script_template', 'r') as mathjax_script_template: mathjax_template = mathjax_script_template.read() return mathjax_template.format(**mathjax_settings) def mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings): """Instantiates a customized markdown 
extension for handling mathjax related content""" # Create the configuration for the markdown template config = {} config['mathjax_script'] = mathjax_script config['math_tag_class'] = 'math' config['auto_insert'] = mathjax_settings['auto_insert'] # Instantiate markdown extension and append it to the current extensions try: pelicanobj.settings['MD_EXTENSIONS'].append(PelicanMathJaxExtension(config)) except: sys.excepthook(*sys.exc_info()) sys.stderr.write("\nError - the pelican mathjax markdown extension failed to configure. MathJax is non-functional.\n") sys.stderr.flush() def mathjax_for_rst(pelicanobj, mathjax_script): """Setup math for RST""" pelicanobj.settings['DOCUTILS_SETTINGS'] = {'math_output': 'MathJax'} rst_add_mathjax.mathjax_script = mathjax_script def pelican_init(pelicanobj): """ Loads the mathjax script according to the settings. Instantiate the Python markdown extension, passing in the mathjax script as config parameter. """ # Process settings, and set global var mathjax_settings = process_settings(pelicanobj) # Generate mathjax script mathjax_script = process_mathjax_script(mathjax_settings) # Configure Typogrify configure_typogrify(pelicanobj, mathjax_settings) # Configure Mathjax For Markdown if PelicanMathJaxExtension: mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings) # Configure Mathjax For RST mathjax_for_rst(pelicanobj, mathjax_script) # Set process_summary's mathjax_script variable process_summary.mathjax_script = None if mathjax_settings['process_summary']: process_summary.mathjax_script = mathjax_script def rst_add_mathjax(content): """Adds mathjax script for reStructuredText""" # .rst is the only valid extension for reStructuredText files _, ext = os.path.splitext(os.path.basename(content.source_path)) if ext != '.rst': return # If math class is present in text, add the javascript # note that RST hardwires mathjax to be class "math" if 'class="math"' in content._content: content._content += "<script 
type='text/javascript'>%s</script>" % rst_add_mathjax.mathjax_script def process_rst_and_summaries(content_generators): """ Ensure mathjax script is applied to RST and summaries are corrected if specified in user settings. Handles content attached to ArticleGenerator and PageGenerator objects, since the plugin doesn't know how to handle other Generator types. For reStructuredText content, examine both articles and pages. If article or page is reStructuredText and there is math present, append the mathjax script. Also process summaries if present (only applies to articles) and user wants summaries processed (via user settings) """ for generator in content_generators: if isinstance(generator, generators.ArticlesGenerator): for article in generator.articles + generator.translations: rst_add_mathjax(article) #optionally fix truncated formulae in summaries. if process_summary.mathjax_script is not None: process_summary(article) elif isinstance(generator, generators.PagesGenerator): for page in generator.pages: rst_add_mathjax(page) def register(): """Plugin registration""" signals.initialized.connect(pelican_init) signals.all_generators_finalized.connect(process_rst_and_summaries)
cmacmackin/scribbler
scribbler/render_math/math.py
Python
gpl-3.0
14,159
"""BleBox air quality entity.""" from homeassistant.components.air_quality import AirQualityEntity from . import BleBoxEntity, create_blebox_entities async def async_setup_entry(hass, config_entry, async_add_entities): """Set up a BleBox air quality entity.""" create_blebox_entities( hass, config_entry, async_add_entities, BleBoxAirQualityEntity, "air_qualities" ) class BleBoxAirQualityEntity(BleBoxEntity, AirQualityEntity): """Representation of a BleBox air quality feature.""" _attr_icon = "mdi:blur" @property def particulate_matter_0_1(self): """Return the particulate matter 0.1 level.""" return self._feature.pm1 @property def particulate_matter_2_5(self): """Return the particulate matter 2.5 level.""" return self._feature.pm2_5 @property def particulate_matter_10(self): """Return the particulate matter 10 level.""" return self._feature.pm10
jawilson/home-assistant
homeassistant/components/blebox/air_quality.py
Python
apache-2.0
966
from conans import ConanFile, CMake import os class OoutTestConan(ConanFile): settings = "os", "compiler", "build_type", "arch" generators = "cmake" def build(self): cmake = CMake(self) cmake.configure() cmake.build() def test(self): cmake = CMake(self) cmake.test()
DronMDF/2out
test_package/conanfile.py
Python
mit
284
# -*- coding: utf-8 -*- from __future__ import print_function import pytest import numpy as np from pandas import DataFrame, Series, MultiIndex, Panel, Index import pandas as pd import pandas.util.testing as tm from pandas.tests.frame.common import TestData class TestDataFrameSubclassing(TestData): def test_frame_subclassing_and_slicing(self): # Subclass frame and ensure it returns the right class on slicing it # In reference to PR 9632 class CustomSeries(Series): @property def _constructor(self): return CustomSeries def custom_series_function(self): return 'OK' class CustomDataFrame(DataFrame): """ Subclasses pandas DF, fills DF with simulation results, adds some custom plotting functions. """ def __init__(self, *args, **kw): super(CustomDataFrame, self).__init__(*args, **kw) @property def _constructor(self): return CustomDataFrame _constructor_sliced = CustomSeries def custom_frame_function(self): return 'OK' data = {'col1': range(10), 'col2': range(10)} cdf = CustomDataFrame(data) # Did we get back our own DF class? assert isinstance(cdf, CustomDataFrame) # Do we get back our own Series class after selecting a column? cdf_series = cdf.col1 assert isinstance(cdf_series, CustomSeries) assert cdf_series.custom_series_function() == 'OK' # Do we get back our own DF class after slicing row-wise? 
cdf_rows = cdf[1:5] assert isinstance(cdf_rows, CustomDataFrame) assert cdf_rows.custom_frame_function() == 'OK' # Make sure sliced part of multi-index frame is custom class mcol = pd.MultiIndex.from_tuples([('A', 'A'), ('A', 'B')]) cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) assert isinstance(cdf_multi['A'], CustomDataFrame) mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')]) cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) assert isinstance(cdf_multi2['A'], CustomSeries) def test_dataframe_metadata(self): df = tm.SubclassedDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]}, index=['a', 'b', 'c']) df.testattr = 'XXX' assert df.testattr == 'XXX' assert df[['X']].testattr == 'XXX' assert df.loc[['a', 'b'], :].testattr == 'XXX' assert df.iloc[[0, 1], :].testattr == 'XXX' # see gh-9776 assert df.iloc[0:1, :].testattr == 'XXX' # see gh-10553 unpickled = tm.round_trip_pickle(df) tm.assert_frame_equal(df, unpickled) assert df._metadata == unpickled._metadata assert df.testattr == unpickled.testattr def test_indexing_sliced(self): # GH 11559 df = tm.SubclassedDataFrame({'X': [1, 2, 3], 'Y': [4, 5, 6], 'Z': [7, 8, 9]}, index=['a', 'b', 'c']) res = df.loc[:, 'X'] exp = tm.SubclassedSeries([1, 2, 3], index=list('abc'), name='X') tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.iloc[:, 1] exp = tm.SubclassedSeries([4, 5, 6], index=list('abc'), name='Y') tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.loc[:, 'Z'] exp = tm.SubclassedSeries([7, 8, 9], index=list('abc'), name='Z') tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.loc['a', :] exp = tm.SubclassedSeries([1, 4, 7], index=list('XYZ'), name='a') tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.iloc[1, :] exp = tm.SubclassedSeries([2, 5, 8], index=list('XYZ'), name='b') tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res 
= df.loc['c', :] exp = tm.SubclassedSeries([3, 6, 9], index=list('XYZ'), name='c') tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") def test_to_panel_expanddim(self): # GH 9762 class SubclassedFrame(DataFrame): @property def _constructor_expanddim(self): return SubclassedPanel class SubclassedPanel(Panel): pass index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)]) df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index) result = df.to_panel() assert isinstance(result, SubclassedPanel) expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]], items=['X', 'Y'], major_axis=[0], minor_axis=[0, 1, 2], dtype='int64') tm.assert_panel_equal(result, expected) def test_subclass_attr_err_propagation(self): # GH 11808 class A(DataFrame): @property def bar(self): return self.i_dont_exist with tm.assert_raises_regex(AttributeError, '.*i_dont_exist.*'): A().bar def test_subclass_align(self): # GH 12983 df1 = tm.SubclassedDataFrame({'a': [1, 3, 5], 'b': [1, 3, 5]}, index=list('ACE')) df2 = tm.SubclassedDataFrame({'c': [1, 2, 4], 'd': [1, 2, 4]}, index=list('ABD')) res1, res2 = df1.align(df2, axis=0) exp1 = tm.SubclassedDataFrame({'a': [1, np.nan, 3, np.nan, 5], 'b': [1, np.nan, 3, np.nan, 5]}, index=list('ABCDE')) exp2 = tm.SubclassedDataFrame({'c': [1, 2, np.nan, 4, np.nan], 'd': [1, 2, np.nan, 4, np.nan]}, index=list('ABCDE')) assert isinstance(res1, tm.SubclassedDataFrame) tm.assert_frame_equal(res1, exp1) assert isinstance(res2, tm.SubclassedDataFrame) tm.assert_frame_equal(res2, exp2) res1, res2 = df1.a.align(df2.c) assert isinstance(res1, tm.SubclassedSeries) tm.assert_series_equal(res1, exp1.a) assert isinstance(res2, tm.SubclassedSeries) tm.assert_series_equal(res2, exp2.c) def test_subclass_align_combinations(self): # GH 12983 df = tm.SubclassedDataFrame({'a': [1, 3, 5], 'b': [1, 3, 5]}, index=list('ACE')) s = tm.SubclassedSeries([1, 2, 4], index=list('ABD'), name='x') # 
frame + series res1, res2 = df.align(s, axis=0) exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5], 'b': [1, np.nan, 3, np.nan, 5]}, index=list('ABCDE')) # name is lost when exp2 = pd.Series([1, 2, np.nan, 4, np.nan], index=list('ABCDE'), name='x') assert isinstance(res1, tm.SubclassedDataFrame) tm.assert_frame_equal(res1, exp1) assert isinstance(res2, tm.SubclassedSeries) tm.assert_series_equal(res2, exp2) # series + frame res1, res2 = s.align(df) assert isinstance(res1, tm.SubclassedSeries) tm.assert_series_equal(res1, exp2) assert isinstance(res2, tm.SubclassedDataFrame) tm.assert_frame_equal(res2, exp1) def test_subclass_iterrows(self): # GH 13977 df = tm.SubclassedDataFrame({'a': [1]}) for i, row in df.iterrows(): assert isinstance(row, tm.SubclassedSeries) tm.assert_series_equal(row, df.loc[i]) def test_subclass_sparse_slice(self): rows = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]] ssdf = tm.SubclassedSparseDataFrame(rows) ssdf.testattr = "testattr" tm.assert_sp_frame_equal(ssdf.loc[:2], tm.SubclassedSparseDataFrame(rows[:3])) tm.assert_sp_frame_equal(ssdf.iloc[:2], tm.SubclassedSparseDataFrame(rows[:2])) tm.assert_sp_frame_equal(ssdf[:2], tm.SubclassedSparseDataFrame(rows[:2])) assert ssdf.loc[:2].testattr == "testattr" assert ssdf.iloc[:2].testattr == "testattr" assert ssdf[:2].testattr == "testattr" tm.assert_sp_series_equal(ssdf.loc[1], tm.SubclassedSparseSeries(rows[1]), check_names=False, check_kind=False) tm.assert_sp_series_equal(ssdf.iloc[1], tm.SubclassedSparseSeries(rows[1]), check_names=False, check_kind=False) def test_subclass_sparse_transpose(self): ossdf = tm.SubclassedSparseDataFrame([[1, 2, 3], [4, 5, 6]]) essdf = tm.SubclassedSparseDataFrame([[1, 4], [2, 5], [3, 6]]) tm.assert_sp_frame_equal(ossdf.T, essdf) def test_subclass_stack(self): # GH 15564 df = tm.SubclassedDataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'b', 'c'], columns=['X', 'Y', 'Z']) res = df.stack() exp = tm.SubclassedSeries( [1, 2, 3, 4, 5, 6, 7, 8, 9], 
index=[list('aaabbbccc'), list('XYZXYZXYZ')]) tm.assert_series_equal(res, exp) def test_subclass_stack_multi(self): # GH 15564 df = tm.SubclassedDataFrame([ [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], index=MultiIndex.from_tuples( list(zip(list('AABB'), list('cdcd'))), names=['aaa', 'ccc']), columns=MultiIndex.from_tuples( list(zip(list('WWXX'), list('yzyz'))), names=['www', 'yyy'])) exp = tm.SubclassedDataFrame([ [10, 12], [11, 13], [20, 22], [21, 23], [30, 32], [31, 33], [40, 42], [41, 43]], index=MultiIndex.from_tuples(list(zip( list('AAAABBBB'), list('ccddccdd'), list('yzyzyzyz'))), names=['aaa', 'ccc', 'yyy']), columns=Index(['W', 'X'], name='www')) res = df.stack() tm.assert_frame_equal(res, exp) res = df.stack('yyy') tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame([ [10, 11], [12, 13], [20, 21], [22, 23], [30, 31], [32, 33], [40, 41], [42, 43]], index=MultiIndex.from_tuples(list(zip( list('AAAABBBB'), list('ccddccdd'), list('WXWXWXWX'))), names=['aaa', 'ccc', 'www']), columns=Index(['y', 'z'], name='yyy')) res = df.stack('www') tm.assert_frame_equal(res, exp) def test_subclass_stack_multi_mixed(self): # GH 15564 df = tm.SubclassedDataFrame([ [10, 11, 12.0, 13.0], [20, 21, 22.0, 23.0], [30, 31, 32.0, 33.0], [40, 41, 42.0, 43.0]], index=MultiIndex.from_tuples( list(zip(list('AABB'), list('cdcd'))), names=['aaa', 'ccc']), columns=MultiIndex.from_tuples( list(zip(list('WWXX'), list('yzyz'))), names=['www', 'yyy'])) exp = tm.SubclassedDataFrame([ [10, 12.0], [11, 13.0], [20, 22.0], [21, 23.0], [30, 32.0], [31, 33.0], [40, 42.0], [41, 43.0]], index=MultiIndex.from_tuples(list(zip( list('AAAABBBB'), list('ccddccdd'), list('yzyzyzyz'))), names=['aaa', 'ccc', 'yyy']), columns=Index(['W', 'X'], name='www')) res = df.stack() tm.assert_frame_equal(res, exp) res = df.stack('yyy') tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame([ [10.0, 11.0], [12.0, 13.0], [20.0, 21.0], [22.0, 23.0], [30.0, 31.0], [32.0, 33.0], 
[40.0, 41.0], [42.0, 43.0]], index=MultiIndex.from_tuples(list(zip( list('AAAABBBB'), list('ccddccdd'), list('WXWXWXWX'))), names=['aaa', 'ccc', 'www']), columns=Index(['y', 'z'], name='yyy')) res = df.stack('www') tm.assert_frame_equal(res, exp) def test_subclass_unstack(self): # GH 15564 df = tm.SubclassedDataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'b', 'c'], columns=['X', 'Y', 'Z']) res = df.unstack() exp = tm.SubclassedSeries( [1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list('XXXYYYZZZ'), list('abcabcabc')]) tm.assert_series_equal(res, exp) def test_subclass_unstack_multi(self): # GH 15564 df = tm.SubclassedDataFrame([ [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], index=MultiIndex.from_tuples( list(zip(list('AABB'), list('cdcd'))), names=['aaa', 'ccc']), columns=MultiIndex.from_tuples( list(zip(list('WWXX'), list('yzyz'))), names=['www', 'yyy'])) exp = tm.SubclassedDataFrame([ [10, 20, 11, 21, 12, 22, 13, 23], [30, 40, 31, 41, 32, 42, 33, 43]], index=Index(['A', 'B'], name='aaa'), columns=MultiIndex.from_tuples(list(zip( list('WWWWXXXX'), list('yyzzyyzz'), list('cdcdcdcd'))), names=['www', 'yyy', 'ccc'])) res = df.unstack() tm.assert_frame_equal(res, exp) res = df.unstack('ccc') tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame([ [10, 30, 11, 31, 12, 32, 13, 33], [20, 40, 21, 41, 22, 42, 23, 43]], index=Index(['c', 'd'], name='ccc'), columns=MultiIndex.from_tuples(list(zip( list('WWWWXXXX'), list('yyzzyyzz'), list('ABABABAB'))), names=['www', 'yyy', 'aaa'])) res = df.unstack('aaa') tm.assert_frame_equal(res, exp) def test_subclass_unstack_multi_mixed(self): # GH 15564 df = tm.SubclassedDataFrame([ [10, 11, 12.0, 13.0], [20, 21, 22.0, 23.0], [30, 31, 32.0, 33.0], [40, 41, 42.0, 43.0]], index=MultiIndex.from_tuples( list(zip(list('AABB'), list('cdcd'))), names=['aaa', 'ccc']), columns=MultiIndex.from_tuples( list(zip(list('WWXX'), list('yzyz'))), names=['www', 'yyy'])) exp = tm.SubclassedDataFrame([ [10, 20, 11, 21, 
12.0, 22.0, 13.0, 23.0], [30, 40, 31, 41, 32.0, 42.0, 33.0, 43.0]], index=Index(['A', 'B'], name='aaa'), columns=MultiIndex.from_tuples(list(zip( list('WWWWXXXX'), list('yyzzyyzz'), list('cdcdcdcd'))), names=['www', 'yyy', 'ccc'])) res = df.unstack() tm.assert_frame_equal(res, exp) res = df.unstack('ccc') tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame([ [10, 30, 11, 31, 12.0, 32.0, 13.0, 33.0], [20, 40, 21, 41, 22.0, 42.0, 23.0, 43.0]], index=Index(['c', 'd'], name='ccc'), columns=MultiIndex.from_tuples(list(zip( list('WWWWXXXX'), list('yyzzyyzz'), list('ABABABAB'))), names=['www', 'yyy', 'aaa'])) res = df.unstack('aaa') tm.assert_frame_equal(res, exp) def test_subclass_pivot(self): # GH 15564 df = tm.SubclassedDataFrame({ 'index': ['A', 'B', 'C', 'C', 'B', 'A'], 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'], 'values': [1., 2., 3., 3., 2., 1.]}) pivoted = df.pivot( index='index', columns='columns', values='values') expected = tm.SubclassedDataFrame({ 'One': {'A': 1., 'B': 2., 'C': 3.}, 'Two': {'A': 1., 'B': 2., 'C': 3.}}) expected.index.name, expected.columns.name = 'index', 'columns' tm.assert_frame_equal(pivoted, expected) def test_subclassed_melt(self): # GH 15564 cheese = tm.SubclassedDataFrame({ 'first': ['John', 'Mary'], 'last': ['Doe', 'Bo'], 'height': [5.5, 6.0], 'weight': [130, 150]}) melted = pd.melt(cheese, id_vars=['first', 'last']) expected = tm.SubclassedDataFrame([ ['John', 'Doe', 'height', 5.5], ['Mary', 'Bo', 'height', 6.0], ['John', 'Doe', 'weight', 130], ['Mary', 'Bo', 'weight', 150]], columns=['first', 'last', 'variable', 'value']) tm.assert_frame_equal(melted, expected) def test_subclassed_wide_to_long(self): # GH 9762 np.random.seed(123) x = np.random.randn(3) df = tm.SubclassedDataFrame({ "A1970": {0: "a", 1: "b", 2: "c"}, "A1980": {0: "d", 1: "e", 2: "f"}, "B1970": {0: 2.5, 1: 1.2, 2: .7}, "B1980": {0: 3.2, 1: 1.3, 2: .1}, "X": dict(zip(range(3), x))}) df["id"] = df.index exp_data = {"X": x.tolist() + x.tolist(), "A": 
['a', 'b', 'c', 'd', 'e', 'f'], "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], "year": [1970, 1970, 1970, 1980, 1980, 1980], "id": [0, 1, 2, 0, 1, 2]} expected = tm.SubclassedDataFrame(exp_data) expected = expected.set_index(['id', 'year'])[["X", "A", "B"]] long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year") tm.assert_frame_equal(long_frame, expected) def test_subclassed_apply(self): # GH 19822 def check_row_subclass(row): assert isinstance(row, tm.SubclassedSeries) def strech(row): if row["variable"] == "height": row["value"] += 0.5 return row df = tm.SubclassedDataFrame([ ['John', 'Doe', 'height', 5.5], ['Mary', 'Bo', 'height', 6.0], ['John', 'Doe', 'weight', 130], ['Mary', 'Bo', 'weight', 150]], columns=['first', 'last', 'variable', 'value']) df.apply(lambda x: check_row_subclass(x)) df.apply(lambda x: check_row_subclass(x), axis=1) expected = tm.SubclassedDataFrame([ ['John', 'Doe', 'height', 6.0], ['Mary', 'Bo', 'height', 6.5], ['John', 'Doe', 'weight', 130], ['Mary', 'Bo', 'weight', 150]], columns=['first', 'last', 'variable', 'value']) result = df.apply(lambda x: strech(x), axis=1) assert isinstance(result, tm.SubclassedDataFrame) tm.assert_frame_equal(result, expected) expected = tm.SubclassedDataFrame([ [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1) assert isinstance(result, tm.SubclassedDataFrame) tm.assert_frame_equal(result, expected) result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand") assert isinstance(result, tm.SubclassedDataFrame) tm.assert_frame_equal(result, expected) expected = tm.SubclassedSeries([ [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) result = df.apply(lambda x: [1, 2, 3], axis=1) assert not isinstance(result, tm.SubclassedDataFrame) tm.assert_series_equal(result, expected)
harisbal/pandas
pandas/tests/frame/test_subclass.py
Python
bsd-3-clause
20,679
"""Implements PHPfpmInfo Class for gathering stats from PHP FastCGI Process Manager using the status page. The status interface of PHP FastCGI Process Manager must be enabled. """ import re import util __author__ = "Ali Onur Uyar" __copyright__ = "Copyright 2011, Ali Onur Uyar" __credits__ = [] __license__ = "GPL" __version__ = "0.9.12" __maintainer__ = "Ali Onur Uyar" __email__ = "aouyar at gmail.com" __status__ = "Development" defaultHTTPport = 80 defaultHTTPSport = 443 class PHPfpmInfo: """Class to retrieve stats from APC from Web Server.""" def __init__(self, host=None, port=None, user=None, password=None, monpath=None, ssl=False): """Initialize URL for PHP FastCGI Process Manager status page. @param host: Web Server Host. (Default: 127.0.0.1) @param port: Web Server Port. (Default: 80, SSL: 443) @param user: Username. (Not needed unless authentication is required to access status page. @param password: Password. (Not needed unless authentication is required to access status page. @param monpath: PHP FPM path relative to Document Root. (Default: fpm_status.php) @param ssl: Use SSL if True. (Default: False) """ if host is not None: self._host = host else: self._host = '127.0.0.1' if port is not None: self._port = int(port) else: if ssl: self._port = defaultHTTPSport else: self._port = defaultHTTPport self._user = user self._password = password if ssl: self._proto = 'https' else: self._proto = 'http' if monpath: self._monpath = monpath else: self._monpath = 'fpm_status.php' def getStats(self): """Query and parse Web Server Status Page. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) stats = {} for line in response.splitlines(): mobj = re.match('([\w\s]+):\s+(\w+)$', line) if mobj: stats[mobj.group(1)] = util.parse_value(mobj.group(2)) return stats
Elbandi/PyMunin
pysysinfo/phpfpm.py
Python
gpl-3.0
2,487
# This file is part of Pimlico # Copyright (C) 2020 Mark Granroth-Wilding # Licensed under the GNU LGPL v3.0 - https://www.gnu.org/licenses/lgpl-3.0.en.html from builtins import str from builtins import zip from pimlico.core.external.java import Py4JInterface, JavaProcessError from pimlico.core.modules.execute import ModuleExecutionError from pimlico.core.modules.map import skip_invalid from pimlico.core.modules.map.multiproc import multiprocessing_executor_factory from pimlico.old_datatypes.base import InvalidDocument from pimlico.old_datatypes.coref.opennlp import Entity from pimlico.old_datatypes.modules.opennlp.coreference import WORDNET_DIR from py4j.java_gateway import get_field def _get_full_queue(q): output = [] while not q.empty(): output.append(q.get_nowait()) return u"\n".join(output) @skip_invalid def process_document(worker, archive, filename, doc): interface = worker.interface # Resolve coreference, passing in PTB parse trees as strings coref_result = interface.gateway.entry_point.resolveCoreference(doc) if coref_result is None: # Coref failed, return an invalid doc # Fetch all output from stdout and stderr stdout = _get_full_queue(interface.stdout_queue) stderr = _get_full_queue(interface.stderr_queue) return InvalidDocument( u"opennlp_coref", u"Error running coref\nJava stdout:\n%s\nJava stderr:\n%s" % (stdout, stderr) ) outputs = [] for output_name in worker.info.output_names: if output_name == "coref": # Pull all of the information out of the java objects to get ready to store as JSON outputs.append([Entity.from_java_object(e) for e in get_field(coref_result, "entities")]) elif output_name == "tokenized": outputs.append([sentence.split() for sentence in get_field(coref_result, "tokenizedSentences")]) elif output_name == "pos": tokens = [sentence.split() for sentence in get_field(coref_result, "tokenizedSentences")] pos_tags = [sentence.split() for sentence in get_field(coref_result, "posTags")] outputs.append(list(zip(tokens, pos_tags))) elif 
output_name == "parse": outputs.append(list(coref_result.getParseTrees())) return tuple(outputs) def start_interface(info): """ Start up a Py4J interface to a new JVM. """ # Start a parser process running in the background via Py4J gateway_args=[ info.sentence_model_path, info.token_model_path, info.pos_model_path, info.parse_model_path, info.coref_model_path ] if info.options["timeout"] is not None: # Set a timeout for coref tasks gateway_args.extend(["--timeout", str(info.options["timeout"])]) interface = Py4JInterface( "pimlico.opennlp.CoreferenceResolverRawTextGateway", gateway_args=gateway_args, pipeline=info.pipeline, system_properties={"WNSEARCHDIR": WORDNET_DIR}, java_opts=["-Xmx%s" % info.get_heap_memory_limit(), "-Xms%s" % info.get_heap_memory_limit()], print_stderr=False, print_stdout=False ) try: interface.start() except JavaProcessError as e: raise ModuleExecutionError("error starting coref process: %s" % e) return interface def worker_set_up(worker): worker.interface = start_interface(worker.info) def worker_tear_down(worker): worker.interface.stop() def preprocess(executor): if executor.info.options["timeout"] is not None: executor.log.info("Allow up to %d seconds for each coref job" % executor.info.options["timeout"]) # Don't parse the parse trees, since OpenNLP does that for us, straight from the text executor.input_corpora[0].raw_data = True ModuleExecutor = multiprocessing_executor_factory( process_document, preprocess_fn=preprocess, worker_set_up_fn=worker_set_up, worker_tear_down_fn=worker_tear_down )
markgw/pimlico
src/python/pimlico/old_datatypes/modules/opennlp/coreference_pipeline/execute.py
Python
gpl-3.0
3,946
#! /usr/bin/python # -*- mode: python -*- from contextlib import contextmanager import glob import itertools import logbook import os import re import stat import shutil import subprocess import tempfile from raven import Client from rq_queues import default_queue, retry_queue from sentry_dsn import SENTRY_DSN sentry = Client(SENTRY_DSN) _logger = logbook.Logger(__name__) def unzip_docs(filename, dest, package_name, version): directory = os.path.dirname(filename) sphinx_dir = os.path.join(directory, "sphinx", "html") if not os.path.isdir(sphinx_dir): os.makedirs(sphinx_dir) subprocess.check_call(["tar", "xzf", filename, "-C", sphinx_dir]) os.unlink(filename) _write_metadata(directory, package_name=package_name, version=version) _move_to_dest(directory, os.path.join(dest, package_name)) def build_docs(repo, dest, pypi=None, retries_left=5): try: temp_dest = tempfile.mkdtemp() with _ensuring_virtualenv() as env: with _temporary_checkout(repo, env, pypi) as temp_checkout: temp_checkout.write_metadata(temp_dest) temp_checkout.generate_sphinx(os.path.join(temp_dest, "sphinx")) temp_checkout.generate_dash(os.path.join(temp_dest, "dash")) temp_checkout.write_metadata(temp_dest) _move_to_dest(temp_dest, os.path.join(dest, temp_checkout.get_package_name())) return 0 except: _logger.error("Exception while building docs", exc_info=True) retries_left -= 1 if retries_left <= 0: sentry.captureException() else: retry_queue.enqueue_call( retry_build_docs, args=(repo, dest, pypi, retries_left), ) def retry_build_docs(*args): """ Meant to be called from cron job. 
Should only push the rebuild job again to the main (default) queue """ default_queue.enqueue_call(build_docs, args=args) def _temporary_checkout(repo, env, pypi): directory = os.path.join(tempfile.mkdtemp(), "src") _execute_assert_success("git clone {} {}".format(repo, directory)) return Checkout(directory, env, pypi) @contextmanager def _ensuring_virtualenv(): virtualenv_path = "/tmp/virtualenvs/builder" _execute_assert_success("virtualenv {}".format(virtualenv_path)) try: for package in ["doc2dash", "Sphinx==1.1.3"]: _execute_assert_success("{0}/bin/pip install --use-wheel --find-links /opt/devdocs/wheels {1}".format(virtualenv_path, package)) yield virtualenv_path finally: shutil.rmtree(virtualenv_path) class Checkout(object): def __init__(self, path, venv, pypi): super(Checkout, self).__init__() self._path = path self._venv = venv self._package_name = self._version = self._description = None self._fetch_metadata() self._install(pypi) def get_package_name(self): return self._package_name def _fetch_metadata(self): _execute_in_venv(self._venv, "python setup.py sdist", cwd=self._path) [pkg_info_filename] = glob.glob("{}/*.egg-info/PKG-INFO".format(self._path)) with open(pkg_info_filename) as pkg_info_file: metadata = {} for metadata_entry in pkg_info_file: key, value = metadata_entry.split(":", 1) metadata[key.strip()] = value.strip() self._package_name = metadata["Name"] self._description = metadata["Description"] self._version = _execute_assert_success( "git describe --tags", cwd=self._path, stdout=subprocess.PIPE).stdout.read().strip() _logger.info("Processing %s (version %s)", self._package_name, self._version) def _install(self, pypi): command = "pip install --use-wheel --find-links /opt/devdocs/wheels" if pypi: command += " -i {0}".format(pypi) command += " -e {0}".format(self._path) _execute_in_venv(self._venv, command, cwd=self._path) def __enter__(self): return self def __exit__(self, *_): pass def generate_sphinx(self, dest_dir): _execute_in_venv( 
self._venv, "python setup.py build_sphinx --version {version} --release {version} -E -a --build-dir {dest}".format(dest=dest_dir, version=self._version), cwd=self._path ) def generate_dash(self, dest_dir): temp_path = tempfile.mkdtemp() try: temp_sphinx_dir = os.path.join(tempfile.mkdtemp(), "sphinx") with self._patched_repository_context(): self.generate_sphinx(temp_sphinx_dir) _execute_in_venv(self._venv, "doc2dash {temp_sphinx_dir}/html -i {icon} -n {self._package_name} --destination {dest}/".format( icon=_get_icon_path(), self=self, temp_sphinx_dir=temp_sphinx_dir, dest=dest_dir, )) _execute_assert_success("tar -czvf {0}.tgz {0}.docset".format(self._package_name), cwd=dest_dir) shutil.rmtree(os.path.join(dest_dir, "{0}.docset".format(self._package_name))) finally: shutil.rmtree(temp_path) @contextmanager def _patched_repository_context(self): try: self._patch_repo() yield finally: _execute_assert_success("git reset --hard", cwd=self._path) def _patch_repo(self): config_filename = os.path.join(self._path, "doc", "conf.py") with open(config_filename) as f: config = f.read() config_dict = {"__file__" : config_filename} exec(config, config_dict) html_options = config_dict.get("html_theme_options", {}) html_options["nosidebar"] = True re.sub(r"#?html_theme_options\s+=\s+\{[^\}]*\}", "html_theme_options={!r}".format(html_options), config) with open(config_filename, "w") as f: f.write(config) def write_metadata(self, dest_dir): _write_metadata(dest_dir, package_name=self._package_name, version=self._version, description=self._description) def _get_icon_path(): return os.path.abspath(os.path.join(os.path.dirname(__file__), "docs_icon.png")) def _fix_permissions(directory): _fix_permissions_single_file(directory) for path, dirnames, filenames in os.walk(directory): for name in itertools.chain(dirnames, filenames): full_path = os.path.join(path, name) _fix_permissions_single_file(full_path) def _fix_permissions_single_file(path): mode = os.stat(path).st_mode | 
stat.S_IRGRP | stat.S_IROTH if os.path.isdir(path): mode |= stat.S_IXGRP | stat.S_IXOTH os.chmod(path, mode) def _move_to_dest(src, dest): _logger.debug("move: {} --> {}", src, dest) deleted = dest + ".deleted" if os.path.exists(dest): os.rename(dest, deleted) os.rename(src, dest) _fix_permissions(dest) if os.path.exists(deleted): shutil.rmtree(deleted) def _execute_assert_success(cmd, *args, **kwargs): _logger.debug("exec: {}", cmd) p = subprocess.Popen(cmd, shell=True, *args, **kwargs) if 0 != p.wait(): raise ExecutionError("Command failed: cmd: {!r}".format(cmd)) return p def _execute_in_venv(venv, cmd, *args, **kwargs): _execute_assert_success("bash -c 'source {}/bin/activate && {}'".format(venv, cmd), *args, **kwargs) class ExecutionError(Exception): pass def _write_metadata(dest, package_name, version, description=None): metadata_dir = os.path.join(dest, "metadata") if not os.path.isdir(metadata_dir): os.makedirs(metadata_dir) for field, value in (("package_name", package_name), ("version", version), ("description", description)): if value is None: continue with open(os.path.join(metadata_dir, field), "w") as f: f.write(value)
vmalloc/devdocs
webapp/builder.py
Python
bsd-3-clause
8,012
''' Extraire information sur les conditions de ski de fond au Mont-Royal À faire: * lister toutes les possibilités ''' import requests from bs4 import BeautifulSoup from sopel.module import commands, example def ski(): url_base = 'http://ville.montreal.qc.ca/portal/page?_pageid=7377,94551572&_dad=portal&_schema=PORTAL' # paramètres pour ski de fond (7) au Mont-Royal (81) num_parc = 81 num_activite = 7 # construire et effectuer la requête payload = {'id' : num_parc, 'sc' : num_activite} requete = requests.get(url_base, params = payload) # vérifier si la page existe if requete.status_code != 200: #print('Information non disponible') bot.say('Information non disponible') else: return(requete) def soupe(requete): # faire la soupe page = BeautifulSoup(requete.text, 'html.parser') # extraire l'information pertinente section_info = page.find('div', {'class' : 'fiche_contenu'}) nom_parc = section_info.find('h1').text section_pistes = section_info.find('div', {'class' : 'a_noter'}) pistes = section_pistes.find_all('p') section_etat = section_info.find_all('h3') conditions = {'État des pistes de ski' : section_etat[0].text, 'Qualité de la neige' : section_etat[9].text, 'Enneigement' : section_etat[10].text, #'Commentaires' : pistes[3].text } return(nom_parc, conditions) #@example('.skidefond montroyal') @commands('skidefond') def main(bot, trigger): requete = ski() nom_parc, conditions = soupe(requete) #print(nom_parc) bot.say(nom_parc) bot.say(conditions['État des pistes de ski']) bot.say(conditions['Qualité de la neige']) #bot.say(conditions['Enneigement'])
normcyr/sopel-modules
skidefond-vieux.py
Python
gpl-3.0
1,805
from __future__ import absolute_import import json from diagnostic_feedback.quiz import QuizBlock from xblock.field_data import DictFieldData from ..utils import MockRuntime from .base_test import BaseTest class StudentViewDataTest(BaseTest): """ Tests for XBlock Diagnostic Feedback. Student View Data """ def setUp(self): """ Test case setup """ super(StudentViewDataTest, self).setUp() self.runtime = MockRuntime() self.diagnostic_feedback_data = { 'questions': [ { 'group': 'Default Group', 'title': 'Test Title', 'text': '<p>Test Question Text</p>', 'choices': [ { 'name': '<p>Dummy</p>' }, { 'name': '<p>Test Dummy</p>' } ], } ], 'description': 'Test Description', 'title': 'New Quiz', 'quiz_type': 'BFQ' } self.diagnostic_feedback_block = QuizBlock( self.runtime, DictFieldData(self.diagnostic_feedback_data), None ) def test_student_view_data(self): """ Test the student_view_data results. """ expected_diagnostic_feedback_data = { 'quiz_type': self.diagnostic_feedback_data['quiz_type'], 'quiz_title': self.diagnostic_feedback_data['title'], 'questions': self.diagnostic_feedback_data['questions'], 'description': self.diagnostic_feedback_data['description'], } student_view_data = self.diagnostic_feedback_block.student_view_data() self.assertEqual(student_view_data, expected_diagnostic_feedback_data) def test_student_view_user_state_handler(self): """ Test the student_view_user_state handler results. """ response = json.loads( self.diagnostic_feedback_block.handle( 'student_view_user_state', self.make_request('', method='GET') ).body.decode('utf-8') ) expected_diagnostic_feedback_response = { u'student_choices': {}, u'student_result': u'', u'current_step': 0, u'completed': False, } self.assertEqual(response, expected_diagnostic_feedback_response)
mckinseyacademy/xblock-diagnosticfeedback
diagnostic_feedback/tests/unit/test_student_view_data.py
Python
agpl-3.0
2,525
# proxy module from pyface.i_window import *
enthought/etsproxy
enthought/pyface/i_window.py
Python
bsd-3-clause
45
# Generated by Django 2.2.1 on 2019-09-19 11:28 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('dojo', '0021_cve_index'), ] operations = [ migrations.AddField( model_name='system_settings', name='credentials', field=models.CharField(max_length=3000, blank=True), ), migrations.AddField( model_name='system_settings', name='column_widths', field=models.CharField(max_length=1500, blank=True), ), migrations.AddField( model_name='system_settings', name='drive_folder_ID', field=models.CharField(max_length=100, blank=True), ), migrations.AddField( model_name='system_settings', name='enable_google_sheets', field=models.BooleanField(null=True, blank=True, default=False), ), ]
rackerlabs/django-DefectDojo
dojo/db_migrations/0022_google_sheet_sync_additions.py
Python
bsd-3-clause
963
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.internet.stdio}. """ import os, sys, itertools from twisted.trial import unittest from twisted.python import filepath, log from twisted.python.runtime import platform from twisted.internet import error, defer, protocol, stdio, reactor from twisted.test.test_tcp import ConnectionLostNotifyingProtocol # A short string which is intended to appear here and nowhere else, # particularly not in any random garbage output CPython unavoidable # generates (such as in warning text and so forth). This is searched # for in the output from stdio_test_lastwrite.py and if it is found at # the end, the functionality works. UNIQUE_LAST_WRITE_STRING = 'xyz123abc Twisted is great!' skipWindowsNopywin32 = None if platform.isWindows(): try: import win32process except ImportError: skipWindowsNopywin32 = ("On windows, spawnProcess is not available " "in the absence of win32process.") class StandardIOTestProcessProtocol(protocol.ProcessProtocol): """ Test helper for collecting output from a child process and notifying something when it exits. @ivar onConnection: A L{defer.Deferred} which will be called back with C{None} when the connection to the child process is established. @ivar onCompletion: A L{defer.Deferred} which will be errbacked with the failure associated with the child process exiting when it exits. @ivar onDataReceived: A L{defer.Deferred} which will be called back with this instance whenever C{childDataReceived} is called, or C{None} to suppress these callbacks. @ivar data: A C{dict} mapping file descriptors to strings containing all bytes received from the child process on each file descriptor. 
""" onDataReceived = None def __init__(self): self.onConnection = defer.Deferred() self.onCompletion = defer.Deferred() self.data = {} def connectionMade(self): self.onConnection.callback(None) def childDataReceived(self, name, bytes): """ Record all bytes received from the child process in the C{data} dictionary. Fire C{onDataReceived} if it is not C{None}. """ self.data[name] = self.data.get(name, '') + bytes if self.onDataReceived is not None: d, self.onDataReceived = self.onDataReceived, None d.callback(self) def processEnded(self, reason): self.onCompletion.callback(reason) class StandardInputOutputTestCase(unittest.TestCase): skip = skipWindowsNopywin32 def _spawnProcess(self, proto, sibling, *args, **kw): """ Launch a child Python process and communicate with it using the given ProcessProtocol. @param proto: A L{ProcessProtocol} instance which will be connected to the child process. @param sibling: The basename of a file containing the Python program to run in the child process. @param *args: strings which will be passed to the child process on the command line as C{argv[2:]}. @param **kw: additional arguments to pass to L{reactor.spawnProcess}. @return: The L{IProcessTransport} provider for the spawned process. """ import twisted subenv = dict(os.environ) subenv['PYTHONPATH'] = os.pathsep.join( [os.path.abspath( os.path.dirname(os.path.dirname(twisted.__file__))), subenv.get('PYTHONPATH', '') ]) args = [sys.executable, filepath.FilePath(__file__).sibling(sibling).path, reactor.__class__.__module__] + list(args) return reactor.spawnProcess( proto, sys.executable, args, env=subenv, **kw) def _requireFailure(self, d, callback): def cb(result): self.fail("Process terminated with non-Failure: %r" % (result,)) def eb(err): return callback(err) return d.addCallbacks(cb, eb) def test_loseConnection(self): """ Verify that a protocol connected to L{StandardIO} can disconnect itself using C{transport.loseConnection}. 
""" errorLogFile = self.mktemp() log.msg("Child process logging to " + errorLogFile) p = StandardIOTestProcessProtocol() d = p.onCompletion self._spawnProcess(p, 'stdio_test_loseconn.py', errorLogFile) def processEnded(reason): # Copy the child's log to ours so it's more visible. for line in file(errorLogFile): log.msg("Child logged: " + line.rstrip()) self.failIfIn(1, p.data) reason.trap(error.ProcessDone) return self._requireFailure(d, processEnded) def test_readConnectionLost(self): """ When stdin is closed and the protocol connected to it implements L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method is called. """ errorLogFile = self.mktemp() log.msg("Child process logging to " + errorLogFile) p = StandardIOTestProcessProtocol() p.onDataReceived = defer.Deferred() def cbBytes(ignored): d = p.onCompletion p.transport.closeStdin() return d p.onDataReceived.addCallback(cbBytes) def processEnded(reason): reason.trap(error.ProcessDone) d = self._requireFailure(p.onDataReceived, processEnded) self._spawnProcess( p, 'stdio_test_halfclose.py', errorLogFile) return d def test_lastWriteReceived(self): """ Verify that a write made directly to stdout using L{os.write} after StandardIO has finished is reliably received by the process reading that stdout. """ p = StandardIOTestProcessProtocol() # Note: the OS X bug which prompted the addition of this test # is an apparent race condition involving non-blocking PTYs. # Delaying the parent process significantly increases the # likelihood of the race going the wrong way. If you need to # fiddle with this code at all, uncommenting the next line # will likely make your life much easier. It is commented out # because it makes the test quite slow. 
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5)) try: self._spawnProcess( p, 'stdio_test_lastwrite.py', UNIQUE_LAST_WRITE_STRING, usePTY=True) except ValueError, e: # Some platforms don't work with usePTY=True raise unittest.SkipTest(str(e)) def processEnded(reason): """ Asserts that the parent received the bytes written by the child immediately after the child starts. """ self.assertTrue( p.data[1].endswith(UNIQUE_LAST_WRITE_STRING), "Received %r from child, did not find expected bytes." % ( p.data,)) reason.trap(error.ProcessDone) return self._requireFailure(p.onCompletion, processEnded) def test_hostAndPeer(self): """ Verify that the transport of a protocol connected to L{StandardIO} has C{getHost} and C{getPeer} methods. """ p = StandardIOTestProcessProtocol() d = p.onCompletion self._spawnProcess(p, 'stdio_test_hostpeer.py') def processEnded(reason): host, peer = p.data[1].splitlines() self.failUnless(host) self.failUnless(peer) reason.trap(error.ProcessDone) return self._requireFailure(d, processEnded) def test_write(self): """ Verify that the C{write} method of the transport of a protocol connected to L{StandardIO} sends bytes to standard out. """ p = StandardIOTestProcessProtocol() d = p.onCompletion self._spawnProcess(p, 'stdio_test_write.py') def processEnded(reason): self.assertEqual(p.data[1], 'ok!') reason.trap(error.ProcessDone) return self._requireFailure(d, processEnded) def test_writeSequence(self): """ Verify that the C{writeSequence} method of the transport of a protocol connected to L{StandardIO} sends bytes to standard out. 
""" p = StandardIOTestProcessProtocol() d = p.onCompletion self._spawnProcess(p, 'stdio_test_writeseq.py') def processEnded(reason): self.assertEqual(p.data[1], 'ok!') reason.trap(error.ProcessDone) return self._requireFailure(d, processEnded) def _junkPath(self): junkPath = self.mktemp() junkFile = file(junkPath, 'w') for i in xrange(1024): junkFile.write(str(i) + '\n') junkFile.close() return junkPath def test_producer(self): """ Verify that the transport of a protocol connected to L{StandardIO} is a working L{IProducer} provider. """ p = StandardIOTestProcessProtocol() d = p.onCompletion written = [] toWrite = range(100) def connectionMade(ign): if toWrite: written.append(str(toWrite.pop()) + "\n") proc.write(written[-1]) reactor.callLater(0.01, connectionMade, None) proc = self._spawnProcess(p, 'stdio_test_producer.py') p.onConnection.addCallback(connectionMade) def processEnded(reason): self.assertEqual(p.data[1], ''.join(written)) self.failIf(toWrite, "Connection lost with %d writes left to go." % (len(toWrite),)) reason.trap(error.ProcessDone) return self._requireFailure(d, processEnded) def test_consumer(self): """ Verify that the transport of a protocol connected to L{StandardIO} is a working L{IConsumer} provider. """ p = StandardIOTestProcessProtocol() d = p.onCompletion junkPath = self._junkPath() self._spawnProcess(p, 'stdio_test_consumer.py', junkPath) def processEnded(reason): self.assertEqual(p.data[1], file(junkPath).read()) reason.trap(error.ProcessDone) return self._requireFailure(d, processEnded) def test_normalFileStandardOut(self): """ If L{StandardIO} is created with a file descriptor which refers to a normal file (ie, a file from the filesystem), L{StandardIO.write} writes bytes to that file. In particular, it does not immediately consider the file closed or call its protocol's C{connectionLost} method. 
""" onConnLost = defer.Deferred() proto = ConnectionLostNotifyingProtocol(onConnLost) path = filepath.FilePath(self.mktemp()) self.normal = normal = path.open('w') self.addCleanup(normal.close) kwargs = dict(stdout=normal.fileno()) if not platform.isWindows(): # Make a fake stdin so that StandardIO doesn't mess with the *real* # stdin. r, w = os.pipe() self.addCleanup(os.close, r) self.addCleanup(os.close, w) kwargs['stdin'] = r connection = stdio.StandardIO(proto, **kwargs) # The reactor needs to spin a bit before it might have incorrectly # decided stdout is closed. Use this counter to keep track of how # much we've let it spin. If it closes before we expected, this # counter will have a value that's too small and we'll know. howMany = 5 count = itertools.count() def spin(): for value in count: if value == howMany: connection.loseConnection() return connection.write(str(value)) break reactor.callLater(0, spin) reactor.callLater(0, spin) # Once the connection is lost, make sure the counter is at the # appropriate value. def cbLost(reason): self.assertEqual(count.next(), howMany + 1) self.assertEqual( path.getContent(), ''.join(map(str, range(howMany)))) onConnLost.addCallback(cbLost) return onConnLost if reactor.__class__.__name__ == 'EPollReactor': test_normalFileStandardOut.skip = ( "epoll(7) does not support normal files. See #4429. " "This should be a todo but technical limitations prevent " "this.") elif platform.isWindows(): test_normalFileStandardOut.skip = ( "StandardIO does not accept stdout as an argument to Windows. " "Testing redirection to a file is therefore harder.") def test_normalFileStandardOutGoodEpollError(self): """ Using StandardIO with epollreactor with stdout redirected to a normal file fails with a comprehensible error (until it is supported, when #4429 is resolved). See also #2259 and #3442. 
""" path = filepath.FilePath(self.mktemp()) normal = path.open('w') fd = normal.fileno() self.addCleanup(normal.close) exc = self.assertRaises( RuntimeError, stdio.StandardIO, protocol.Protocol(), stdout=fd) self.assertEqual( str(exc), "This reactor does not support this type of file descriptor (fd " "%d, mode %d) (for example, epollreactor does not support normal " "files. See #4429)." % (fd, os.fstat(fd).st_mode)) if reactor.__class__.__name__ != 'EPollReactor': test_normalFileStandardOutGoodEpollError.skip = ( "Only epollreactor is expected to fail with stdout redirected " "to a normal file.")
Kagami/kisa
lib/twisted/test/test_stdio.py
Python
cc0-1.0
14,093
""" Wrapper for glossary objects to SRW objects. """ from srwlib import * class SRWAdapter: def SRW_electron_beam(self, electron_beam): """ Translate generic electron beam to srw "electron beam". """ srw_electron_beam = SRWLPartBeam() srw_electron_beam.Iavg = electron_beam._current srw_electron_beam.partStatMom1.x = 0.0 srw_electron_beam.partStatMom1.y = 0.0 srw_electron_beam.partStatMom1.z = 0 srw_electron_beam.partStatMom1.xp = 0 srw_electron_beam.partStatMom1.yp = 0 srw_electron_beam.partStatMom1.gamma = electron_beam.gamma() #2nd order statistical moments: srw_electron_beam.arStatMom2[0] = electron_beam._moment_xx # <(x-x0)^2> [m^2] srw_electron_beam.arStatMom2[1] = electron_beam._moment_xxp # <(x-x0)*(x'-x'0)> [m] srw_electron_beam.arStatMom2[2] = electron_beam._moment_xpxp # <(x'-x'0)^2> srw_electron_beam.arStatMom2[3] = electron_beam._moment_yy #<(y-y0)^2> srw_electron_beam.arStatMom2[4] = electron_beam._moment_yyp #<(y-y0)*(y'-y'0)> [m] srw_electron_beam.arStatMom2[5] = electron_beam._moment_ypyp #<(y'-y'0)^2> srw_electron_beam.arStatMom2[10] = electron_beam._energy_spread**2 #<(E-E0)^2>/E0^2 return srw_electron_beam def SRW_undulator(self, undulator): """ Translate generic undulator to srw "undulator". """ magnetic_fields = [] if undulator.K_vertical() > 0.0: vertical_field = SRWLMagFldH(1, 'v', undulator.B_vertical(), 0, 1, 1) magnetic_fields.append(vertical_field) if undulator.K_horizontal() > 0.0: horizontal_field = SRWLMagFldH(1, 'h', undulator.B_horizontal(), 0, -1, 1) magnetic_fields.append(horizontal_field) srw_undulator = SRWLMagFldU(magnetic_fields, undulator.periodLength(), undulator.periodNumber()) return srw_undulator def magnetic_field_from_undulator(self, undulator): """ Generate srw magnetic fields. 
""" srw_undulator = self.SRW_undulator(undulator) magnetic_fields = SRWLMagFldC([srw_undulator], array('d', [0]), array('d', [0]), array('d', [0])) return magnetic_fields def SRW_bending_magnet(self, bending_magnet): """ Translate generic bending magnet to srw "multipole magnet". """ magnetic_fields = [] B = bending_magnet._magnetic_field length = bending_magnet._length srw_bending_magnet = SRWLMagFldM(B, 1, 'n', length) return srw_bending_magnet def magnetic_field_from_bending_magnet(self, bending_magnet): """ Generate srw magnetic fields. """ srw_bending_magnet = self.SRW_bending_magnet(bending_magnet) magnetic_fields = SRWLMagFldC([srw_bending_magnet], array('d', [0]), array('d', [0]), array('d', [0])) return magnetic_fields def create_rectangular_SRW_wavefront(self, grid_size, grid_length_vertical, grid_length_horizontal, z_start, srw_electron_beam, energy_min, energy_max): """ Generates a rectangular srw wavefront. """ srw_wavefront = SRWLWfr() srw_wavefront.allocate(1, grid_size, grid_size) srw_wavefront.mesh.zStart = float(z_start) srw_wavefront.mesh.eStart = energy_min srw_wavefront.mesh.eFin = energy_max srw_wavefront.mesh.xStart = -grid_length_vertical srw_wavefront.mesh.xFin = grid_length_vertical srw_wavefront.mesh.yStart = -grid_length_horizontal srw_wavefront.mesh.yFin = grid_length_horizontal srw_wavefront.partBeam = srw_electron_beam return srw_wavefront def create_quadratic_SRW_wavefront_single_energy(self, grid_size, grid_length, z_start, srw_electron_beam, energy): """ Generates a quadratic srw wavefront. """ return self.createRectangularSRWWavefrontSingleEnergy(grid_size, grid_length, grid_length, z_start, srw_electron_beam, energy, energy)
radiasoft/optics
code_drivers/SRW/SRW_adapter.py
Python
apache-2.0
4,272
# Convert a ServantKeeper (SK) member-export CSV into a CSV suitable for
# Planning Center Online (PCO) People import.
# Python 2 script: uses the ``print`` statement and opens the reader in
# 'rb' mode.
import csv
import sys

# Maps SK export column names -> PCO import column names.
# An empty-string value means "drop this SK column entirely".
# NOTE(review): plain dicts are unordered in Python 2, so the output column
# order is arbitrary (though stable within one interpreter run) -- confirm
# that PCO's importer does not depend on column order.
replaceDict = {
    'Family ID': 'Household ID',
    'Last Name': 'Last Name',
    'First Name': 'First Name',
    'Middle Name': 'Middle Name',
    'Home Phone': 'Home Phone Number',
    'Wedding Date': 'Anniversary',
    'Address': 'Home Address Street Line 1',
    'Age': '',
    'Baptized': 'Baptized',
    'Baptized Date': 'Baptized Date',
    'Birth Date': 'Birthdate',
    'Cell Phone': 'Mobile Phone Number',
    'E-Mail': 'Home Email',
    'Email 2': 'Work Email',
    'Employer': '',
    'Gender': 'Gender',
    'Include in Directory': 'Include in Directory',
    'Individual ID': '',
    'Marital Status': 'Marital Status',
    'Member Status': 'Membership',
    'Occupation': '',
    'Preferred Name': '',
    'City': 'Home Address City',
    'Date Last Edited': '',
    'Phone': '',
    'State': 'Home Address State',
    'Work Phone': 'Work Phone Number',
    'Zip Code': 'Home Address Zip Code',
}

# Exactly two arguments are required: the SK export to read and the PCO
# CSV to write.
if len(sys.argv) != 3:
    print """Program usage:
python 'SK to PCO conversion.py' <csv from SK.csv> <csv output to PCO.csv>

Converts a csv export from ServantKeeper to Planning Center Online People import.
Edit replaceDict to add or remove conversion fields.

This converts Individual ID and Family ID to hex string equivalents; decimal values are too large for PCO People import.
"""
    sys.exit(1)

# Build the output header from the non-empty mapping targets.
outputHeader = []
for key in replaceDict:
    if replaceDict[key] != '':
        outputHeader.append(replaceDict[key])
# print outputHeader

with open(sys.argv[1], 'rb') as readfile, open(sys.argv[2], 'w') as writefile:
    # csvreader = csv.reader(readfile, delimiter=',', quotechar='"')
    # csvdata = list(csvreader)
    csvreader = csv.DictReader(readfile, delimiter=',', quotechar='"',
                               quoting=csv.QUOTE_ALL)
    csvwriter = csv.DictWriter(writefile, outputHeader, quotechar='"',
                               quoting=csv.QUOTE_ALL)
    csvwriter.writeheader()
    # Translate each SK row into a PCO row, column by column.
    for row in csvreader:
        rowDict = {}
        for key in replaceDict:
            if replaceDict[key] != '':
                # remote_id or family id
                # NOTE(review): no value in replaceDict is currently
                # 'remote_id', so only the 'Household ID' half of this test
                # can match unless the mapping table is edited.
                if replaceDict[key] == 'remote_id' or \
                        replaceDict[key] == 'Household ID':
                    # IDs become hex strings: the decimal values are too
                    # large for the PCO People import (see usage text).
                    rowDict[replaceDict[key]] = format(int(row[key]), 'x')
                # dates with ' / / '
                elif row[key] == ' / / ':
                    # SK exports empty dates as ' / / '; blank them out.
                    rowDict[replaceDict[key]] = ''
                # else
                else:
                    rowDict[replaceDict[key]] = row[key]
        csvwriter.writerow(rowDict)
zrgravity/SK-to-PCO-Conversion
SK to PCO conversion.py
Python
mit
2,590
#!/usr/bin/python # encoding: utf-8 # (c) 2015, Jiri Tyr <jiri.tyr@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import ConfigParser import os DOCUMENTATION = ''' --- module: yum_repository author: Jiri Tyr (@jtyr) version_added: '2.1' short_description: Add and remove YUM repositories description: - Add or remove YUM repositories in RPM-based Linux distributions. options: bandwidth: required: false default: 0 description: - Maximum available network bandwidth in bytes/second. Used with the I(throttle) option. - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth throttling will be disabled. If I(throttle) is expressed as a data rate (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth throttling). baseurl: required: false default: None description: - URL to the directory where the yum repository's 'repodata' directory lives. - This or the I(mirrorlist) parameter is required. cost: required: false default: 1000 description: - Relative cost of accessing this repository. Useful for weighing one repo's packages as greater/less than any other. description: required: false default: None description: - A human readable string describing the repository. enabled: required: false choices: ['yes', 'no'] default: 'yes' description: - This tells yum whether or not use this repository. 
enablegroups: required: false choices: ['yes', 'no'] default: 'yes' description: - Determines whether yum will allow the use of package groups for this repository. exclude: required: false default: None description: - List of packages to exclude from updates or installs. This should be a space separated list. Shell globs using wildcards (eg. C(*) and C(?)) are allowed. - The list can also be a regular YAML array. failovermethod: required: false choices: [roundrobin, priority] default: roundrobin description: - C(roundrobin) randomly selects a URL out of the list of URLs to start with and proceeds through each of them as it encounters a failure contacting the host. - C(priority) starts from the first baseurl listed and reads through them sequentially. file: required: false default: None description: - File to use to save the repo in. Defaults to the value of I(name). gpgcakey: required: false default: None description: - A URL pointing to the ASCII-armored CA key file for the repository. gpgcheck: required: false choices: ['yes', 'no'] default: 'no' description: - Tells yum whether or not it should perform a GPG signature check on packages. gpgkey: required: false default: None description: - A URL pointing to the ASCII-armored GPG key file for the repository. http_caching: required: false choices: [all, packages, none] default: all description: - Determines how upstream HTTP caches are instructed to handle any HTTP downloads that Yum does. - C(all) means that all HTTP downloads should be cached. - C(packages) means that only RPM package downloads should be cached (but not repository metadata downloads). - C(none) means that no HTTP downloads should be cached. includepkgs: required: false default: None description: - List of packages you want to only use from a repository. This should be a space separated list. Shell globs using wildcards (eg. C(*) and C(?)) are allowed. Substitution variables (e.g. C($releasever)) are honored here. 
- The list can also be a regular YAML array.
  keepalive:
    required: false
    choices: ['yes', 'no']
    default: 'no'
    description:
      - This tells yum whether or not HTTP/1.1 keepalive should be used
        with this repository. This can improve transfer speeds by using one
        connection when downloading multiple files from a repository.
  metadata_expire:
    required: false
    default: 21600
    description:
      - Time (in seconds) after which the metadata will expire.
      - Default value is 6 hours.
  metalink:
    required: false
    default: None
    description:
      - Specifies a URL to a metalink file for the repomd.xml, a list of
        mirrors for the entire repository are generated by converting the
        mirrors for the repomd.xml file to a baseurl.
  mirrorlist:
    required: false
    default: None
    description:
      - Specifies a URL to a file containing a list of baseurls.
      - This or the I(baseurl) parameter is required.
  mirrorlist_expire:
    required: false
    default: 21600
    description:
      - Time (in seconds) after which the mirrorlist locally cached will
        expire.
      - Default value is 6 hours.
  name:
    required: true
    description:
      - Unique repository ID.
  params:
    required: false
    default: None
    description:
      - Option used to allow the user to overwrite any of the other options.
        To remove an option, set the value of the option to C(null).
  password:
    required: false
    default: None
    description:
      - Password to use with the username for basic authentication.
  protect:
    required: false
    choices: ['yes', 'no']
    default: 'no'
    description:
      - Protect packages from updates from other repositories.
  proxy:
    required: false
    default: None
    description:
      - URL to the proxy server that yum should use.
  proxy_password:
    required: false
    default: None
    description:
      - Password for this proxy.
  proxy_username:
    required: false
    default: None
    description:
      - Username to use for proxy.
  repo_gpgcheck:
    required: false
    choices: ['yes', 'no']
    default: 'no'
    description:
      - This tells yum whether or not it should perform a GPG signature
        check on the repodata from this repository.
reposdir: required: false default: /etc/yum.repos.d description: - Directory where the C(.repo) files will be stored. retries: required: false default: 10 description: - Set the number of times any attempt to retrieve a file should retry before returning an error. Setting this to C(0) makes yum try forever. skip_if_unavailable: required: false choices: ['yes', 'no'] default: 'no' description: - If set to C(yes) yum will continue running if this repository cannot be contacted for any reason. This should be set carefully as all repos are consulted for any given command. sslcacert: required: false default: None description: - Path to the directory containing the databases of the certificate authorities yum should use to verify SSL certificates. ssl_check_cert_permissions: required: false choices: ['yes', 'no'] default: 'no' description: - Whether yum should check the permissions on the paths for the certificates on the repository (both remote and local). - If we can't read any of the files then yum will force I(skip_if_unavailable) to be true. This is most useful for non-root processes which use yum on repos that have client cert files which are readable only by root. sslclientcert: required: false default: None description: - Path to the SSL client certificate yum should use to connect to repos/remote sites. sslclientkey: required: false default: None description: - Path to the SSL client key yum should use to connect to repos/remote sites. sslverify: required: false choices: ['yes', 'no'] default: 'yes' description: - Defines whether yum should verify SSL certificates/hosts at all. state: required: false choices: [absent, present] default: present description: - A source string state. throttle: required: false default: None description: - Enable bandwidth throttling for downloads. - This option can be expressed as a absolute data rate in bytes/sec. An SI prefix (k, M or G) may be appended to the bandwidth value. 
timeout: required: false default: 30 description: - Number of seconds to wait for a connection before timing out. username: required: false default: None description: - Username to use for basic authentication to a repo or really any url. extends_documentation_fragment: - files notes: - All comments will be removed if modifying an existing repo file. - Section order is preserved in an existing repo file. - Parameters in a section are ordered alphabetically in an existing repo file. - The repo file will be automatically deleted if it contains no repository. ''' EXAMPLES = ''' - name: Add repository yum_repository: name: epel description: EPEL YUM repo baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/ - name: Add multiple repositories into the same file (1/2) yum_repository: name: epel description: EPEL YUM repo file: external_repos baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/ gpgcheck: no - name: Add multiple repositories into the same file (2/2) yum_repository: name: rpmforge description: RPMforge YUM repo file: external_repos baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge enabled: no - name: Remove repository yum_repository: name: epel state: absent - name: Remove repository from a specific repo file yum_repository: name: epel file: external_repos state: absent # # Allow to overwrite the yum_repository parameters by defining the parameters # as a variable in the defaults or vars file: # # my_role_somerepo_params: # # Disable GPG checking # gpgcheck: no # # Remove the gpgkey option # gpgkey: null # - name: Add Some repo yum_repository: name: somerepo description: Some YUM repo baseurl: http://server.com/path/to/the/repo gpgkey: http://server.com/keys/somerepo.pub gpgcheck: yes params: "{{ my_role_somerepo_params }}" ''' RETURN = ''' repo: description: repository name returned: success type: string sample: "epel" state: description: 
state of the target, after execution returned: success type: string sample: "present" ''' class YumRepo(object): # Class global variables module = None params = None section = None repofile = ConfigParser.RawConfigParser() # List of parameters which will be allowed in the repo file output allowed_params = [ 'bandwidth', 'baseurl', 'cost', 'enabled', 'enablegroups', 'exclude', 'failovermethod', 'gpgcakey', 'gpgcheck', 'gpgkey', 'http_caching', 'includepkgs', 'keepalive', 'metadata_expire', 'metalink', 'mirrorlist', 'mirrorlist_expire', 'name', 'password', 'protect', 'proxy', 'proxy_password', 'proxy_username', 'repo_gpgcheck', 'retries', 'skip_if_unavailable', 'sslcacert', 'ssl_check_cert_permissions', 'sslclientcert', 'sslclientkey', 'sslverify', 'throttle', 'timeout', 'username'] # List of parameters which can be a list list_params = ['exclude', 'includepkgs'] def __init__(self, module): # To be able to use fail_json self.module = module # Shortcut for the params self.params = self.module.params # Section is always the repoid self.section = self.params['repoid'] # Check if repo directory exists repos_dir = self.params['reposdir'] if not os.path.isdir(repos_dir): self.module.fail_json( msg='Repo directory "%s" does not exist.' % repos_dir) # Get the given or the default repo file name repo_file = self.params['repoid'] if self.params['file'] is not None: repo_file = self.params['file'] # Set dest; also used to set dest parameter for the FS attributes self.params['dest'] = os.path.join(repos_dir, "%s.repo" % repo_file) # Read the repo file if it exists if os.path.isfile(self.params['dest']): self.repofile.read(self.params['dest']) def add(self): # Remove already existing repo and create a new one if self.repofile.has_section(self.section): self.repofile.remove_section(self.section) # Add section self.repofile.add_section(self.section) # Baseurl/mirrorlist is not required because for removal we need only # the repo name. 
This is why we check if the baseurl/mirrorlist is # defined. if (self.params['baseurl'], self.params['mirrorlist']) == (None, None): self.module.fail_json( msg='Paramater "baseurl" or "mirrorlist" is required for ' 'adding a new repo.') # Set options for key, value in sorted(self.params.items()): if key in self.list_params and isinstance(value, list): # Join items into one string for specific parameters value = ' '.join(value) elif isinstance(value, bool): # Convert boolean value to integer value = int(value) # Set the value only if it was defined (default is None) if value is not None and key in self.allowed_params: self.repofile.set(self.section, key, value) def save(self): if len(self.repofile.sections()): # Write data into the file try: fd = open(self.params['dest'], 'wb') except IOError: self.module.fail_json( msg='Cannot open repo file %s.' % self.params['dest']) try: try: self.repofile.write(fd) except Error: self.module.fail_json( msg='Cannot write repo file %s.' % self.params['dest']) finally: fd.close() else: # Remove the file if there are not repos try: os.remove(self.params['dest']) except OSError: self.module.fail_json( msg='Cannot remove empty repo file %s.' 
% self.params['dest']) def remove(self): # Remove section if exists if self.repofile.has_section(self.section): self.repofile.remove_section(self.section) def dump(self): repo_string = "" # Compose the repo file for section in sorted(self.repofile.sections()): repo_string += "[%s]\n" % section for key, value in sorted(self.repofile.items(section)): repo_string += "%s = %s\n" % (key, value) repo_string += "\n" return repo_string def main(): # Module settings module = AnsibleModule( argument_spec=dict( bandwidth=dict(), baseurl=dict(), cost=dict(), description=dict(), enabled=dict(type='bool'), enablegroups=dict(type='bool'), exclude=dict(), failovermethod=dict(choices=['roundrobin', 'priority']), file=dict(), gpgcakey=dict(), gpgcheck=dict(type='bool'), gpgkey=dict(), http_caching=dict(choices=['all', 'packages', 'none']), includepkgs=dict(), keepalive=dict(type='bool'), metadata_expire=dict(), metalink=dict(), mirrorlist=dict(), mirrorlist_expire=dict(), name=dict(required=True), params=dict(), password=dict(no_log=True), protect=dict(type='bool'), proxy=dict(), proxy_password=dict(no_log=True), proxy_username=dict(), repo_gpgcheck=dict(type='bool'), reposdir=dict(default='/etc/yum.repos.d'), retries=dict(), skip_if_unavailable=dict(type='bool'), sslcacert=dict(), ssl_check_cert_permissions=dict(type='bool'), sslclientcert=dict(), sslclientkey=dict(), sslverify=dict(type='bool'), state=dict(choices=['present', 'absent'], default='present'), throttle=dict(), timeout=dict(), username=dict(), ), add_file_common_args=True, supports_check_mode=True, ) # Update module parameters by user's parameters if defined if 'params' in module.params and isinstance(module.params['params'], dict): module.params.update(module.params['params']) # Remove the params module.params.pop('params', None) name = module.params['name'] state = module.params['state'] # Rename "name" and "description" to ensure correct key sorting module.params['repoid'] = module.params['name'] 
module.params['name'] = module.params['description'] del module.params['description'] # Instantiate the YumRepo object yumrepo = YumRepo(module) # Get repo status before change yumrepo_before = yumrepo.dump() # Perform action depending on the state if state == 'present': yumrepo.add() elif state == 'absent': yumrepo.remove() # Get repo status after change yumrepo_after = yumrepo.dump() # Compare repo states changed = yumrepo_before != yumrepo_after # Save the file only if not in check mode and if there was a change if not module.check_mode and changed: yumrepo.save() # Change file attributes if needed if os.path.isfile(module.params['dest']): file_args = module.load_file_common_arguments(module.params) changed = module.set_fs_attributes_if_different(file_args, changed) # Print status of the change module.exit_json(changed=changed, repo=name, state=state) # Import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
marcosdiez/ansible-modules-extras
packaging/os/yum_repository.py
Python
gpl-3.0
19,079
#!/usr/bin/env python # -*- mode: python; coding: koi8-r; -*- # import sys import os from glob import glob from math import sqrt, sin, cos, pi from six.moves import tkinter try: from PIL import Image, ImageTk except ImportError: Image = None cardset_type = { '1': 'French', '2': 'Hanafuda', '3': 'Tarock', '4': 'Mahjongg', '5': 'Hexadeck', '6': 'Mughal Ganjifa', '7': 'Navagraha Ganjifa', '8': 'Dashavatara Ganjifa', '9': 'Trump only', } all_imgs = False class Cardset: def __init__(self, dir, name, type, ext, x, y): self.dir, self.name, self.type, self.ext, self.x, self.y = \ dir, name, type, ext, x, y def create_cs_list(ls): cardsets_list = {} for f in ls: dir = os.path.split(f)[0] lines = open(f).readlines() l0 = lines[0].split(';') try: ext = l0[2] except IndexError: # print f ext = '.gif' if len(l0) > 3: type = cardset_type[l0[3]] else: # type = 'Unknown' type = 'French' l1 = lines[1].split(';') name = l1[1].strip() l2 = lines[2].split() x, y = int(l2[0]), int(l2[1]) cs = Cardset(dir, name, type, ext, x, y) cardsets_list[name] = cs return cardsets_list tk_images = [] zoom = 0 def show_cardset(*args): global tk_images tk_images = [] if list_box.curselection(): cs_name = list_box.get(list_box.curselection()) cs = cardsets_dict[cs_name] ls = glob(os.path.join(cs.dir, '[0-9][0-9][a-z]'+cs.ext)) ls += glob(os.path.join(cs.dir, 'back*'+cs.ext)) if all_imgs: ls += glob(os.path.join(cs.dir, 'bottom*'+cs.ext)) ls += glob(os.path.join(cs.dir, 'l*'+cs.ext)) # ls = glob(os.path.join(cs.dir, '*.gif')) # if not ls: return ls.sort() pf = None x, y = 10, 10 width, height = 0, 0 canvas.delete('all') for f in ls: if Image: filter = { 'NEAREST': Image.NEAREST, 'BILINEAR': Image.BILINEAR, 'BICUBIC': Image.BICUBIC, 'ANTIALIAS': Image.ANTIALIAS, }[filter_var.get()] # filter = Image.BILINEAR # filter = Image.BICUBIC # filter = Image.ANTIALIAS # print f im = Image.open(f) if zoom != 0: w, h = im.size im = im.convert('RGBA') # for save transparency if rotate_var.get(): # rotate # if filter 
== Image.ANTIALIAS: # filter = Image.BICUBIC z = zoom*5 a = abs(pi/2/90*z) neww = int(w*cos(a)+h*sin(a)) newh = int(h*cos(a)+w*sin(a)) # print w, h, neww, newh d = int(sqrt(w*w+h*h)) dx, dy = (d-w)/2, (d-h)/2 newim = Image.new('RGBA', (d, d)) newim.paste(im, (dx, dy)) im = newim im = im.rotate(z, resample=filter) x0, y0 = (d-neww)/2, (d-newh)/2 x1, y1 = d-x0, d-y0 im = im.crop((x0, y0, x1, y1)) t = str(z) else: # zoom z = 1.0 + zoom/10.0 z = max(0.2, z) if 1: tmp = Image.new('RGBA', (w+2, h+2)) tmp.paste(im, (1, 1), im) im = tmp.resize((int(w*z), int(h*z)), resample=filter) else: im = im.resize((int(w*z), int(h*z)), resample=filter) t = '%d %%' % int(z*100) zoom_label.config(text=t) else: zoom_label.config(text='') image = ImageTk.tkinter.PhotoImage(im) else: image = tkinter.PhotoImage(file=f) tk_images.append(image) ff = os.path.split(f)[1] if pf is None: pf = ff[:2] x, y = 10, 10 elif ff[:2] != pf: pf = ff[:2] x = 10 y += image.height()+10 else: x += image.width()+10 canvas.create_image(x, y, image=image, anchor=tkinter.NW) # canvas.create_rectangle(x, y, x+image.width(), y+image.height()) width = max(width, x) height = max(height, y) width, height = width+image.width()+10, height+image.height()+10 canvas.config(scrollregion=(0, 0, width, height)) # print image.width(), image.height() label.config(text='''\ Name: %s Type: %s Directory: %s''' % (cs.name, cs.type, cs.dir)) def zoom_in(*args): global zoom zoom += 1 show_cardset() def zoom_out(*args): global zoom zoom -= 1 show_cardset() def zoom_cancel(*args): global zoom zoom = 0 show_cardset() def show_info(*args): if list_box.curselection(): cs_name = list_box.get(list_box.curselection()) cs = cardsets_dict[cs_name] fn = os.path.join(cs.dir, 'COPYRIGHT') top = tkinter.Toplevel() text = tkinter.Text(top) text.insert('insert', open(fn).read()) text.pack(expand=tkinter.YES, fill=tkinter.BOTH) b_frame = tkinter.Frame(top) b_frame.pack(fill=tkinter.X) button = tkinter.Button(b_frame, text='Close', command=top.destroy) 
button.pack(side=tkinter.RIGHT) def create_widgets(): global list_box, canvas, label, zoom_label # root = tkinter.Tk() # list_box = tkinter.Listbox(root, exportselection=False) list_box.grid(row=0, column=0, rowspan=2, sticky=tkinter.NS) cardsets_list = list(cardsets_dict) cardsets_list.sort() for cs in cardsets_list: list_box.insert(tkinter.END, cs) list_box.bind('<<ListboxSelect>>', show_cardset) # sb = tkinter.Scrollbar(root) sb.grid(row=0, column=1, rowspan=2, sticky=tkinter.NS) list_box.config(yscrollcommand=sb.set) sb.config(command=list_box.yview) # canvas = tkinter.Canvas(root, bg='#5eab6b') canvas.grid(row=0, column=2, sticky=tkinter.NSEW) canvas.bind('<4>', lambda e: canvas.yview_scroll(-5, 'unit')) canvas.bind('<5>', lambda e: canvas.yview_scroll(5, 'unit')) # sb = tkinter.Scrollbar(root) sb.grid(row=0, column=3, sticky=tkinter.NS) canvas.config(yscrollcommand=sb.set) sb.config(command=canvas.yview) # if True: sb = tkinter.Scrollbar(root, orient=tkinter.HORIZONTAL) sb.grid(row=1, column=2, sticky=tkinter.EW) canvas.config(xscrollcommand=sb.set) sb.config(command=canvas.xview) # label = tkinter.Label(root) label.grid(row=2, column=0, columnspan=4) # b_frame = tkinter.Frame(root) b_frame.grid(row=3, column=0, columnspan=4, sticky=tkinter.EW) button = tkinter.Button(b_frame, text='Quit', command=root.quit, width=8) button.pack(side=tkinter.RIGHT) button = tkinter.Button(b_frame, text='Info', command=show_info, width=8) button.pack(side=tkinter.RIGHT) if Image: global rotate_var, filter_var rotate_var = tkinter.IntVar(root) filter_var = tkinter.StringVar(root) button = tkinter.Button(b_frame, text=' + ', command=zoom_in) button.pack(side=tkinter.LEFT) button = tkinter.Button(b_frame, text=' - ', command=zoom_out) button.pack(side=tkinter.LEFT) button = tkinter.Button(b_frame, text=' = ', command=zoom_cancel) button.pack(side=tkinter.LEFT) button = tkinter.Checkbutton( b_frame, text='Rotate', indicatoron=0, selectcolor=b_frame['bg'], width=8, 
variable=rotate_var, command=show_cardset) button.pack(side=tkinter.LEFT, fill='y') om = tkinter.OptionMenu( b_frame, filter_var, 'NEAREST', 'BILINEAR', 'BICUBIC', 'ANTIALIAS', command=show_cardset) filter_var.set('NEAREST') om.pack(side=tkinter.LEFT, fill='y') zoom_label = tkinter.Label(b_frame) zoom_label.pack(side=tkinter.LEFT) # root.columnconfigure(2, weight=1) root.rowconfigure(0, weight=1) root.title('Show Cardsets') return root if __name__ == '__main__': if '-a' in sys.argv: sys.argv.remove('-a') all_imgs = True if len(sys.argv) > 1: data_dir = sys.argv[1] else: data_dir = os.path.normpath( os.path.join(sys.path[0], os.pardir, 'data')) ls = glob(os.path.join(data_dir, '*', 'config.txt')) cardsets_dict = create_cs_list(ls) root = create_widgets() root.mainloop()
jimsize/PySolFC
scripts/cardset_viewer.py
Python
gpl-3.0
9,234
# -*- coding: utf-8 -*- # # Copyright (C) 2015 Edgewall Software # Copyright (C) 2006 Alec Thomas <alec@swapoff.org> # Copyright (C) 2015 Dirk Stöcker <trac@dstoecker.de> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.com/license.html. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://projects.edgewall.com/trac/. # # Author: Alec Thomas <alec@swapoff.org> import time from trac.config import ExtensionOption, IntOption from trac.core import Component, ExtensionPoint, Interface, TracError, \ implements from trac.util.html import Markup from trac.web.api import IRequestFilter, IRequestHandler from tracspamfilter.api import _, IFilterStrategy, IRejectHandler, N_, \ RejectContent class ICaptchaMethod(Interface): """ A CAPTCHA implementation. """ def generate_captcha(req): """Return a tuple of `(result, html)`, where `result` is the expected response and `html` is a HTML fragment for displaying the CAPTCHA challenge.""" """When "" is returned for result, then standard HTML code is used, but verification done by verify_captcha() nevertheless.""" def verify_captcha(req): """Checks if captcha is valid (only in case generate_captcha returns None for challenge parameter, otherwise check directly.""" def is_usable(req): """Check if captcha can be used for the request.""" class CaptchaSystem(Component): """ Main captcha handling system required to allow captcha based score updating in case submission was rejected. 
""" implements(IRequestHandler, IRejectHandler, IFilterStrategy, IRequestFilter) handlers = ExtensionPoint(IRequestHandler) _name = "Captcha" captcha = ExtensionOption('spam-filter', 'captcha', ICaptchaMethod, 'ExpressionCaptcha', """CAPTCHA method to use for verifying humans.""", doc_domain="tracspamfilter") karma_points = IntOption('spam-filter', 'captcha_karma', 20, """By how many points a successful CAPTCHA response increases the overall score.""", doc_domain="tracspamfilter") repeat_karma_points = IntOption('spam-filter', 'captcha_failed_karma', 1, """By how many points a failed CAPTCHA impacts the overall score.""", doc_domain="tracspamfilter") karma_lifetime = IntOption('spam-filter', 'captcha_karma_lifetime', 86400, """Time in seconds that a successful CAPTCHA response increases karma.""", doc_domain="tracspamfilter") captcha_lifetime = IntOption('spam-filter', 'captcha_lifetime', 120, """Time in seconds before CAPTCHA is removed.""", doc_domain="tracspamfilter") captcha_cleantime = IntOption('spam-filter', 'captcha_lifetime', 3600, """Time in seconds before database cleanup is called.""", doc_domain="tracspamfilter") # IFilterStrategy methods def is_external(self): return False def _getcaptchaname(self, req): name = self.captcha.__class__.__name__ if name.endswith("Captcha"): name = name[:-7] return req.session.get('captcha_verified_name', name) def test(self, req, author, content, ip): if not self._expired(req): self.log.debug("CAPTCHA: Test %s not expired", self._getcaptchaname(req)) return (self.karma_points, N_("Human verified via CAPTCHA (%s)"), self._getcaptchaname(req)) else: # simply test to down-weight wrong captcha solutions val = int(req.session.get('captcha_reject_count', 0)) self.log.debug("CAPTCHA: Test %s reject %d", self._getcaptchaname(req), val) if val > 0: return (-self.repeat_karma_points * val, N_("Failed CAPTCHA (%s) attempts"), self._getcaptchaname(req)) def train(self, req, author, content, ip, spam=True): pass # IRejectHandler 
methods def reject_content(self, req, message): self._cleanup() if self._expired(req): req.session['captcha_reject_time'] = int(time.time()) val = int(req.session.get('captcha_reject_count', 0)) req.session['captcha_reject_count'] = val + 1 req.session['captcha_reject_reason'] = message req.session['captcha_redirect'] = req.path_info for key, value in req.args.iteritems(): req.session['captcha_arg_%s' % key] = value req.redirect(req.href.captcha()) else: raise RejectContent(message) # IRequestHandler methods def match_request(self, req): return req.path_info == '/captcha' def process_request(self, req): data = {} exp = req.session.get('captcha_expected') if req.method == 'POST': if req.args.get('captcha_response') == exp: data['error'] = _("CAPTCHA failed to handle original request") else: data['error'] = _("CAPTCHA verification failed") else: data['error'] = Markup(req.session.get("captcha_reject_reason")) # cleanup old values if exp is not None: del req.session['captcha_expected'] # generate new captcha result, html = self.captcha.generate_captcha(req) data['challenge'] = html if self.captcha.__class__.__name__ == 'RandomCaptcha': data['random'] = 1 if result == "": data['defaultform'] = 1 elif result is not None: data['defaultform'] = 1 req.session['captcha_expected'] = result req.session.save() return 'verify_captcha.html', data, None def pre_process_request(self, req, handler): if req.path_info == '/captcha' and req.method == 'POST': valid = False exp = req.session.get('captcha_expected') try: name = self.captcha.name(req) except Exception: name = self.captcha.__class__.__name__ if name.endswith('Captcha'): name = name[:-7] if exp is None: valid = self.captcha.verify_captcha(req) elif req.args.get('captcha_response', '') == exp: valid = True if valid: req.environ['PATH_INFO'] = \ req.session.get('captcha_redirect', req.href()) if 'SCRIPT_NAME' in req.environ and \ len(req.environ['SCRIPT_NAME']) > 1: path_info = req.environ['PATH_INFO'] 
req.environ['PATH_INFO'] = \ path_info.replace(req.environ['SCRIPT_NAME'], '') req.environ['PATH_INFO'] = \ req.environ['PATH_INFO'].encode('utf-8') if 'captcha_redirect' in req.session: del req.session['captcha_redirect'] if 'captcha_reject_reason' in req.session: del req.session['captcha_reject_reason'] if 'captcha_reject_time' in req.session: del req.session['captcha_reject_time'] keys = req.session.keys() for key in keys: if key.startswith('captcha_arg_'): arg = key[12:] req.args[arg] = req.session[key] del req.session[key] try: for newhandler in self.handlers: try: if newhandler.match_request(req): keys = req.session.keys() for key in keys: if key.startswith('captcha_'): self.log.info("Remove useless key: " "%s", key) del req.session[key] handler = newhandler break except Exception, e: self.log.debug("Exception when parsing handlers: " "(%s)", e) except TracError, e: self.log.debug("CAPTCHA: PreProcess End %s %s", name, e) req.session['captcha_verified_name'] = name return handler req.session['captcha_verified_name'] = name req.session['captcha_verified'] = int(time.time()) self.log.debug("CAPTCHA: PreProcess OK %s", name) req.session.save() return handler def post_process_request(self, req, template, content_type): return template, content_type def post_process_request(self, req, template, data, content_type): return template, data, content_type # Internal methods def _expired(self, req): return int(req.session.get('captcha_verified', 0)) + \ self.karma_lifetime < time.time() # remove old entries from database def _cleanup(self): last = 0 for value, in self.env.db_query(""" SELECT value FROM system WHERE name='spamfilter_lastclean' """): last = int(value) break tim = int(time.time()) if last+self.captcha_cleantime < tim: self.log.debug('CAPTCHA: Cleanup captcha %s+%s < %s', last, self.captcha_cleantime, tim) t = tim - self.captcha_lifetime tc = tim - self.karma_lifetime with self.env.db_transaction as db: if last == 0: db("INSERT INTO system VALUES " 
"('spamfilter_lastclean', %s)", (tim,)) else: db("UPDATE system SET value=%s WHERE " "name='spamfilter_lastclean'", (tim,)) db("""DELETE FROM session_attribute WHERE name LIKE 'captcha%%' AND (name != 'captcha_verified' OR %s < %%s) AND (name != 'captcha_verified_name' OR ( sid IN (SELECT * FROM (SELECT sid FROM session_attribute WHERE name = 'captcha_verified' AND %s < %%s ) AS tmp1 ) ) ) AND ( sid IN (SELECT * FROM (SELECT sid FROM session_attribute WHERE name = 'captcha_reject_time' AND %s < %%s ) AS tmp1 ) OR sid NOT IN (SELECT * FROM (SELECT sid FROM session_attribute WHERE name = 'captcha_reject_time' ) AS tmp2 ) ) """ % (db.cast('value', 'int'), db.cast('value', 'int'), db.cast('value', 'int')), (tc, tc, t))
Puppet-Finland/trac
files/spam-filter/tracspamfilter/captcha/api.py
Python
bsd-2-clause
12,430
#http://theory.stanford.edu/~amitp/GameProgramming/Heuristics.html#breaking-ties class Node(object): def __init__(self, x, y, width=None, height=None, parent=None): self.parent = parent self.x = x self.y = y if parent == None: if width == None or height == None: raise ValueError('Orphan nodes must have dimensions.') self.width = width self.height = height else: self.width= parent.width self.height = parent.height def up(self): if self.y - 1 >= 0: return self.__class__(self.x, self.y - 1, parent=self) def down(self): if self.y + 1 < self.height: return self.__class__(self.x, self.y + 1, parent=self) def left(self): if self.x - 1 >= 0: return self.__class__(self.x - 1, self.y, parent=self) def right(self): if self.x + 1 < self.width: return self.__class__(self.x + 1, self.y, parent=self) def __hash__(self): return hash(repr(self)) def __eq__(self, other): try: return self.x == other.x and self.y == other.y except AttributeError: return False def __repr__(self): return 'Node({}, {})'.format(self.x, self.y) class Pathfinder(object): ''' A* Path-finding base class. 
''' def __init__(self, path, start, end): ''' Constructor ''' self.output = [] # Set has O(1) lookup instead of O(n) of a list x, y = start h, w = len(path), len(path[0]) node = Node(x, y, w, h) self.openSet = set([node]) self.closedSet = set() self.path = path self.end = Node(end[0], end[1], w, h) self.start = node self.add(node) while True: node = next(iter(sorted(self.openSet, key=self.score))) self.add(node) if self.end in self.closedSet: break if not len(self.openSet): print('No route from ({0[0]:}, {0[1]:}) to ({1[0]:}, {1[1]:})'.format(start, end)) return # Retrace path walk = node while walk: self.output.append((walk.x, walk.y)) walk = walk.parent def add(self, node): self.openSet.discard(node) self.closedSet.add(node) for item in self.adjacent(node): if item in self.closedSet: # Closed node continue for i in self.openSet: if item == i: # Item is on open list, check if it's better if self.route(item) < self.route(i): # It's better, so reparent self.openSet.remove(i) self.openSet.add(item) break else: self.openSet.add(item) def heuristic(self, node): ''' Estimate the distance from the 'node' Node to the final Node in the path. ''' # Tie break dx1 = node.x - self.end.x dy1 = node.y - self.end.y # Manhattan distance return (dx1 + dy1) def weight(self, start, end): ''' Return an integer representing how costly it is to move from Node 'start' to Node 'end'. ''' return 1 def valid(self, start, end): ''' Return True if the move from Node start to Node end is valid, and False otherwise. ''' if start and end: p = self.path[end.y][end.x] return p == 12 return False def route(self, node): current = node score = 0 while current.parent: score += self.weight(current, current.parent) current = current.parent return score def score(self, node): return self.route(node) + self.heuristic(node) def adjacent(self, node): ''' Return a tuple of nodes that are adjacent to this one. 
''' return filter(lambda x: self.valid(node, x), (node.up(), node.down(), node.left(), node.right())) if __name__ == '__main__': path = [ [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], ] p = Pathfinder(path, (2, 3), (4, 3))
Touched/subscript
cartographer/pathfinder.py
Python
gpl-3.0
4,565
#!/usr/bin/python # -*- encoding: utf-8 -*- ################################################################################ # # Copyright (C) 2010 - 2015 Dharmesh Patel <mr.dlpatel@gmail.com>. # Licence : BSD, see LICENSE for more details. # ################################################################################ from shoppingCart.tax import calculate from shoppingCart.utils import id_from_object, get_dict_of_ids, get_list_of_ids __all__ = ['Cart'] class Cart(object): """ Collection of :class:`CartItem` objects. """ def __init__(self, site=None, customer=None): """ :param site: Unique id or name of :class:`Site` object or instance of :class:`Site`. :param customer: Unique id or name of :class:`Customer` object or instance of :class:`Customer`. """ self.site = site self.customer = customer self.__intiallize() def __intiallize(self): """ To intiallize or reset class member variable's value. """ self.__items = list() self.__taxes = list() self.currency_rate = 1 self.price_accuracy = 2 self.currency_symbol = '€' self.currency_code = 'EUR' self.__shipping_charge = 0.0 self.shipping_method = 'Flat Rate Per Order' self.tax_type = 'excluded' self.__discounts = list() def add_item(self, product, price=0.0, quantity=1, taxes=[], options={}): """ To add or update product, price, quantity, taxes and options in :class:`CartItem` object. :param product: Unique id or name of :class:`Product` object or instance of :class:`Product`. :param price: Product price. :param quantity: Product quantity(default 1). :param taxes: Taxes of the product(default []). :param options: Options of the product(default {}). 
""" if not isinstance(quantity, (int, float)): raise TypeError('quantity field value must be integer or float type', 'quantity') elif not (quantity or quantity >= 1): raise ValueError('quantity field value must be greater then 1', 'quantity') option_values = [] for option_value in options.values(): if isinstance(option_value, dict): option_value = option_value.keys()[0] option_values.append(option_value) cart_item = self.find_item(product, option_values) if cart_item: cart_item.update_quantity(cart_item.quantity + quantity) else: if not price: raise ValueError('price value is 0.0') elif not isinstance(price, (int, float)): raise TypeError('price value must be integer or float type', 'price') cart_item = CartItem(self, product, price, quantity, taxes, options) self.__items.append(cart_item) def update_item(self, product, quantity, option_values=[]): """ To update :class:`CartItem` object quantity. :param product: Unique id or name of :class:`Product` object or instance of :class:`Product`. :param quantity: Updated quantity. :param option_values: Option values of the product(default []). """ if not isinstance(quantity, (int, float)): raise TypeError('quantity field value must be integer or float type', 'price') elif not (quantity or quantity >= 1): raise ValueError('quantity field value must be greater then 1') cart_item = self.find_item(product, option_values) if cart_item: cart_item.update_quantity(quantity) def remove_item(self, product, option_values=[]): """ To remove existing :class:`CartItem` object related to product. :param product: Unique id or name of :class:`Product` object or instance of :class:`Product`. :param option_values: Option values of the product(default []). """ cart_item = self.find_item(product, option_values) if cart_item: self.__items.remove(cart_item) def remove_items(self): """ To remove all existing :class:`CartItem` objects. 
""" self.__items = list() def find_item(self, product, option_values=[]): """ To find :class:`CartItem` object related to product. :param product: Unique id or name of :class:`Product` object or instance of :class:`Product`. :param option_values: Option values of the product(default []). :return: :class:`CartItem` object if cart item is exist else None. """ product = id_from_object(product) option_values = get_list_of_ids(option_values) for cart_item in self.__items: cart_product = id_from_object(cart_item.product) if not cmp(cart_product, product): if option_values: if not cart_item.has_options: continue cart_item_option_values = [] cart_item_options = get_dict_of_ids(cart_item.get_options()) for option, option_value in cart_item_options.items(): if isinstance(option_value, dict): option_value = option_value.keys()[0] cart_item_option_values.append(option_value) if not cmp(option_values, cart_item_option_values): return cart_item else: if not cart_item.has_options: return cart_item return None def get_items(self): """ :return: List of :class:`CartItem` objects. """ return self.__items def add_discount(self, amount, type='percentage'): """ To apply discount. :param amount: Discount amount. :param type: Discount type like 'percentage' or 'amount'(default amount). """ self.__discounts.append({'amount': amount, 'type': type}) def remove_discounts(self): """ To remove all applied discounts. """ self.__discounts = list() def get_discounts(self): """ :return: List of applied discounts. """ return self.__discounts def add_tax(self, amount, type='percentage'): """ To apply taxes. :param tax: Tax amount according to country region. :param type: Tax type like 'percentage' or 'fixed'(default percentage). :return: True if tax is applied and tax is already exist then False. 
""" if not isinstance(amount, (int, float)): raise TypeError('tax field value must be integer or float type', 'price') if type not in ('percentage', 'fixed'): raise ValueError('type field value must be `percentage` or `fixed`', 'type') tax_exist = self.find_tax(amount, type) if not tax_exist: self.__taxes.append({'amount': amount, 'type': type}) return True return False def remove_tax(self, amount, type='percentage'): """ To remove existing tax. :param amount: Tax amount according to country region. :param type: Tax type like 'percentage' or 'fixed'(defualt percentage). """ if not isinstance(amount, (int, float)): raise TypeError('tax field value must be integer or float type', 'price') tax_exist = self.find_tax(amount, type) if tax_exist: self.__taxes.remove({'amount': amount, 'type': type}) return True return False def remove_taxes(self): """ To remove all applied taxes. """ self.__taxes = list() def find_tax(self, amount, type='percentage'): """ To find applied tax. :param amount: Tax amount according to country region. :param type: Tax type like 'percentage' or 'fixed'(defualt percentage). :return: True if tax is exist else False. """ if {'amount': amount, 'type': type} in self.__taxes: return True return False def get_taxes(self): """ :return: list of applied taxes. """ taxes = [] for cart_item in self.get_items(): taxes.extend(cart_item.get_taxes()) taxes.extend(self.__taxes) return taxes def sub_total(self): """ :return: Total cart amount(without discount deduction). """ total = 0.0 for cart_item in self.__items: total += cart_item.sub_total() return round(total, self.price_accuracy) def total_discount(self): """ :return: Total discount amount. """ total_discount = 0.0 for cart_item in self.get_items(): total_discount += cart_item.discount_amount() return round(total_discount, self.price_accuracy) def total_untaxed_amount(self): """ :return: Untaxed amount after deducating discount amount. 
""" total_untaxed_amount = self.sub_total() - self.total_discount() if self.tax_type == 'included': total_untaxed_amount -= self.total_tax() return round(total_untaxed_amount, self.price_accuracy) def total_tax(self): """ :return: Total tax amount. """ total_tax = 0.0 if self.tax_type == 'included': total = self.sub_total() - self.total_discount() else: total = self.total_untaxed_amount() total_tax = calculate(total, self.__taxes, self.tax_type, self.currency_rate, self.price_accuracy) for cart_item in self.get_items(): total_tax += cart_item.tax_amount() return round(total_tax, self.price_accuracy) def total(self): """ :return: Total amount(`tax excluded` or `tax included`) by adding total untaxed amount, total tax and shipping charge. """ return round(self.total_untaxed_amount() + self.total_tax() + self.shipping_charge, self.price_accuracy) def count(self): """ :return: Total quantity. """ self.total_items=0 for cart_item in self.get_items(): self.total_items += cart_item.quantity return self.total_items def clear(self): """ To clean :class:`Cart` Object. """ self.__intiallize() @property def is_empty(self): """ :return: True if cart has an item else False. """ if self.__items: return False return True @property def is_discount_applied(self): """ :return: True if discount is applied else False. """ if self.__discounts: return True return False @property def has_taxes(self): """ :return: True if tax is applied else False. """ if self.get_taxes(): return True return False def _get__shipping_charge(self): """ :return: Shipping charge. """ return round(self.__shipping_charge * self.currency_rate, self.price_accuracy) def _set__shipping_charge(self, value): """ To set shipping charge amount. :param value: Shipping charge amount. """ self.__shipping_charge = value shipping_charge = property(_get__shipping_charge, _set__shipping_charge) class CartItem(object): """ Collection of :class:`Product` and it's quantity, taxes and options. 
""" def __init__(self, cart, product, price, quantity, taxes=[], options={}): """ :param cart: Instance of :class:`Cart`. :param product: Unique id or name of :class:`Product` object or instance of :class:`Product`. :param price: Product price. :param quantity: Product quantity. :param taxes: Taxes(default []). :param options: Options(default {}). """ for tax in taxes: if not isinstance(tax, dict): raise TypeError('taxes should be list of `dict` type value', 'taxes') self.product = product self.__price = price self.quantity = quantity self.__taxes = taxes self.__options = options self.__cart = cart def update_quantity(self, quantity): """ To update existing quantity related to :class:`Product` object. :param quantity: Product quantity. """ self.quantity = quantity def get_options(self): """ :return: Dict of product's option. """ for option, option_value in self.__options.items(): if isinstance(option_value, dict): for key, value in option_value.items(): if value.has_key('price'): value['price'] = round(value['price'] * self.__cart.currency_rate, self.__cart.price_accuracy) return self.__options def get_taxes(self): """ :return: List of applied taxes. """ return self.__taxes def sub_total(self): """ :return: Total amount by multiplying product price and quantity(without discount deduction). """ price = self.price for option, option_value in self.get_options().items(): if isinstance(option_value, dict): for key, value in option_value.items(): if value.has_key('price'): price += value['price'] return round(price * self.quantity, self.__cart.price_accuracy) def discount_amount(self): """ :return: Discount amount. 
""" discount_amount = 0.0 sub_total = self.sub_total() for discount in self.__cart.get_discounts(): if discount['type'] == 'percentage': discount_amount += round(sub_total * (float(discount['amount'])/100), self.__cart.price_accuracy) elif discount['type'] == 'amount': discount_amount += round(float(discount['amount']) * self.__cart.currency_rate, self.__cart.price_accuracy) return round(discount_amount, self.__cart.price_accuracy) def untaxed_amount(self): """ :return: Untaxed amount(after deducating discount amount). """ total = self.sub_total() - self.discount_amount() if self.__cart.tax_type == 'included': total -= self.tax_amount() return round(total, self.__cart.price_accuracy) def tax_amount(self): """ :return: Tax amount. """ tax_amount = 0.0 if self.__cart.tax_type == 'included': total = self.sub_total() - self.discount_amount() else: total = self.untaxed_amount() tax_amount = calculate(total, self.__taxes, self.__cart.tax_type, self.__cart.currency_rate, self.__cart.price_accuracy) return tax_amount def total(self): """ :return: Total amount(`tax excluded` or `tax included`) by adding untaxed and taxed amount. """ return round(self.untaxed_amount() + self.tax_amount(), self.__cart.price_accuracy) @property def has_options(self): """ :return: True if product has an option else False. """ if self.__options: return True return False @property def has_taxes(self): """ :return: True if product has tax else False. """ if self.__taxes: return True return False def _get_price(self): """ :return: Price by multiplying currency rate. """ return round(self.__price * self.__cart.currency_rate, self.__cart.price_accuracy) def _set_price(self, value): """ To set product price. :param value: Product price. """ self.__price = value price = property(_get_price, _set_price) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
dharmeshpatel/shoppingcart
shoppingCart/cart.py
Python
bsd-3-clause
16,706
#!/usr/bin/python3 """ Copyright (c) 2014 Guillaume Havard - BVS """ import tkinter as tk from application import * root = tk.Tk() app = Application(master=root) app.mainloop()
guillaume-havard/com_elements_gui
src/interface.py
Python
mit
182
import sys t = int(sys.stdin.readline()) gs = None for tix in range(t): s = sys.stdin.readline()[:-1] ss = set() for c in s: ss.add(c) if gs is None: gs = ss else: gs &= ss print len(gs)
pbl64k/HackerRank-Contests
2014-04-26-101/GemStones/gs.accepted.py
Python
bsd-2-clause
235
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies building a subsidiary dependent target from a .gyp file in a subdirectory, without specifying an explicit output build directory, and using the subdirectory's solution or project file as the entry point. """ import TestGyp import errno # Ninja doesn't support relocation. # CMake produces a single CMakeLists.txt in the output directory. test = TestGyp.TestGyp(formats=['!ninja', '!cmake']) test.run_gyp('prog1.gyp', chdir='src') test.relocate('src', 'relocate/src') chdir = 'relocate/src/subdir' test.build('prog2.gyp', chdir=chdir) test.built_file_must_not_exist('prog1', type=test.EXECUTABLE, chdir=chdir) test.run_built_executable('prog2', chdir=chdir, stdout="Hello from prog2.c\n") test.pass_test()
ibc/MediaSoup
worker/deps/gyp/test/subdirectory/gyptest-subdir-default.py
Python
isc
959
# -*- coding: utf-8 -*- """ Class definitions to support grid and point-set objects, as defined in GSLIB_. .. _GSLIB: http://www.gslib.com Created on 12 de Out de 2013 @author: julio """ import os import time import bottleneck as bn import numpy as np import pandas as pd from tools.utils import skip_lines, filename_indexing class PointSet(object): """Class for storing point-set data (irregular mesh). Attributes ---------- path : string File path. name : string Descriptive name. nvars : int Number of variables. nodata : number Missing data value. varnames : list of string Variables names. values : pandas.DataFrame Variables values. Notes ----- According to the GSLIB standard, a point-set file has the following format - descriptive name - number of variables - variables names, one per line (must have two LOCATION variables) - variables values, one per column, numeric values, space separated Examples -------- snirh_data_set 5 x y year station wetdayscount 271329 260536 1981 1 54 271329 260536 1982 1 52 271329 260536 1983 1 53 271329 260536 1984 1 65 271329 260536 1985 1 63 References ---------- GSLIB Help Page: File Formats : http://www.gslib.com/gslib_help/format.html """ def __init__(self, name=None, nodata=-999.9, nvars=0, varnames=None, values=None, psetpath=None, header=True): """Constructor to initialise a PointSet instance. Parameters ---------- name : string, optional Descriptive name. nodata : number, default -999.9 Missing data value. nvars : int, default 0 Number of variables. varnames : list of string, optional Variables names. values : pandas.DataFrame, optional Variables values. psetpath : string, optional File path. header : boolean, default True PointSet file has the GSLIB standard header lines. 
""" self.path = psetpath if self.path and os.path.isfile(self.path): self.load(self.path, nodata, header) else: self.name = name or '' self.nvars = nvars self.nodata = nodata self.varnames = varnames or [] values = values or np.zeros((0, 0)) self.values = pd.DataFrame(values) if len(self.values.columns) == len(self.varnames): self.values.columns = self.varnames def __repr__(self): """Print a PointSet instance like a pd.DataFrame. """ return str(self.values) def add_var(self, values, varname=None): """Append a new variable to an existing PointSet. Parameters ---------- values : array_like Set of values that will be added to the PointSet as a new variable. varname : string, optional The name of the new variable. If not provided, the new variable will be named as 'varNUMBER', according to the total NUMBER of variables in the PointSet. If there is an existing variable with the same name, it will not be overwritten, and the variable will be added with name `'varname' + '_new'`. """ self.nvars += 1 if varname is None: varname = 'var{0}'.format(self.nvars) if varname in self.varnames: varname += '_new' self.varnames.append(varname) self.values[varname] = values def load(self, psetfile, nd=-999.9, header=True): """Load a point-set from a file in GSLIB format. Parameters ---------- psetfile : string File path. nd : number Missing data value. header : boolean, default True PointSet file has the GSLIB standard header lines. Notes ----- If `header` is False, all variables will have name 'varNUMBER'. 
""" self.path = psetfile self.nodata = nd self.varnames = list() fid = open(psetfile, 'r') if header: self.name = fid.readline().strip() self.nvars = int(fid.readline()) for i in xrange(self.nvars): # @UnusedVariable self.varnames.append(fid.readline().strip()) values = np.loadtxt(fid) # TODO: pd.read_table if not header: self.name = os.path.splitext(os.path.basename(psetfile))[0] self.nvars = values.shape[1] self.varnames = ['var{0}'.format(i) for i in xrange(1, self.nvars + 1)] self.values = pd.DataFrame(values, columns=self.varnames) fid.close() def save(self, psetfile=None, header=True): """Write a point-set to a file in GSLIB format. Parameters ---------- psetfile : string, optional File path. If not specified, will rewrite the original file. header : boolean, default True PointSet file has the GSLIB standard header lines. """ if not psetfile: psetfile = self.path else: self.path = psetfile fid = open(psetfile, 'w') if header: fid.write(self.name + '\n' + str(self.nvars) + '\n' + '\n'.join(self.varnames) + '\n') np.savetxt(fid, self.values, fmt='%-10.6f') fid.close() def flush_varnames(self, varnames=None): """Update the DataFrame column labels with the current varnames list or with a given list of names. Parameters ---------- varnames : list of string, optional Variables names. """ if varnames: if 'Flag' in varnames: varnames.remove('Flag') # TODO: this is a hack! self.values.columns = varnames self.varnames = varnames else: self.values.columns = self.varnames # def to_costhome(self, ftype='data', status='xx', variable='vv', # resolution='r', station_id='ssssssss', content='c'): # """Convert a point-set to the COST-HOME format. # # """ # station = ch.Station(spec=(ftype, status, variable, resolution, # station_id, content), md=self.nodata) # station.data class GridArr(object): """Class to store a grid (regular mesh). Attributes ---------- name : string Descriptive name. dx : int Number of nodes in X-axis. dy : int Number of nodes in Y-axis. 
dz : int Number of nodes in Z-axis. xi : number Initial value in X-axis. yi : number Initial value in Y-axis. zi : number Initial value in Z-axis. cellx : number Node size in X-axis. celly : number Node size in Y-axis. cellz : number Node size in Z-axis. nodata : number Missing data value. val : ndarray One dimension array containing the grid values. Notes ----- According to the GSLIB standard, a grid file has the following format - descriptive name - number of variables - variables names, one per line - variables values, one per column, numeric values, space separated, does not need coordinates, location is deduced by a special ordering, point by point towards the east, then row by row to the north, and finally level by level upward, i.e., x cycles fastest, then y, and finally z (FORTRAN-like order). Examples -------- snirh_simulated 1 wetdayscount 85.3 93.1 65.2 72.4 References ---------- GSLIB Help Page: File Formats : http://www.gslib.com/gslib_help/format.html .. TODO: support multiple variables in the same grid. """ def __init__(self, name='', dx=0, dy=0, dz=0, xi=0, yi=0, zi=0, cellx=1, celly=1, cellz=1, nodata=-999.9, val=np.zeros(1)): """ Constructor to initialise a GridArr instance. """ self.name = name self.dx = dx self.dy = dy self.dz = dz self.xi = xi self.yi = yi self.zi = zi self.cellx = cellx self.celly = celly self.cellz = cellz self.nodata = nodata self.val = val def load(self, gridfile, dims, first, cells_size, nd=-999.9, skipheader=3): """Load a grid from a file in GSLIB format. Parameters ---------- gridfile : string File path. dims : array_like of int Same as (dx, dy, dz). first : array_like of number Same as (xi, yi, zi). cells_size : array_like of number Same as (cellx, celly, cellz) nd : number, default -999.9 Missing data value. skipheader : int, default 3 Number of lines in the header. 
""" self.dx = dims[0] self.dy = dims[1] self.dz = dims[2] self.xi = first[0] self.yi = first[1] self.zi = first[2] self.cellx = cells_size[0] self.celly = cells_size[1] self.cellz = cells_size[2] self.nodata = nd self.val = np.loadtxt(gridfile, skiprows=skipheader) def save(self, outfile, varname='var', header=True): """Write a grid to a file in GSLIB format. Parameters ---------- outfile : string File path. varname : string, default 'var' Variable name. header : boolean, default True PointSet file has the GSLIB standard header lines. """ fid = open(outfile, 'w') if header: fid.write(os.path.splitext(os.path.basename(outfile))[0] + '\n1\n' + varname + '\n') # np.savetxt(fid, outvar.reshape(outvar.shape, order='F'), fmt='%10.4') np.savetxt(fid, self.val, fmt='%-10.6f') fid.close() def drill(self, wellxy, save=False, outfile=None, header=True): """Extract a vertical line from a grid. Parameters ---------- wellxy : array_like Coordinates (x, y) of the drilling location. save : boolean, default False Write the result into a file. outfile : string, optional File path. header : boolean, default True PointSet file has the GSLIB standard header lines. .. 
TODO: needs fix, it is not drilling in the right place """ well = PointSet() well.name = self.name + ' drilled at ' + str(wellxy) well.nodata = self.nodata well.nvars = 4 well.varnames = ['x', 'y', 'z', 'var'] well.values = pd.DataFrame(np.zeros((self.dz, 4))) well.values.iloc[:, :3] = (np.column_stack (((np.repeat(np.array(wellxy) [np.newaxis, :], self.dz, axis=0)), np.arange(1, self.dz + 1)))) xy_nodes = coord_to_grid(wellxy, [self.cellx, self.celly, self.cellz], [self.xi, self.yi, self.zi]) for z in xrange(self.dz): p = (xy_nodes[0] + self.dx * (xy_nodes[1] - 1) + self.dx * self.dy * z) well.values.iloc[z, 3] = self.val[p] if save and outfile is not None: well.save(outfile, header) return well class GridFiles(object): """This class keeps track of all the files containing simulation results, i.e., Direct Sequential Simulation (DSS) realisations. All PointSet files must have the same properties. Attributes ---------- files : list of file List containing the files handler (type 'file'). nfiles : int Number of files. dx : int Number of nodes in X-axis. dy : int Number of nodes in Y-axis. dz : int Number of nodes in Z-axis. xi : number Initial value in X-axis. yi : number Initial value in Y-axis. zi : number Initial value in Z-axis. cellx : number Node size in X-axis. celly : number Node size in Y-axis. cellz : number Node size in Z-axis. cells : int Total number of nodes in each grid. header : boolean, default True PointSet file has the GSLIB standard header lines. nodata : number Missing data value. .. TODO: make class child of GridArr? """ def __init__(self): """Constructor to initialise a GridFiles instance. 
""" self.files = list() self.nfiles = 0 self.dx = 0 self.dy = 0 self.dz = 0 self.xi = 0 self.yi = 0 self.zi = 0 self.cellx = 1 self.celly = 1 self.cellz = 1 self.cells = 0 self.header = 0 self.nodata = -999.9 def load(self, first_file, n, dims, first_coord, cells_size, no_data, headerin=3): """Open several grid files and provide a list containing each file handler (delivered in the `files` attribute). Parameters ---------- first_file : string File path to the first file to be opened. See notes for file names format. n : int Number of files. dims : array_like Number of nodes in each direction, [dx, dy, dz] first_coord : array_like First coordinate in each direction, [xi, yi, zi]. cells_size : array_like Nodes size in each direction, [cellx, celly, cellz]. no_data : number Missing data value. headerin : int, default 3 Number of lines in the header. Raises ------ IOError Could not find a file with an expected file name. Notes ----- It is assumed that the files are numbered in the following manner: - 1st file_with_this_name.extension - 2nd file_with_this_name2.extension - 3rd file_with_this_name3.extension - nth file_with_this_namen.extension """ self.nfiles = n self.dx = dims[0] self.dy = dims[1] self.dz = dims[2] self.xi = first_coord[0] self.yi = first_coord[1] self.zi = first_coord[2] self.cellx = cells_size[0] self.celly = cells_size[1] self.cellz = cells_size[2] self.cells = np.prod(dims) self.header = headerin self.nodata = no_data self.files.append(open(first_file, 'rb')) for i in xrange(2, n + 1): another = filename_indexing(first_file, i) if os.path.isfile(another): self.files.append(open(another, 'rb')) else: raise IOError('File {0} not found.'. format(os.path.basename(another))) def open_files(self, files_list, dims, first_coord, cells_size, no_data, headerin=3, only_paths=False): """Open a list of given grid files and provide a list containing each file handler (delivered in the `files` attribute). 
Parameters ---------- files_list : list List of file paths to the files to be opened dims : array_like Number of nodes in each direction, [dx, dy, dz] first_coord : array_like First coordinate in each direction, [xi, yi, zi]. cells_size : array_like Nodes size in each direction, [cellx, celly, cellz]. no_data : number Missing data value. headerin : int, default 3 Number of lines in the header. only_paths : bool Do not open the files, just save their paths. Raises ------ IOError Could not find a file with an expected file name. Notes ----- It is assumed that the files are numbered in the following manner: - 1st file_with_this_name.extension - 2nd file_with_this_name2.extension - 3rd file_with_this_name3.extension - nth file_with_this_namen.extension """ def append_opened(path): "Auxiliary function to minimise the number of conditions verified." self.files.append(open(path, 'rb')) self.nfiles = len(files_list) self.dx = dims[0] self.dy = dims[1] self.dz = dims[2] self.xi = first_coord[0] self.yi = first_coord[1] self.zi = first_coord[2] self.cellx = cells_size[0] self.celly = cells_size[1] self.cellz = cells_size[2] self.cells = np.prod(dims) self.header = headerin self.nodata = no_data if only_paths: open_file = self.files.append else: open_file = append_opened for gridfile in files_list: try: open_file(gridfile) except IOError, msg: print(msg) raise IOError('File {0} not found.'.format(gridfile)) def reset_read(self): """Reset the pointer that reads each file to the beginning. """ for grid in self.files: grid.seek(os.SEEK_SET) def dump(self): """Close all files. """ for grid in self.files: grid.close() self.nfiles = 0 def purge(self): """Remove all simulated map files from the filesystem permanently. 
""" self.dump() # workaround for delay issue on NT systems time.sleep(1) for grid in self.files: os.remove(grid.name) def stats(self, lmean=False, lmed=False, lskew=False, lvar=False, lstd=False, lcoefvar=False, lperc=False, p=0.95): """Calculate some statistics among every realisation. Each statistic is calculated node-wise along the complete number of realisations. Parameters ---------- lmean : boolean, default False Calculate the mean. lmed : boolean, default False Calculate the median. lskew : boolean, default False Calculate skewness. lvar : boolean, default False Calculate the variance. lstd : boolean, default False Calculate the standard deviation. lcoefvar : boolean, default False Calculate the coefficient of variation. lperc : boolean, default False Calculate the percentile `100 * (1 - p)`. p : number, default 0.95 Probability value. Returns ------- retdict : dict of GridArr Dictionary containing one GridArr for each calculated statistic. See Also -------- stats_area : same but considering a circular (and horizontal) area of a specified radius around a given point. 
""" # check if the map files are already opened or not if isinstance(self.files[0], file): opened_files = True else: opened_files = False if lmean: meanmap = np.zeros(self.cells) if lmed: medmap = np.zeros(self.cells) if lskew: skewmap = np.zeros(self.cells) if lvar: varmap = np.zeros(self.cells) if lstd: stdmap = np.zeros(self.cells) if lcoefvar: coefvarmap = np.zeros(self.cells) if lperc: percmap = np.zeros((self.cells, 2)) arr = np.zeros(self.nfiles) skip = True offset = os.SEEK_SET for cell in xrange(self.cells - self.header): for i, gridfile in enumerate(self.files): # deal with map files not open yet if opened_files: grid = gridfile else: grid = open(gridfile, 'rb') grid.seek(offset) if skip: skip_lines(grid, self.header) arr[i] = grid.readline() if not opened_files: offset = grid.tell() grid.close() skip = False # replace no data's with NaN bn.replace(arr, self.nodata, np.nan) if lmean: meanmap[cell] = bn.nanmean(arr) if lmed: medmap[cell] = bn.nanmedian(arr) if lskew: skewmap[cell] = pd.Series(arr).skew() if lvar: varmap[cell] = bn.nanvar(arr, ddof=1) if lstd: stdmap[cell] = bn.nanstd(arr, ddof=1) if lcoefvar: if lstd and lmean: coefvarmap[cell] = stdmap[cell] / meanmap[cell] * 100 else: std = bn.nanstd(arr, ddof=1) mean = bn.nanmean(arr) coefvarmap[cell] = std / mean * 100 if lperc: percmap[cell] = pd.Series(arr).quantile([(1 - p) / 2, 1 - (1 - p) / 2]) retdict = dict() if lmean: meangrid = GridArr(name='meanmap', dx=self.dx, dy=self.dy, dz=self.dz, nodata=self.nodata, val=meanmap) retdict['meanmap'] = meangrid if lmed: medgrid = GridArr(name='medianmap', dx=self.dx, dy=self.dy, dz=self.dz, nodata=self.nodata, val=medmap) retdict['medianmap'] = medgrid if lskew: skewgrid = GridArr(name='skewmap', dx=self.dx, dy=self.dy, dz=self.dz, nodata=self.nodata, val=skewmap) retdict['skewmap'] = skewgrid if lvar: vargrid = GridArr(name='varmap', dx=self.dx, dy=self.dy, dz=self.dz, nodata=self.nodata, val=varmap) retdict['varmap'] = vargrid if lstd: stdgrid = 
GridArr(name='stdmap', dx=self.dx, dy=self.dy, dz=self.dz, nodata=self.nodata, val=stdmap) retdict['stdmap'] = stdgrid if lcoefvar: coefvargrid = GridArr(name='coefvarmap', dx=self.dx, dy=self.dy, dz=self.dz, nodata=self.nodata, val=coefvarmap) retdict['coefvarmap'] = coefvargrid if lperc: percgrid = GridArr(name='percmap', dx=self.dx, dy=self.dy, dz=self.dz, nodata=self.nodata, val=percmap) retdict['percmap'] = percgrid return retdict def stats_area(self, loc, tol=0, lmean=False, lmed=False, lskew=False, lvar=False, lstd=False, lcoefvar=False, lperc=False, p=0.95, save=False): """Calculate some statistics among every realisation, considering a circular (only horizontaly) area of radius `tol` around the point located at `loc`. Parameters ---------- loc : array_like Location of the vertical line [x, y]. tol : number, default 0 Tolerance radius used to search for neighbour nodes. lmean : boolean, default False Calculate the mean. lmed : boolean, default False Calculate the median. lskew : boolean, default False Calculate skewness. lvar : boolean, default False Calculate the variance. lstd : boolean, default False Calculate the standard deviation. lcoefvar : boolean, default False Calculate the coefficient of variation. lperc : boolean, default False Calculate the percentile `100 * (1 - p)`. p : number, default 0.95 Probability value. save : boolean, default False Write the points used to calculate the chosen statistics in PointSet format to a file named 'sim values at (x, y, line).prn'. Returns ------- statspset : PointSet PointSet instance containing the calculated statistics. .. 
TODO: checkar stats variance com geoms """ if lmean: meanline = np.zeros(self.dz) if lmed: medline = np.zeros(self.dz) if lskew: skewline = np.zeros(self.dz) if lvar: varline = np.zeros(self.dz) if lstd: stdline = np.zeros(self.dz) if lcoefvar: coefvarline = np.zeros(self.dz) if lperc: percline = np.zeros((self.dz, 2)) # convert the coordinates of the first point to grid nodes loc = coord_to_grid(loc, [self.cellx, self.celly, self.cellz], [self.xi, self.yi, self.zi])[:2] # find the nodes coordinates within a circle centred in the first point neighbours_nodes = circle(loc[0], loc[1], tol) # compute the lines numbers for each point in the neighbourhood, across # each grid layer. this yields a N*M matrix, with N equal to the number # of neighbour nodes, and M equal to the number of layers in the grid. neighbours_lines = [line_zmirror(node, [self.dx, self.dy, self.dz]) for node in neighbours_nodes] # sort the lines in ascending order neighbours_lines = np.sort(neighbours_lines, axis=0) # create an array to store the neighbour nodes in each grid file nnodes = neighbours_lines.shape[0] arr = np.zeros(self.nfiles * nnodes) skip = True curr_line = np.zeros(self.nfiles) for layer in xrange(neighbours_lines.shape[1]): for i, line in enumerate(neighbours_lines[:, layer]): for j, grid in enumerate(self.files): # skip header lines only once per grid file if skip and self.header: skip_lines(grid, self.header) # advance to the next line with a neighbour node skip_lines(grid, int(line - curr_line[j] - 1)) # read the line and store its value a = grid.readline() arr[i + j * nnodes] = float(a) curr_line[j] = line skip = False # replace no data's with NaN bn.replace(arr, self.nodata, np.nan) # compute the required statistics if lmean: meanline[layer] = bn.nanmean(arr) if lmed: medline[layer] = bn.nanmedian(arr) if lskew: skewline[layer] = pd.Series(arr).skew() if lvar: varline[layer] = bn.nanvar(arr, ddof=1) if lstd: stdline[layer] = bn.nanstd(arr, ddof=1) if lcoefvar: if lstd and 
lmean: coefvarline[layer] = stdline[layer] / meanline[layer] * 100 else: std = bn.nanstd(arr, ddof=1) mean = bn.nanmean(arr) coefvarline[layer] = std / mean * 100 if lperc: percline[layer] = pd.Series(arr).quantile([(1 - p) / 2, 1 - (1 - p) / 2]) if save and tol == 0: # FIXME: not working with the tolerance feature # need to adjust the arrpset or cherry-pick arr arrpset = PointSet('realisations at location ({0}, {1}, {2})'. format(loc[0], loc[1], layer * self.cellz + self.zi), self.nodata, 3, ['x', 'y', 'value'], values=np.zeros((self.nfiles, 3))) arrout = os.path.join(os.path.dirname(self.files[0].name), 'sim values at ({0}, {1}, {2}).prn'.format( loc[0], loc[1], layer * self.cellz + self.zi)) arrpset.values.iloc[:, 2] = arr arrpset.values.iloc[:, :2] = np.repeat(np.array(loc) [np.newaxis, :], self.nfiles, axis=0) arrpset.save(arrout, header=True) ncols = sum((lmean, lmed, lvar, lstd, lcoefvar, lskew)) if lperc: ncols += 2 statspset = PointSet(name='vertical line stats at (x,y) = ({0},{1})'. 
format(loc[0], loc[1]), nodata=self.nodata, nvars=3 + ncols, varnames=['x', 'y', 'z'], values=np.zeros((self.dz, 3 + ncols))) statspset.values.iloc[:, :3] = (np.column_stack (((np.repeat(np.array(loc) [np.newaxis, :], self.dz, axis=0)), np.arange(self.zi, self.zi + self.cellz * self.dz)))) j = 3 if lmean: statspset.varnames.append('mean') statspset.values.iloc[:, j] = meanline j += 1 if lmed: statspset.varnames.append('median') statspset.values.iloc[:, j] = medline j += 1 if lskew: statspset.varnames.append('skewness') statspset.values.iloc[:, j] = skewline j += 1 if lvar: statspset.varnames.append('variance') statspset.values.iloc[:, j] = varline j += 1 if lstd: statspset.varnames.append('std') statspset.values.iloc[:, j] = stdline j += 1 if lcoefvar: statspset.varnames.append('coefvar') statspset.values.iloc[:, j] = coefvarline j += 1 if lperc: statspset.varnames.append('lperc') statspset.varnames.append('rperc') statspset.values.iloc[:, -2:] = percline # reset the reading pointer in each grid file self.reset_read() # update varnames statspset.flush_varnames() return statspset def coord_to_grid(coord, cells_size, first): """Upscale the given coordinates to the grid coordinate system (in number of nodes). It accepts coordinates in 2D (x, y) or 3D (x, y, z). Parameters ---------- coord : array_like Coordinates to convert. cells_size : array_like Nodes dimension in each direction. first : array_like Initial coordinate value in each direction. Returns ------- grid_coord : ndarray Upscaled coordinates. Notes ----- The result is always in 3D (x, y, z). 
""" if len(coord) < len(first): coord = list(coord) coord.append(first[2]) coord = np.array(coord).astype('float') cells_size = np.array(cells_size) first = np.array(first) grid_coord = np.around((coord - first) / cells_size + 1).astype('int') return grid_coord def grid_to_line(coord, dims): """Convert the coordinates of a point in a grid into the number of the line where it is located in the a grid file which follows the GSLIB standard. The header lines are not considered. Parameters ---------- coord : array_like Coordinates to convert (x, y, z). dims : array_like Number of nodes in the grid, in each direction (x, y). The third dimension is not needed. Returns ------- int Number of the line where the given point is located, given that the grid file is in the GSLIB standard. """ return (coord[0] + dims[0] * (coord[1] - 1) + dims[0] * dims[1] * (coord[2] - 1)) def line_zmirror(loc, dims): """Compute the lines numbers corresponding to the vertical line starting in a given point. Parameters ---------- loc : array_like Grid coordinates of the starting point (x, y). The third dimension is not needed. dims : array_like Number of nodes in the grid, in each direction (x, y, z). Returns ------- list List of the number of the lines below the given point, in a vertical line. """ return [grid_to_line([loc[0], loc[1], z], dims) for z in xrange(1, dims[2] + 1)] def loadcheck(s, header): """Check if s is a file path or PointSet instance. Parameters ---------- s : PointSet object or string Instance of PointSet type or string with the full path to the PointSet file. header : string True if `obs_file` has the GSLIB standard header lines. Returns ------- PointSet Raises ------ IOError `s` does not refer to an existing file. TypeError `s` must be a string or PointSet. 
""" if isinstance(s, PointSet): return s elif isinstance(s, str): if not os.path.isfile(s): raise IOError("file {0} not found".format(s)) pset = PointSet() pset.load(s, header) return pset else: raise TypeError("need string or PointSet, {0} found".format(type(s))) def add_header(path, name=None, varnames=None, out=None): """Add standard GSLIB header to a file. Parameters ---------- path : string or PointSet object File path or instance of PointSet type. name : string, optional Data set name. If not specified, it will write the file name. varnames : list of string, optional Variables names. If not specified, it will write *var*. out : string, optional File path. If not specified, it will write to a copy with *_nohead* appended to the file name. """ with open(path, 'r+') as f: values = np.loadtxt(f) if out is None: fname, ext = os.path.splitext(path) out = fname + '_head' + ext with open(out, 'w+') as f: if name is None: name = os.path.splitext(os.path.basename(path))[0] f.write(name + '\n') if varnames is None: nvars = values.shape[1] varnames = ['var {0}'.format(i) for i in xrange(nvars)] else: nvars = len(varnames) f.write(str(nvars) + '\n') for varname in varnames: f.write(varname + '\n') np.savetxt(f, values, fmt='%-10.6f') def remove_header(path, out=None): """Remove standard GSLIB header from a file. Parameters ---------- path : string or PointSet object File path or instance of PointSet type. out : string, optional File path. If not specified, it will write to a copy with *_nohead* appended to the file name. """ if has_header(path): with open(path, 'r+') as f: f.readline() nvars = int(f.readline()) skip_lines(f, nvars) values = np.loadtxt(f) if out is None: fname, ext = os.path.splitext(path) out = fname + '_nohead' + ext np.savetxt(out, values, fmt='%-10.6f') def has_header(path): """Try to detect if standard GSLIB header is present. 
It checks for - second line value is an integer - that integer is equal to the number of columns Parameters ---------- path : string File path. Returns ------- boolean Notes ----- It will return True in a case where there is not a header but is seems so, for instance ` 4 1 5 2 2 4 ... ` """ checklist = list() fh = open(path, 'r+') fh.readline() # check if the value in the 2nd line is integer nvars = fh.readline() try: checklist.append(nvars.strip() == str(int(nvars))) except ValueError: return False nvars = int(nvars) skip_lines(fh, nvars) # check if the number of variables matches with the number of columns first_values = fh.readline() checklist.append(len(first_values.split()) == nvars) return all(checklist) def circle(xc, yc, r): """Compute a circle in a grid, centred in the point (xc, yc) and with radius equal to r. Return the coordinates of the nodes which are inside the circle. Parameteres ----------- xc : int First coordinate (in nodes) of the circle centre. yc : int Second coordinate (in nodes) of the circle centre. r : int Circle radius (in nodes). Returns ------- ndarray 2D Matrix with the first and second coordinates of the nodes which are inside the circle. 
""" # squared grid of the circle centre neighbourhood xx, yy = np.mgrid[xc - r:xc + r + 1, yc - r:yc + r + 1].astype('float32') # eliminate negative values xx[xx < 0] = np.nan yy[yy < 0] = np.nan # squared distance circle = (xx - xc) ** 2 + (yy - yc) ** 2 # retrieve the points inside the circle inside = circle <= r ** 2 return np.column_stack((xx[inside], yy[inside])).astype('int') def _wrap1(): # print 'loading grids' grids = GridFiles() grids.load(fstpar, 10, [50, 50, 10], [0, 0, 0], [1, 1, 1], -999.9, 0) # print 'calculating stats' gstats = grids.stats(lmean=True, lvar=True, lperc=True, p=0.95) # print 'saving output' meangrid = GridArr(name='meanmap', dx=50, dy=50, dz=10, val=gstats[0]) meangrid.save(os.path.join(outpath, 'meanmap.out'), 'mean') vargrid = GridArr(name='varmap', dx=50, dy=50, dz=10, val=gstats[1]) vargrid.save(os.path.join(outpath, 'varmap.out'), 'var') grids.dump() # print 'drilling' vline = meangrid.drill((5, 5), True, os.path.join(outpath, 'psetout.prn')) return vline def _wrap2(): # print 'loading grids' grids = GridFiles() grids.load(fstpar, nsims, griddims, fstcoord, nodesize, -999.9, 0) vstats = grids.stats_area(pointloc, tol=rad, lmean=True, lvar=True, lperc=True, p=0.95) vstats.save(os.path.join(outpath, 'statsmap_t' + str(rad) + '.out'), 'var') grids.dump() return vstats if __name__ == '__main__': import timeit fstpar = '/home/julio/Testes/cost-home/rede000010_work/1990-1999/1990-1999_dss_map_st0_sim.out' outpath = '/home/julio/Testes/cost-home/rede000010_work/test' psetpath = '/home/julio/Testes/cost-home/rede000010_work/1990-1999/1990-1999_candidate_0.prn' nsims = 500 pointloc = [1815109.147, 7190958.185] griddims = [81, 122, 10] fstcoord = [1770000, 7094000, 1990] nodesize = [1000, 1000, 1] rad = 0 print(timeit.timeit("_wrap2()", setup="from __main__ import _wrap2", number=10)) for rad in xrange(0, 5): # (0, 3): # """ timer # print 'calculating grid stats + drill' # print(timeit.timeit("_wrap1()", setup="from __main__ import 
_wrap1", # number=100)) print 'calculating vline stats' print 'tolerance: ', rad print(timeit.timeit("_wrap2()", setup="from __main__ import _wrap2", number=10)) # """ # snirh = '/home/julio/Testes/snirh500_dssim_narrow/snirh.prn' # bench = '/Users/julio/Desktop/testes/cost-home/rede000005/1900_1909.txt' # # """ # pset = PointSet() # pset.load(snirh, header=False) # pset.load(bench, header=True) # pset.save(os.path.join(outpath, 'psetout.prn')) # """ # pset.to_costhome() print 'done'
iled/gsimcli
GSIMCLI/tools/grid.py
Python
gpl-3.0
40,370
#!/usr/bin/env python # -*- coding: UTF-8 -*- import calendar import logging import module.collector.config as config import module.common.const as const from datetime import datetime from module.common.topologydb import * from module.common.md import post_md from module.common.cp_md import create_monitoring_data_xml,DBUploader from module.common.util import to_array from pyzabbix import ZabbixAPI # constants COL_NAME = 'monitoring-data(cp)' POST_URI = config.post_uri + "/" + const.TYPE_MON_CP # logger. logger = logging.getLogger(const.MODULE_NAME_COL) # set data uploader script file path. db_upld = DBUploader(log_name=const.MODULE_NAME_COL, db_name=config.mon_data_cp_db, db_host=config.db_addr,db_port=config.db_port, db_user=config.db_user,db_pass=config.db_pass) # set interval. interval = 0 for module in config.module_list: if module['class-name'].split('.')[-1] == 'MonitoringDataCP': interval = module['interval'] break class MonitoringDataCP(): def __check_timestamp(self,timestamp,now_time,ts_type): # If timestamp_type is 1. 
if ts_type == const.TYPE_TS_REP: # to replace timestamp with now_time if timestamp is smaller than "now_time - interval" if int(timestamp) < int(now_time) - interval: return str(now_time) return str(timestamp) def __get_monitoring_data(self,zapi,node_name,node_type,now_time): logger.debug("get monitoring-data.") # Query item[status] item_sts = "" target_item_list = None if node_type == const.TYPE_NODE_SRV: item_sts = const.ZBX_SRV_STS target_item_list = config.zabbix_cp_mon_item_srv_list elif node_type == const.TYPE_NODE_VM: item_sts = const.ZBX_VM_STS target_item_list = config.zabbix_cp_mon_item_vm_list item_sts = item_sts + '[{0}]'.format(node_name) items = zapi.item.get(output=['hostid','lastclock','lastvalue'] ,filter={"key_":item_sts}) if not len(items): logger.warn('item is not found.({0})'.format(item_sts)) return None elif len(items) != 1: logger.warn('item is not one.({0})'.format(item_sts)) return None logger.debug(items) # Print out each datapoint val_list = list() for item in items: val_dict = dict() val_dict['status'] = item['lastvalue'] val_dict['timestamp'] = self.__check_timestamp(item['lastclock'],now_time,const.TYPE_TS_REP) val_list.append(val_dict) hostid = item['hostid'] for target_item in target_item_list: # Query item[set in the configuration file values] items = zapi.item.get(output=['lastclock','lastvalue'] ,filter={"hostid":hostid,"key_":target_item['item-name']}) # If nothing was found, try getting it from history if not len(items): logger.warn('item is not found.({0})'.format(target_item['item-name'])) continue elif len(items) != 1: logger.warn('item is not one.({0})'.format(item_sts)) logger.debug(items) # Print out each datapoint for item in items: val_dict = dict() val_dict[target_item['data-name']] = item['lastvalue'] val_dict['timestamp'] = self.__check_timestamp(item['lastclock'],now_time,target_item['ts-type']) val_list.append(val_dict) res_dict = {'node_name':node_name,'node_type':node_type,'val_list':val_list} return res_dict def 
main(self): # now monitoring timesamp(etime) now_time = 0 try: print(COL_NAME + ' -start-') logger.debug(COL_NAME + ' -start-') # get now time.(UTC:0) now_time = calendar.timegm(datetime.utcnow().timetuple()) # Login to the Zabbix API zapi = ZabbixAPI(config.zabbix_uri) zapi.login(config.zabbix_user,config.zabbix_pass) node_list =list() # get all of the VM from DB. vm_list = get_all_vm() if vm_list: node_list.extend(vm_list) # get all of the Server from DB. srv_list = get_all_srv() if srv_list: node_list.extend(srv_list) all_md_list = [] for node in node_list: # get monitoring-data from Zabbix. node_name = node.node_name node_type = node.type md_dict = self.__get_monitoring_data(zapi,node_name,node_type,now_time) if not md_dict: logger.debug('monitoring-data is no data.(node={0},type={1})'.format(node_name,node_type)) continue md_dict['network_name'] = node.network_name md_dict['network_type'] = node.network.type ### md_dict={nw_name:xxx,nw_type,node_name:xxx,node_type:server|vm, ### val_list:list(val_dict[param_name:value])} logger.debug(md_dict) all_md_list.append(md_dict) if not all_md_list: logger.debug('monitoring-data is no data.(all node)') return # parse monitoring-data-list to monitoring-data-xml. md_xml = create_monitoring_data_xml(logger,all_md_list) if not md_xml: logger.debug('monitoring-data-xml is null.') return logger.debug(md_xml) # upload monitoring-data to DB. logger.debug('upload monitoring-data to DB.') if not db_upld.upload_monitoring_data_all(md_xml): logger.debug('upload monitoring-data is null.') return # post the monitoring-data to the master-monitoring-server. 
logger.debug('post the monitoring-data to the master-monitoring-server.') res_flg,res = post_md(POST_URI,md_xml,'yes') if res_flg is False: logger.error('post monitoring-data error.(post_uri={0})'.format(POST_URI)) if res: logger.debug("HTTP Response({0}):{1}".format(res.status_code,res.text)) except Exception: logger.exception(COL_NAME) print(COL_NAME + ' -exception-') finally: zapi.logout logger.debug(COL_NAME + ' -end-') print(COL_NAME + ' -end-') return
ict-felix/stack
mseu/module/collector/zabbix/cp.py
Python
apache-2.0
6,803
# $Id$ # # Copyright (C) 2005 greg Landrum and Rational Discovery LLC # All Rights Reserved # import gzip import os import unittest from rdkit import RDConfig from rdkit.DataStructs import ExplicitBitVect from rdkit.DataStructs.VectCollection import VectCollection from rdkit.ML import InfoTheory from rdkit.ML.DecTree.BuildSigTree import BuildSigTree, _GenerateRandomEnsemble from rdkit.ML.DecTree.SigTree import SigTreeNode from rdkit.TestRunner import redirect_stdout from io import StringIO class TestCase(unittest.TestCase): def setUp(self): t1 = SigTreeNode(None, 'root', 0) t2 = SigTreeNode(t1, 'nodeL1', 1) t1.AddChildNode(t2) t3 = SigTreeNode(t2, 'nodeLTerm0', 0, isTerminal=1) t4 = SigTreeNode(t2, 'nodeLTerm1', 1, isTerminal=1) t2.AddChildNode(t3) t2.AddChildNode(t4) t2 = SigTreeNode(t1, 'nodeR1', 2) t1.AddChildNode(t2) t3 = SigTreeNode(t2, 'nodeRTerm0', 1, isTerminal=1) t4 = SigTreeNode(t2, 'nodeRTerm1', 0, isTerminal=1) t2.AddChildNode(t3) t2.AddChildNode(t4) self.tree = t1 def test1(self): t1 = self.tree bv = ExplicitBitVect(5) ex = ['nm', bv] self.assertFalse(t1.ClassifyExample(ex)) bv.SetBit(1) self.assertTrue(t1.ClassifyExample(ex)) bv.SetBit(0) self.assertTrue(t1.ClassifyExample(ex)) bv.SetBit(2) self.assertFalse(t1.ClassifyExample(ex)) def test2(self): t1 = self.tree vc = VectCollection() bv = ExplicitBitVect(5) bv.SetBitsFromList([0]) vc.AddVect(1, bv) bv = ExplicitBitVect(5) bv.SetBitsFromList([1, 2]) vc.AddVect(2, bv) ex = ['nm', bv, 1] self.assertTrue(t1.ClassifyExample(ex)) bv = ExplicitBitVect(5) bv.SetBitsFromList([0, 2]) vc.AddVect(1, bv) ex = ['nm', bv, 1] self.assertFalse(t1.ClassifyExample(ex)) def test3(self): examples = [] bv = ExplicitBitVect(2) vc = VectCollection() vc.AddVect(1, bv) examples.append(['a', vc, 1]) bv = ExplicitBitVect(2) bv.SetBit(1) vc = VectCollection() vc.AddVect(1, bv) examples.append(['c', vc, 0]) bv = ExplicitBitVect(2) bv.SetBit(1) vc = VectCollection() vc.AddVect(1, bv) examples.append(['c2', vc, 0]) bv = 
ExplicitBitVect(2) bv.SetBit(0) vc = VectCollection() vc.AddVect(1, bv) examples.append(['d', vc, 0]) bv = ExplicitBitVect(2) bv.SetBit(0) vc = VectCollection() vc.AddVect(1, bv) bv = ExplicitBitVect(2) bv.SetBit(1) vc.AddVect(2, bv) examples.append(['d2', vc, 0]) bv = ExplicitBitVect(2) bv.SetBit(0) bv.SetBit(1) vc = VectCollection() vc.AddVect(1, bv) examples.append(['d', vc, 1]) bv = ExplicitBitVect(2) bv.SetBit(0) bv.SetBit(1) vc = VectCollection() vc.AddVect(1, bv) examples.append(['e', vc, 1]) f = StringIO() with redirect_stdout(f): t = BuildSigTree(examples, 2, metric=InfoTheory.InfoType.ENTROPY, maxDepth=2, verbose=True) self.assertIn('Build', f.getvalue()) self.assertEqual(t.GetName(), 'Bit-0') self.assertEqual(t.GetLabel(), 0) c0 = t.GetChildren()[0] self.assertEqual(c0.GetName(), 'Bit-1') self.assertEqual(c0.GetLabel(), 1) c1 = t.GetChildren()[1] self.assertEqual(c1.GetName(), 'Bit-1') self.assertEqual(c1.GetLabel(), 1) bv = ExplicitBitVect(2) bv.SetBit(0) vc = VectCollection() vc.AddVect(1, bv) bv = ExplicitBitVect(2) bv.SetBit(1) vc.AddVect(2, bv) r = t.ClassifyExample(['t', vc, 0]) self.assertEqual(r, 0) def test4(self): import pickle gz = gzip.open( os.path.join(RDConfig.RDCodeDir, 'ML', 'DecTree', 'test_data', 'cdk2-few.pkl.gz'), 'rb') examples = pickle.load(gz, encoding='Latin1') t = BuildSigTree(examples, 2, maxDepth=3) self.assertEqual(t.GetLabel(), 2181) self.assertEqual(t.GetChildren()[0].GetLabel(), 2861) self.assertEqual(t.GetChildren()[1].GetLabel(), 8182) def test_GenerateRandomEnsemble(self): ensemble = _GenerateRandomEnsemble(2, 4) self.assertEqual(len(ensemble), 2) self.assertTrue(all(r < 4 for r in ensemble)) ensemble = _GenerateRandomEnsemble(4, 4) self.assertEqual(len(ensemble), 4) self.assertTrue(all(r < 4 for r in ensemble)) ensemble = _GenerateRandomEnsemble(4, 40) self.assertEqual(len(ensemble), 4) self.assertTrue(all(r < 40 for r in ensemble)) if __name__ == '__main__': # pragma: nocover unittest.main()
bp-kelley/rdkit
rdkit/ML/DecTree/UnitTestSigTree.py
Python
bsd-3-clause
4,996
from sketch import build_sketch, _dump_json
from IPython.display import HTML
from scipy.constants import golden_ratio

__version__ = '0.9.3'


def init_javascript(path='/static/custom/matta'):
    '''
    Returns the Javascript code needed to load matta libraries.

    In the IPython notebook this is loaded automatically by adding the output
    of this function to custom.js script.
    '''
    # RequireJS module id -> script path (without the .js suffix).
    paths = {
        'd3': '{0}/d3.v3.min'.format(path),
        'wordcloud': '{0}/d3.layout.cloud'.format(path),
        'tile': '{0}/d3.geo.tile'.format(path),
        'sankey': '{0}/d3.sankey'.format(path),
        'matta': '{0}/matta'.format(path),
        'force_directed': '{0}/matta.force-directed'.format(path),
        'force_edge_bundling': '{0}/d3.ForceEdgeBundling'.format(path),
        'topojson': '{0}/topojson'.format(path),
        'leaflet': '{0}/leaflet-0.7.3/leaflet-src'.format(path),
        'colajs': '{0}/cola.v3.min'.format(path),
        # BUGFIX: the path had a trailing space ('{0}/cartogram '), which made
        # RequireJS request a non-existent '.../cartogram .js' file.
        'cartogram': '{0}/cartogram'.format(path),
    }
    template = '''
    <script>
        $(document).ready(function() {{
            if (!require.defined('matta')) {{
                require.config({{
                    paths: {0}
                }});
                require(['matta'], function(matta) {{
                    matta.add_css('{1}');
                }});
            }}
        }});
    </script>
    <span class="label label-info">matta</span> Javascript code added.
    '''.format(_dump_json(paths), path + '/matta.css')
    return HTML(template)


def dump_data(data, json_name):
    '''Serialize `data` as JSON into the file `json_name`.'''
    with open(json_name, 'w') as f:
        f.write(_dump_json(data))


# Visualizations

## Sankey Diagram
__sankey_args = {
    'requirements': ['d3', 'matta', 'sankey'],
    'visualization_name': 'matta.sankey',
    'figure_id': None,
    'container_type': 'svg',
    'data': {
        'graph': None,
    },
    'options': {
        'horizontal': False,
        'background_color': None,
    },
    'variables': {
        'width': 960,
        'height': 500,
        'padding': {'left': 150, 'top': 10, 'right': 150, 'bottom': 10},
        'font_size': 10,
        'layout_iterations': 64,
        'link_opacity': 0.95,
        'node_opacity': 0.85,
        'node_color': 'steelblue',
        'node_width': 15,
        'node_padding': 10,
        'node_label': 'name',
        'link_color': '#efefef',
        'link_weight': 'weight',
        'link_color_scale_width': 150,
        'link_color_scale_height': 8,
        'link_color_scale_extent': None,
        'link_color_scale_title': None,
        'link_color_scale_domain': None,
        'link_color_scale_range': None,
        'link_color_variable': None,
    },
}

sankey = build_sketch(__sankey_args)


## Maps using TopoJSON
__topojson_args = {
    'requirements': ['d3', 'matta', 'topojson'],
    'visualization_name': 'matta.topojson',
    'figure_id': None,
    'container_type': 'div',
    'data': {
        'geojson': None,
        'geometry': None,
        'area_dataframe': None,
        'mark_dataframe': None,
        'graph': None
    },
    'options': {
        'leaflet': False,
        'background_color': False,
        'graph_bundle_links': False
    },
    'variables': {
        'width': 960,
        'height': 500,
        'padding': {'left': 0, 'top': 0, 'right': 0, 'bottom': 0},
        'feature_id': 'id',
        'label': None,
        'legend': True,
        'path_opacity': 1.0,
        'path_stroke': 'gray',
        'path_stroke_width': 1.0,
        'fill_color': 'none',
        'area_feature_name': None,
        'area_value': None,
        'area_color_scale': None,
        'area_opacity': 0.75,
        'area_legend': True,
        'area_legend_width': 150,
        'area_legend_height': 8,
        'area_legend_title': None,
        'mark_value': None,
        'mark_scale': 0.5,
        'mark_feature_name': None,
        'mark_position': ['lat', 'lon'],
        'mark_color': None,
        'mark_color_property': None,
        'mark_opacity': 0.8,
        'mark_stroke': 'gray',
        'mark_stroke_width': 1,
        'mark_min_ratio': 0,
        'mark_max_ratio': 20,
        'feature_name': None,
        'label_font_size': 10,
        'label_color': 'black',
        'leaflet_default_zoom': 11,
        'leaflet_center': None,
        'bounding_box': None,
        'leaflet_map_link': '&copy; <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, &copy; <a href="http://cartodb.com/attributions">CartoDB</a>',
        'leaflet_tile_layer': 'http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png',
        'graph_feature_name': None,
        'graph_position': ['lat', 'lon'],
        'force_edge_step_size': 0.0015,
        'force_edge_compatibility_threshold': 0.75,
        'force_edge_bundling_stiffness': 0.1,
        'force_edge_cycles': 6,
        'force_edge_iterations': 30,
        'link_color': 'steelblue',
        'link_opacity': 0.25,
        'link_width': 1,
        'link_color_value': 'weight',
        'link_color_scale_domain': None,
        'link_color_scale_range': None,
    },
    'read_only': {'L', 'map'}
}


def __topojson_options(config):
    '''Adjust container type and script requirements based on options.'''
    if config['options']['leaflet'] is True:
        # Leaflet renders into a div and needs its own script.
        config['container_type'] = 'div'
        config['requirements'].append('leaflet')
    else:
        config['container_type'] = 'svg'

    if config['options']['graph_bundle_links'] is True:
        config['requirements'].append('force_edge_bundling')

topojson = build_sketch(__topojson_args, opt_process=__topojson_options)


## Wordclouds
__wordcloud_args = {
    'requirements': ['d3', 'matta', 'wordcloud'],
    'visualization_name': 'matta.wordcloud',
    'figure_id': None,
    'container_type': 'svg',
    'data': {
        'items': None,
    },
    'options': {
        'background_color': None,
    },
    'variables': {
        'width': 960,
        'height': 500,
        'padding': {'left': 0, 'top': 0, 'right': 0, 'bottom': 0},
        'typeface': 'Arial',
        'font_size': 12,
        'font_weight': 'normal',
        'font_scale': 'sqrt',
        'min_font_size': 8,
        'max_font_size': 64,
        'font_opacity': 1.0,
        'rotation': 0.0,
        'color_scale_width': 150,
        'color_scale_height': 8,
        'color_scale_extent': [0.0, 1.0],
        'color_scale_domain': None,
        'color_scale_range': None,
    },
}

wordcloud = build_sketch(__wordcloud_args)


## Treemap
__treemap_args = {
    'requirements': ['d3', 'matta'],
    'visualization_name': 'matta.treemap',
    'figure_id': None,
    'container_type': 'div',
    'data': {
        'tree': None,
    },
    'options': {
        'background_color': None,
        'fit_labels': False,
    },
    'variables': {
        'width': 960,
        'height': 500,
        'padding': {'left': 0, 'top': 0, 'right': 0, 'bottom': 0},
        'target_ratio': golden_ratio,
        'node_padding': 0,
        'mode': 'squarify',
        'node_value': 'value',
        'node_children': 'children',
        'node_id': 'id',
        'node_label': None,
        'font_size': 14,
        'node_border': 1,
        'border_color': '#777',
        'sticky': True,
        'label_leaves_only': True
    },
}

treemap = build_sketch(__treemap_args)


## Circle Packing
__circlepack_args = {
    'requirements': ['d3', 'matta'],
    'visualization_name': 'matta.circlepack',
    'figure_id': None,
    'container_type': 'svg',
    'data': {
        'tree': None,
    },
    'options': {
        'background_color': None,
        'fit_labels': False,
        'events': ['node_click']
    },
    'variables': {
        'width': 960,
        'height': 500,
        'padding': {'left': 0, 'top': 0, 'right': 0, 'bottom': 0},
        'node_padding': 0,
        'node_value': 'value',
        'node_opacity': 0.25,
        'node_color': 'rgb(31, 119, 180)',
        'node_children': 'children',
        'node_id': 'id',
        'node_label': None,
        'font_size': 14,
        'node_border': 1,
        'node_border_color': 'rgb(31, 119, 180)',
        'sticky': True,
        'label_leaves_only': True
    },
}

circlepack = build_sketch(__circlepack_args)


## Parallel Coordinates
__parallel_coordinates_args = {
    'requirements': ['d3', 'matta'],
    'visualization_name': 'matta.pcoordinates',
    'figure_id': None,
    'container_type': 'svg',
    'data': {
        'dataframe': None,
    },
    'options': {
        'background_color': None,
        'events': ['click']
    },
    'variables': {
        'width': 960,
        'height': 500,
        'padding': {'left': 30, 'top': 30, 'right': 30, 'bottom': 30},
        'line_opacity': 0.75,
        'exclude': [],
        'line_stroke_width': 1.0,
        'labels': None
    },
}


def _pc_opts(options):
    '''Derive the axis columns from the dataframe, minus excluded ones.'''
    options['variables']['columns'] = [c for c in options['data']['dataframe'].columns.tolist()
                                      if c not in options['variables']['exclude']]

parallel_coordinates = build_sketch(__parallel_coordinates_args, opt_process=_pc_opts)


## Force Directed
__force_directed_args = {
    'requirements': ['d3', 'matta'],
    'visualization_name': 'matta.force',
    'figure_id': None,
    'container_type': 'svg',
    'data': {
        'graph': None,
    },
    'options': {
        'background_color': None,
    },
    'variables': {
        'width': 960,
        'height': 500,
        'padding': {'left': 30, 'top': 30, 'right': 30, 'bottom': 30},
        'force_gravity': 0.1,
        'force_charge': -30,
        'link_distance': 150,
        'link_strength': 1,
        'charge_distance': 1000,
        'friction': 0.9,
        'font_size': 10,
        'node_id': 'id',
        'node_ratio': 12,
        'node_value': None,
        'node_min_ratio': 8,
        'node_max_ratio': 24,
        'node_scale': 'linear',
        'node_labels': True,
        'theta': 0.8,
        'alpha': 0.1,
        'node_padding': 16,
        'avoid_collisions': False,
        'clamp_to_viewport': False,
    },
}

force_directed = build_sketch(__force_directed_args)


# Cartogram
__cartogram_args = {
    'requirements': ['d3', 'matta', 'topojson', 'cartogram'],
    'visualization_name': 'matta.cartogram',
    'figure_id': None,
    'container_type': 'svg',
    'data': {
        'geojson': None,
        'geometry': None,
        'area_dataframe': None,
    },
    'options': {
    },
    'variables': {
        'width': 960,
        'height': 500,
        'padding': {'left': 0, 'top': 0, 'right': 0, 'bottom': 0},
        'feature_id': 'id',
        'label': True,
        'legend': False,
        'path_opacity': 1.0,
        'path_stroke': 'gray',
        'path_stroke_width': 1.0,
        'fill_color': 'none',
        'cartogram_value': 'value',
        'area_feature_name': 'id',
        'area_value': None,
        'area_color_legend': True,
        'area_color_scale': None,
        'area_legend_width': 150,
        'area_legend_height': 8,
        'area_legend_title': None,
        'area_opacity': 0.75,
        'area_transition_delay': 0,
        'feature_name': None,
        'label_font_size': 10,
        'label_color': 'black',
        'leaflet_default_zoom': 11,
        'leaflet_center': None,
        'bounding_box': None,
        'na_value': 0.000001
    },
    'read_only': {
    }
}

cartogram = build_sketch(__cartogram_args)
zhangweiabc/matta
matta/__init__.py
Python
bsd-3-clause
11,214
import sys import time import subprocess from six.moves.urllib.parse import urlencode import scrapy from scrapy.command import ScrapyCommand from scrapy.contrib.linkextractors import LinkExtractor class Command(ScrapyCommand): default_settings = { 'LOG_LEVEL': 'INFO', 'LOGSTATS_INTERVAL': 1, 'CLOSESPIDER_TIMEOUT': 10, } def short_desc(self): return "Run quick benchmark test" def run(self, args, opts): with _BenchServer(): self.crawler_process.crawl(_BenchSpider, total=100000) self.crawler_process.start() class _BenchServer(object): def __enter__(self): from scrapy.utils.test import get_testenv pargs = [sys.executable, '-u', '-m', 'scrapy.utils.benchserver'] self.proc = subprocess.Popen(pargs, stdout=subprocess.PIPE, env=get_testenv()) self.proc.stdout.readline() def __exit__(self, exc_type, exc_value, traceback): self.proc.kill() self.proc.wait() time.sleep(0.2) class _BenchSpider(scrapy.Spider): """A spider that follows all links""" name = 'follow' total = 10000 show = 20 baseurl = 'http://localhost:8998' link_extractor = LinkExtractor() def start_requests(self): qargs = {'total': self.total, 'show': self.show} url = '{}?{}'.format(self.baseurl, urlencode(qargs, doseq=1)) return [scrapy.Request(url, dont_filter=True)] def parse(self, response): for link in self.link_extractor.extract_links(response): yield scrapy.Request(link.url, callback=self.parse)
hy-2013/scrapy
scrapy/commands/bench.py
Python
bsd-3-clause
1,647
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes that deal with computing intervals from arrays of
values based on various criteria.
"""

import abc

import numpy as np

from .transform import BaseTransform

__all__ = ['BaseInterval', 'ManualInterval', 'MinMaxInterval',
           'AsymmetricPercentileInterval', 'PercentileInterval',
           'ZScaleInterval']


class BaseInterval(BaseTransform):
    """
    Base class for the interval classes, which, when called with an array
    of values, return an interval computed following different algorithms.
    """

    @abc.abstractmethod
    def get_limits(self, values):
        """
        Return the minimum and maximum value in the interval based on the
        values provided.

        Parameters
        ----------
        values : `~numpy.ndarray`
            The image values.

        Returns
        -------
        vmin, vmax : float
            The mininium and maximum image value in the interval.
        """
        raise NotImplementedError('Needs to be implemented in a subclass.')

    def __call__(self, values, clip=True, out=None):
        """
        Transform values using this interval.

        Maps ``values`` linearly so that ``vmin`` -> 0 and ``vmax`` -> 1.

        Parameters
        ----------
        values : array-like
            The input values.
        clip : bool, optional
            If `True` (default), values outside the [0:1] range are clipped
            to the [0:1] range.
        out : `~numpy.ndarray`, optional
            If specified, the output values will be placed in this array
            (typically used for in-place calculations).

        Returns
        -------
        result : `~numpy.ndarray`
            The transformed values.
        """
        vmin, vmax = self.get_limits(values)
        # np.subtract is used (rather than `-`) so the same code path can
        # write into `out` for in-place scaling.
        if out is None:
            values = np.subtract(values, float(vmin))
        else:
            if out.dtype.kind != 'f':
                raise TypeError('Can only do in-place scaling for '
                                'floating-point arrays')
            values = np.subtract(values, float(vmin), out=out)
        # Guard against division by zero for a degenerate (flat) interval.
        if (vmax - vmin) != 0:
            np.true_divide(values, vmax - vmin, out=values)
        if clip:
            np.clip(values, 0., 1., out=values)
        return values


class ManualInterval(BaseInterval):
    """
    Interval based on user-specified values.

    Parameters
    ----------
    vmin : float, optional
        The minimum value in the scaling.  Defaults to the image minimum
        (ignoring NaNs)
    vmax : float, optional
        The maximum value in the scaling.  Defaults to the image maximum
        (ignoring NaNs)
    """

    def __init__(self, vmin=None, vmax=None):
        self.vmin = vmin
        self.vmax = vmax

    def get_limits(self, values):
        # Make sure values is a Numpy array
        values = np.asarray(values).ravel()
        # Filter out invalid values (inf, nan)
        values = values[np.isfinite(values)]
        # Only fall back to the data extremes for the limits the user left
        # unspecified.
        vmin = np.min(values) if self.vmin is None else self.vmin
        vmax = np.max(values) if self.vmax is None else self.vmax
        return vmin, vmax


class MinMaxInterval(BaseInterval):
    """
    Interval based on the minimum and maximum values in the data.
    """

    def get_limits(self, values):
        # Make sure values is a Numpy array
        values = np.asarray(values).ravel()
        # Filter out invalid values (inf, nan)
        values = values[np.isfinite(values)]
        return np.min(values), np.max(values)


class AsymmetricPercentileInterval(BaseInterval):
    """
    Interval based on a keeping a specified fraction of pixels (can be
    asymmetric).

    Parameters
    ----------
    lower_percentile : float
        The lower percentile below which to ignore pixels.
    upper_percentile : float
        The upper percentile above which to ignore pixels.
    n_samples : int, optional
        Maximum number of values to use. If this is specified, and there
        are more values in the dataset as this, then values are randomly
        sampled from the array (with replacement).
    """

    def __init__(self, lower_percentile, upper_percentile, n_samples=None):
        self.lower_percentile = lower_percentile
        self.upper_percentile = upper_percentile
        self.n_samples = n_samples

    def get_limits(self, values):
        # Make sure values is a Numpy array
        values = np.asarray(values).ravel()
        # If needed, limit the number of samples. We sample with replacement
        # since this is much faster.
        # NOTE(review): sampling happens *before* the finite filter below, so
        # non-finite entries can shrink the effective sample — confirm this
        # is acceptable upstream.
        if self.n_samples is not None and values.size > self.n_samples:
            values = np.random.choice(values, self.n_samples)
        # Filter out invalid values (inf, nan)
        values = values[np.isfinite(values)]
        # Determine values at percentiles
        vmin, vmax = np.percentile(values, (self.lower_percentile,
                                            self.upper_percentile))
        return vmin, vmax


class PercentileInterval(AsymmetricPercentileInterval):
    """
    Interval based on a keeping a specified fraction of pixels.

    Parameters
    ----------
    percentile : float
        The fraction of pixels to keep. The same fraction of pixels is
        eliminated from both ends.
    n_samples : int, optional
        Maximum number of values to use. If this is specified, and there
        are more values in the dataset as this, then values are randomly
        sampled from the array (with replacement).
    """

    def __init__(self, percentile, n_samples=None):
        # Split the discarded fraction evenly between the two tails.
        lower_percentile = (100 - percentile) * 0.5
        upper_percentile = 100 - lower_percentile
        super().__init__(
            lower_percentile, upper_percentile, n_samples=n_samples)


class ZScaleInterval(BaseInterval):
    """
    Interval based on IRAF's zscale.

    https://iraf.net/forum/viewtopic.php?showtopic=134139

    Original implementation:
    https://github.com/spacetelescope/stsci.numdisplay/blob/master/lib/stsci/numdisplay/zscale.py

    Licensed under a 3-clause BSD style license (see AURA_LICENSE.rst).

    Parameters
    ----------
    nsamples : int, optional
        The number of points in the array to sample for determining
        scaling factors.  Defaults to 1000.
    contrast : float, optional
        The scaling factor (between 0 and 1) for determining the minimum
        and maximum value.  Larger values increase the difference between
        the minimum and maximum values used for display.  Defaults to 0.25.
    max_reject : float, optional
        If more than ``max_reject * npixels`` pixels are rejected, then the
        returned values are the minimum and maximum of the data.  Defaults
        to 0.5.
    min_npixels : int, optional
        If less than ``min_npixels`` pixels are rejected, then the returned
        values are the minimum and maximum of the data.  Defaults to 5.
    krej : float, optional
        The number of sigma used for the rejection.  Defaults to 2.5.
    max_iterations : int, optional
        The maximum number of iterations for the rejection.  Defaults to 5.
    """

    def __init__(self, nsamples=1000, contrast=0.25, max_reject=0.5,
                 min_npixels=5, krej=2.5, max_iterations=5):
        self.nsamples = nsamples
        self.contrast = contrast
        self.max_reject = max_reject
        self.min_npixels = min_npixels
        self.krej = krej
        self.max_iterations = max_iterations

    def get_limits(self, values):
        # Sample the image: take at most `nsamples` evenly strided finite
        # values, sorted ascending.
        values = np.asarray(values)
        values = values[np.isfinite(values)]
        stride = int(max(1.0, values.size / self.nsamples))
        samples = values[::stride][:self.nsamples]
        samples.sort()
        npix = len(samples)
        vmin = samples[0]
        vmax = samples[-1]
        # Fit a line to the sorted array of samples
        minpix = max(self.min_npixels, int(npix * self.max_reject))
        x = np.arange(npix)
        ngoodpix = npix
        last_ngoodpix = npix + 1
        # Bad pixels mask used in k-sigma clipping
        badpix = np.zeros(npix, dtype=bool)
        # Kernel used to dilate the bad pixels mask
        ngrow = max(1, int(npix * 0.01))
        kernel = np.ones(ngrow, dtype=bool)
        # NOTE(review): if the loop below breaks on its very first iteration
        # (npix < minpix), `fit` is never bound and the later unpacking
        # raises NameError — confirm callers cannot hit that case.
        for niter in range(self.max_iterations):
            # Stop when clipping has converged or too few pixels remain.
            if ngoodpix >= last_ngoodpix or ngoodpix < minpix:
                break
            # Weighted least-squares line; rejected pixels get zero weight.
            fit = np.polyfit(x, samples, deg=1, w=(~badpix).astype(int))
            fitted = np.poly1d(fit)(x)
            # Subtract fitted line from the data array
            flat = samples - fitted
            # Compute the k-sigma rejection threshold
            threshold = self.krej * flat[~badpix].std()
            # Detect and reject pixels further than k*sigma from the
            # fitted line
            badpix[(flat < - threshold) | (flat > threshold)] = True
            # Convolve with a kernel of length ngrow
            # (boolean convolution acts as a dilation of the bad-pixel mask)
            badpix = np.convolve(badpix, kernel, mode='same')
            last_ngoodpix = ngoodpix
            ngoodpix = np.sum(~badpix)
        slope, intercept = fit
        if ngoodpix >= minpix:
            if self.contrast > 0:
                # A smaller contrast stretches the displayed interval.
                slope = slope / self.contrast
            center_pixel = (npix - 1) // 2
            median = np.median(samples)
            vmin = max(vmin, median - (center_pixel - 1) * slope)
            vmax = min(vmax, median + (npix - center_pixel) * slope)
        return vmin, vmax
MSeifert04/astropy
astropy/visualization/interval.py
Python
bsd-3-clause
9,467
# -*- coding: utf-8 -*- """Public forms.""" from flask_wtf import Form from wtforms import PasswordField, StringField from wtforms.validators import DataRequired from AuctionBot.user.models import User class LoginForm(Form): """Login form.""" username = StringField('Username', validators=[DataRequired()]) password = PasswordField('Password', validators=[DataRequired()]) def __init__(self, *args, **kwargs): """Create instance.""" super(LoginForm, self).__init__(*args, **kwargs) self.user = None def validate(self): """Validate the form.""" initial_validation = super(LoginForm, self).validate() if not initial_validation: return False self.user = User.query.filter_by(username=self.username.data).first() if not self.user: self.username.errors.append('Unknown username') return False if not self.user.check_password(self.password.data): self.password.errors.append('Invalid password') return False if not self.user.active: self.username.errors.append('User not activated') return False return True
aaronbenz/auctionbot-back
AuctionBot/public/forms.py
Python
mit
1,203
class Solution(object): def longestCommonPrefix(self, strs): """ :type strs: List[str] :rtype: str """ res = '' if not strs: return res common = strs[0] for s in strs: i = 0 while i < len(s) and i < len(common) and common[i] == s[i]: i +=1 common = common[:i] return common
tedye/leetcode
Python/leetcode.014.Longest-Common-Prefix.py
Python
mit
423
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved. # Use of this source code is governed by a BSD-style license (see the COPYING file). """ QiBuild """ from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import os def test_simple(qidoc_action): """ Test Simple """ world_proj = qidoc_action.add_test_project("world") build_dir = os.path.join(world_proj.path, "build-doc") assert not os.path.exists(build_dir) qidoc_action("build", "world") assert os.path.exists(build_dir) qidoc_action("clean", "world") assert os.path.exists(build_dir) qidoc_action("clean", "world", "--force") assert not os.path.exists(build_dir)
aldebaran/qibuild
python/qidoc/test/test_qidoc_clean.py
Python
bsd-3-clause
785
from .Core import Core Core().run()
GabrielTMartinez/product-delivery-problem
core/__main__.py
Python
apache-2.0
37
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unittests for test_dispatcher.py."""

# pylint: disable=no-self-use
# pylint: disable=protected-access

import unittest

from pylib.base import base_test_result
from pylib.base import test_collection
from pylib.base import test_dispatcher
from pylib.constants import host_paths

with host_paths.SysPath(host_paths.DEVIL_PATH):
  from devil.android import device_utils
  from devil.android.sdk import adb_wrapper
  from devil.constants import exit_codes
  from devil.utils import watchdog_timer

with host_paths.SysPath(host_paths.PYMOCK_PATH):
  import mock  # pylint: disable=import-error


class TestException(Exception):
  # Sentinel exception raised by MockRunnerException to test re-raising.
  pass


def _MockDevice(serial):
  # Build a MagicMock that looks like a DeviceUtils with the given serial.
  d = mock.MagicMock(spec=device_utils.DeviceUtils)
  d.__str__.return_value = serial
  d.adb = mock.MagicMock(spec=adb_wrapper.AdbWrapper)
  d.adb.GetDeviceSerial = mock.MagicMock(return_value=serial)
  d.IsOnline = mock.MagicMock(return_value=True)
  return d


class MockRunner(object):
  """A mock TestRunner."""

  def __init__(self, device=None, shard_index=0):
    self.device = device or _MockDevice('0')
    self.device_serial = self.device.adb.GetDeviceSerial()
    self.shard_index = shard_index
    # Counters let tests assert SetUp/TearDown were each called once.
    self.setups = 0
    self.teardowns = 0

  def RunTest(self, test):
    # Always passes; returns (results, tests_to_retry=None).
    results = base_test_result.TestRunResults()
    results.AddResult(
        base_test_result.BaseTestResult(test, base_test_result.ResultType.PASS))
    return (results, None)

  def SetUp(self):
    self.setups += 1

  def TearDown(self):
    self.teardowns += 1


class MockRunnerFail(MockRunner):
  # Always fails and reports the test for retry.

  def RunTest(self, test):
    results = base_test_result.TestRunResults()
    results.AddResult(
        base_test_result.BaseTestResult(test, base_test_result.ResultType.FAIL))
    return (results, test)


class MockRunnerFailTwice(MockRunner):
  # Fails the first two invocations, then passes — exercises retry logic.

  def __init__(self, device=None, shard_index=0):
    super(MockRunnerFailTwice, self).__init__(device, shard_index)
    self._fails = 0

  def RunTest(self, test):
    self._fails += 1
    results = base_test_result.TestRunResults()
    if self._fails <= 2:
      results.AddResult(base_test_result.BaseTestResult(
          test, base_test_result.ResultType.FAIL))
      return (results, test)
    else:
      results.AddResult(base_test_result.BaseTestResult(
          test, base_test_result.ResultType.PASS))
      return (results, None)


class MockRunnerException(MockRunner):
  # Raises instead of returning results — exercises exception propagation.

  def RunTest(self, test):
    raise TestException


class TestFunctions(unittest.TestCase):
  """Tests test_dispatcher._RunTestsFromQueue."""

  @staticmethod
  def _RunTests(mock_runner, tests):
    # Drive _RunTestsFromQueue directly and merge the per-run results.
    results = []
    tests = test_collection.TestCollection(
        [test_dispatcher._Test(t) for t in tests])
    test_dispatcher._RunTestsFromQueue(mock_runner, tests, results,
                                       watchdog_timer.WatchdogTimer(None), 2)
    run_results = base_test_result.TestRunResults()
    for r in results:
      run_results.AddTestRunResults(r)
    return run_results

  def testRunTestsFromQueue(self):
    results = TestFunctions._RunTests(MockRunner(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 2)
    self.assertEqual(len(results.GetNotPass()), 0)

  def testRunTestsFromQueueRetry(self):
    # Persistent failures stay failures after exhausting retries.
    results = TestFunctions._RunTests(MockRunnerFail(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 2)

  def testRunTestsFromQueueFailTwice(self):
    # Two failures then a pass should end up as a pass.
    results = TestFunctions._RunTests(MockRunnerFailTwice(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 2)
    self.assertEqual(len(results.GetNotPass()), 0)

  def testSetUp(self):
    runners = []
    counter = test_dispatcher._ThreadSafeCounter()
    test_dispatcher._SetUp(MockRunner, _MockDevice('0'), runners, counter)
    self.assertEqual(len(runners), 1)
    self.assertEqual(runners[0].setups, 1)

  def testThreadSafeCounter(self):
    counter = test_dispatcher._ThreadSafeCounter()
    # NOTE(review): `xrange` is Python-2 only; this file targets the py2-era
    # Chromium toolchain.
    for i in xrange(5):
      self.assertEqual(counter.GetAndIncrement(), i)

  def testApplyMaxPerRun(self):
    self.assertEqual(
        ['A:B', 'C:D', 'E', 'F:G', 'H:I'],
        test_dispatcher.ApplyMaxPerRun(['A:B', 'C:D:E', 'F:G:H:I'], 2))


class TestThreadGroupFunctions(unittest.TestCase):
  """Tests test_dispatcher._RunAllTests and test_dispatcher._CreateRunners."""

  def setUp(self):
    self.tests = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    # All runners share one collection so tests are distributed, not copied.
    shared_test_collection = test_collection.TestCollection(
        [test_dispatcher._Test(t) for t in self.tests])
    self.test_collection_factory = lambda: shared_test_collection

  def testCreate(self):
    runners = test_dispatcher._CreateRunners(
        MockRunner, [_MockDevice('0'), _MockDevice('1')])
    for runner in runners:
      self.assertEqual(runner.setups, 1)
    self.assertEqual(set([r.device_serial for r in runners]),
                     set(['0', '1']))
    self.assertEqual(set([r.shard_index for r in runners]),
                     set([0, 1]))

  def testRun(self):
    runners = [MockRunner(_MockDevice('0')), MockRunner(_MockDevice('1'))]
    results, exit_code = test_dispatcher._RunAllTests(
        runners, self.test_collection_factory, 0)
    self.assertEqual(len(results.GetPass()), len(self.tests))
    self.assertEqual(exit_code, 0)

  def testTearDown(self):
    runners = [MockRunner(_MockDevice('0')), MockRunner(_MockDevice('1'))]
    test_dispatcher._TearDownRunners(runners)
    for runner in runners:
      self.assertEqual(runner.teardowns, 1)

  def testRetry(self):
    runners = test_dispatcher._CreateRunners(
        MockRunnerFail, [_MockDevice('0'), _MockDevice('1')])
    results, exit_code = test_dispatcher._RunAllTests(
        runners, self.test_collection_factory, 0)
    self.assertEqual(len(results.GetFail()), len(self.tests))
    self.assertEqual(exit_code, exit_codes.ERROR)

  def testReraise(self):
    # A runner exception must propagate out of _RunAllTests.
    runners = test_dispatcher._CreateRunners(
        MockRunnerException, [_MockDevice('0'), _MockDevice('1')])
    with self.assertRaises(TestException):
      test_dispatcher._RunAllTests(runners, self.test_collection_factory, 0)


class TestShard(unittest.TestCase):
  """Tests test_dispatcher.RunTests with sharding."""

  @staticmethod
  def _RunShard(runner_factory):
    return test_dispatcher.RunTests(
        ['a', 'b', 'c'], runner_factory, [_MockDevice('0'), _MockDevice('1')],
        shard=True)

  def testShard(self):
    results, exit_code = TestShard._RunShard(MockRunner)
    self.assertEqual(len(results.GetPass()), 3)
    self.assertEqual(exit_code, 0)

  def testFailing(self):
    results, exit_code = TestShard._RunShard(MockRunnerFail)
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 3)
    self.assertEqual(exit_code, exit_codes.ERROR)

  def testNoTests(self):
    results, exit_code = test_dispatcher.RunTests(
        [], MockRunner, [_MockDevice('0'), _MockDevice('1')], shard=True)
    self.assertEqual(len(results.GetAll()), 0)
    self.assertEqual(exit_code, exit_codes.ERROR)


class TestReplicate(unittest.TestCase):
  """Tests test_dispatcher.RunTests with replication."""

  @staticmethod
  def _RunReplicate(runner_factory):
    return test_dispatcher.RunTests(
        ['a', 'b', 'c'], runner_factory, [_MockDevice('0'), _MockDevice('1')],
        shard=False)

  def testReplicate(self):
    results, exit_code = TestReplicate._RunReplicate(MockRunner)
    # We expect 6 results since each test should have been run on every device
    self.assertEqual(len(results.GetPass()), 6)
    self.assertEqual(exit_code, 0)

  def testFailing(self):
    results, exit_code = TestReplicate._RunReplicate(MockRunnerFail)
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 6)
    self.assertEqual(exit_code, exit_codes.ERROR)

  def testNoTests(self):
    results, exit_code = test_dispatcher.RunTests(
        [], MockRunner, [_MockDevice('0'), _MockDevice('1')], shard=False)
    self.assertEqual(len(results.GetAll()), 0)
    self.assertEqual(exit_code, exit_codes.ERROR)


if __name__ == '__main__':
  unittest.main()
junhuac/MQUIC
src/build/android/pylib/base/test_dispatcher_unittest.py
Python
mit
8,256
#/u/GoldenSights import traceback import praw # simple interface to the reddit API, also handles rate limiting of requests import time import sqlite3 '''USER CONFIGURATION''' APP_ID = "" APP_SECRET = "" APP_URI = "" APP_REFRESH = "" # https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/ USERAGENT = "" #This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot" SUBREDDIT = "CopperplateGothic" #This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subs, use "sub1+sub2+sub3+...". For all use "all" KEYWORDS = ['Request', 'Submitted', 'Release', 'Concept'] #Any post containing these words will be saved. RSAVE = False #Do you want the bot to save via Reddit Saving? Use True or False (Use capitals! no quotations!) #praw DOES NOT allow comments to be saved. Don't ask me why. This will save the submission the comment is connected to. MAILME = False #Do you want the bot to send you a PM when it gets something? Use True or False (Use capitals! No quotations!) RECIPIENT = "GoldenSights" #If MAILME is set to True, you will need a name for the PM to go to MTITLE = "SubDump automated message" #If MAILME is set to True, you will need the PM to have a subject line. MHEADER = "Comments containing your keywords:" #This is part of the message body, on a line above the list of results. You can set it to "" if you just want the list by itself. SUBDUMP = True #Do you want the bot to dump into a subreddit as posts? Use True or False (Use capitals! No quotations!) DSUB = "GoldTesting" #If SUBDUMP is set to True, you will need to choose a subreddit to submit to. POSTTITLE = "_title_" #This is the title of the post that will go in DSUB #You may use the following injectors to create a dynamic title #_author_ #_subreddit_ #_score_ #_title_ TRUEURL = False #If this is True, the dumped post will point to the URL that the orginal submission used. 
#If this is False the dumped post will point to the reddit submission permalink MAXPOSTS = 100 #This is how many posts you want to retrieve all at once. PRAW can download 100 at a time. WAIT = 20 #This is how many seconds you will wait between cycles. The bot is completely inactive during this time. '''All done!''' WAITS = str(WAIT) try: import bot USERAGENT = bot.aG except ImportError: pass sql = sqlite3.connect('sql.db') print('Loaded SQL Database') cur = sql.cursor() cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)') print('Loaded Completed table') sql.commit() r = praw.Reddit(USERAGENT) r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI) r.refresh_access_information(APP_REFRESH) def scansub(): print('Searching '+ SUBREDDIT + '.') subreddit = r.get_subreddit(SUBREDDIT) posts = subreddit.get_new(limit=MAXPOSTS) result = [] authors = [] for post in posts: pid = post.id pbody = post.title.lower() + '\n' + post.selftext.lower() cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid]) if not cur.fetchone(): cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) sql.commit() if post.subreddit.display_name.lower() == DSUB.lower(): continue if KEYWORDS == [] or any(key.lower() in pbody for key in KEYWORDS): try: pauthor = post.author.name print(pid + ', ' + pauthor) if TRUEURL == True: plink = post.url else: plink = post.permalink result.append(plink) authors.append(pauthor + ' in /r/' + post.subreddit.display_name) if RSAVE == True: submission = post.submission submission.save() print('\tSaved submission') if SUBDUMP == True: print('\tDumping to ' + DSUB + '...') newtitle = POSTTITLE newtitle = newtitle.replace('_author_', pauthor) newtitle = newtitle.replace('_subreddit_', post.subreddit.display_name) newtitle = newtitle.replace('_score_', str(post.score) + ' points') newtitle = newtitle.replace('_title_', post.title) if len(newtitle) > 300: newtitle = newtitle[:297] create = r.submit(DSUB, newtitle, url=plink, resubmit=True, captcha = None) print('\tDumped to ' + 
DSUB + '.') except AttributeError: print(pid + ': Author deleted. Ignoring comment') if len(result) > 0 and MAILME == True: for m in range(len(result)): result[m] = '- [%s](%s)' % (authors[m], result[m]) r.send_message(RECIPIENT, MTITLE, MHEADER + '\n\n' + '\n\n'.join(result), captcha=None) print('Mailed ' + RECIPIENT) while True: try: scansub() except Exception as e: traceback.print_exc() print('Running again in ' + WAITS + ' seconds \n') sql.commit() time.sleep(WAIT)
TacticalGoat/reddit
SubDumpPost/subdumppost.py
Python
mit
5,238
# -*- coding: utf-8 -*- # Generated by Django 1.9.11 on 2016-12-05 14:36 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('organization_projects', '0030_auto_20161107_1828'), ] operations = [ migrations.AlterField( model_name='projectimage', name='type', field=models.CharField(choices=[('logo', 'logo'), ('logo_white', 'logo white'), ('logo_black', 'logo black'), ('slider', 'slider'), ('card', 'card'), ('page_slider', 'page - slider'), ('page_featured', 'page - featured')], max_length=64, verbose_name='type'), ), ]
Ircam-Web/mezzanine-organization
organization/projects/migrations/0031_auto_20161205_1536.py
Python
agpl-3.0
690
# -*- coding: utf-8 -*- ''' Test invoice to pdf ''' from fpdf import FPDF TITLE = u'Δοκιμαστικό κείμενο για τουθ κάλους και καλά κρασιά' class PDF(FPDF): ''' Final class ... ''' def header(self): '''Arial bold 15''' self.add_font('alkaios', '', 'Alkaios.ttf', True) self.add_font('alkaios', 'I', 'Alkaios-Italic.ttf', True) self.add_font('alkaios', 'B', 'Alkaios-Bold.ttf', True) self.set_font('alkaios', 'B', 15) # Calculate width of title and position width = self.get_string_width(TITLE) + 6 self.set_x((210 - width) / 2) # Colors of frame, background and text self.set_draw_color(255, 155, 255) self.set_fill_color(155, 255, 255) self.set_text_color(0, 0, 0) # Thickness of frame (1 mm) self.set_line_width(0.2) # title self.hyd(6) self.hyd(20) self.hyd(30) self.text(10, 20, 'Testing this status') self.cell(width, 9, TITLE, 1, 0, 'C', 0) # Line break self.ln(10) def hyd(self, rotation): '''Rotate''' self.rotate(rotation) self.text(10, 20, 'Testing this status') self.rotate(0) def footer(self): '''Position at 1.5 cm from bottom''' self.set_y(-15) # Arial italic 8 self.set_font('alkaios', '', 8) # Text color in gray self.set_text_color(0) # Page number self.cell(0, 10, u'Σελίδα ' + str(self.page_no()), 0, 0, 'C') def chapter_title(self, num, label): '''Chapter Title''' self.set_font('alkaios', 'B', 12) # Background color self.set_fill_color(200, 220, 255) # Title self.cell(0, 6, u"Κεφάλαιο %d : %s" % (num, label), 0, 1, 'L', 1) # Line break self.ln(4) def chapter_body(self, name): '''Read text file''' txt = file(name).read() # Times 12 self.set_font('alkaios', '', 12) # Output justified text self.multi_cell(0, 5, txt) # Line break self.ln() # Mention in italics self.set_font('', 'I') self.cell(0, 5, '(end of excerpt)') def print_chapter(self, num, title, name): self.add_page() self.chapter_title(num, title) self.chapter_body(name) pdf = PDF(orientation='P') pdf.set_title('just a testing') pdf.set_subject('General Subject !!') pdf.set_author('Jules Verne') 
pdf.set_keywords('This is a test document') pdf.print_chapter(1, u'ΤΗΣ ΠΟΥΤΑΝΑΣ ΤΟ ΚΑΓΓΕΛΟ', '20k_c1.txt') pdf.print_chapter(2, u'ΓΑΜΗΣΕ ΤΑ ΚΑΙ ΑΦΗΣΕ ΤΑ', '20k_c2.txt') pdf.output('tuto3.pdf')
tedlaz/pyted
tests/pysqlqt/src/fpdf/tuto3.py
Python
gpl-3.0
2,766
from kik.messages.message import Message class StickerMessage(Message): """ A full sticker message object, as documented at `<https://dev.kik.com/#/docs/messaging#sticker>`_. """ def __init__(self, sticker_pack_id=None, sticker_url=None, **kwargs): super(StickerMessage, self).__init__(type='sticker', **kwargs) self.sticker_pack_id = sticker_pack_id self.sticker_url = sticker_url @classmethod def property_mapping(cls): mapping = super(StickerMessage, cls).property_mapping() mapping.update({ 'sticker_pack_id': 'stickerPackId', 'sticker_url': 'stickerUrl' }) return mapping
SPriyaJain/studybuddy
env/lib/python2.7/site-packages/kik/messages/sticker.py
Python
mit
684
import numpy as np import rough_surfaces.params as rp import rough_surfaces.generate as rg import rough_surfaces.analyse as ra import rough_surfaces.surface as rs import rough_surfaces.contact as rc import matplotlib.pyplot as plt from matplotlib import rcParams import rough_surfaces.plot as rplt # TODO dont do this rcParams['savefig.dpi'] = 300 rcParams['legend.loc'] = 'upper right' rcParams['image.cmap'] = 'hot' N_power_of_two = 9 surface_params = rp.SelfAffineParameters() surface = rg.make_self_affine(surface_params, N_power_of_two, seed=0) E, nu = 1.0E+9, 0.3 dxy = 1.0E-3 nominal_stress = 1.0E7 contact = rc.contact_FFT(surface, nominal_stress, E, nu, verbose=2, err_lim=1.0E-8) if 1: fig, ax = plt.subplots() rplt.traces(ax, surface, [contact.u], 128) unit_den = '(m)' ax.set_xlabel('x ' + unit_den) ax.set_ylabel('y ' + unit_den) plt.legend() plt.show() if 1: fig, ax = plt.subplots() N = surface.shape[0] L = rs.length(surface) x = np.linspace(-L/2., L/2., N) XX, YY = np.meshgrid(x, x) pressure_plot = ax.pcolor(XX, YY, contact.p) ax.axis('equal') unit_den = '(m)' ax.set_xlabel('x ' + unit_den) ax.set_ylabel('y ' + unit_den) cbar = plt.colorbar(pressure_plot) cbar.set_label('Pressure (Pa)', rotation=270) plt.show()
plang85/rough_surfaces
examples/example_contact.py
Python
mit
1,319
"""Provide a mock package component.""" from .const import TEST # noqa: F401 DOMAIN = "test_package" async def async_setup(hass, config): """Mock a successful setup.""" return True
aronsky/home-assistant
tests/testing_config/custom_components/test_package/__init__.py
Python
apache-2.0
193
# coding=utf-8 import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D as a3d import scipy.io as sio def plotSurface(): f1 = plt.figure(1) ax = a3d(f1) x = np.arange(-4, 4, 0.2) y = np.arange(-4, 4, 0.2) (x, y) = np.meshgrid(x, y) r = np.sqrt(x * x + y * y) z = np.sin(r) ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap="rainbow") plt.show() def plotScatter(): data = sio.loadmat("4a.mat") m = data["data"] (x, y, z) = (m[0], m[1], m[2]) f2 = plt.figure(2) ax = plt.subplot(projection="3d") #创建一个三维的绘图工程 #将数据点分成三部分画,在颜色上有区分度 ax.scatter(x[:1000], y[:1000], z[:1000], cmap="yellow") ax.scatter(x[1000:4000], y[1000:4000], z[1000:4000], cmap="red") ax.scatter(x[4000:], y[4000:], z[4000:], cmap="green") ax.set_zlabel("z") ax.set_ylabel("y") ax.set_xlabel("x") plt.show() if __name__ == "__main__": plotSurface() plotScatter()
Ginkgo-Biloba/Misc-Python
numpy/Plot3D.py
Python
gpl-3.0
950
#!/usr/bin/env python # -*- coding: utf-8 -*- # Pydoc Checker # Copyright © 2013 Stefano Maggiolo <s.maggiolo@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Test instance.""" def foo(a, b): """foo a (testa.modulea.ClassA): a ClassA. b (int): a int. return (testa.modulea.ClassA): a. """ if b == 0: return a else: return None class ClassA(object): def bar(self, a, b): """bar a (testa.modulea.ClassA): a ClassA. b (int): a int. return (testa.modulea.ClassA): a. """ if b == 0: return a else: return None
stefano-maggiolo/pydocchecker
testsuite/testa/modulea.py
Python
gpl-3.0
1,245
#!/usr/bin/python # Copyright 2012, 2013 Andrew Lamoureux # # This file is a part of FunChess # # FunChess is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #!/usr/bin/python # parse BPGN (game metadata + moves) into position strings import os import re import sys import Common import ChessMove import PgnTokenizer from ChessState import ChessState ############################################################################### # Match # - contains tags, comments, moves, and states of a bughouse chess match # - is able to load itself from bpgn match text ############################################################################### class PgnChessMatch: def __init__(self): self.initState = ChessState(Common.initChessFEN) self.moves = [] self.tags = {} self.comments = [] self.states = [self.initState] def incrMoveNum(self, fullMove): old = fullMove m = re.match(r'^(\d+)([AaBb])$', fullMove) [num, letter] = [int(m.group(1)), m.group(2)] if letter in 'ab': num += 1 letter = {'A':'a', 'a':'A', 'B':'b', 'b':'B'}[letter] new = str(num) + letter #print "incremented from %s to %s" % (old, new) return new def sanityCheck(self): # does the game have ANY moves in it? if len(self.moves) == 0: raise MatchZeroMovesException("no moves recorded") # does the game have missing/out-of-order moves in it? 
expectA = '1A' expectB = '1B' for m in self.moves: if 'TIME_FORFEIT' in m.flags: continue # bughouse-db games store a repeated move at the end when # that player forfeits on time fullMove = m.moveNum + m.player if fullMove == expectA: expectA = self.incrMoveNum(expectA) elif fullMove == expectB: expectB = self.incrMoveNum(expectB) else: raise MatchMovesOOOException("expected move %s or %s (got instead:\n %s)" % \ (expectA, expectB, str(m))) # - parses, populates the tags member # - parses, populates the moves member # - parses, populates the comments member # - calculates the states member # def parsePgn(self, text): tokens = PgnTokenizer.tokenize(text) moveNum = 1 player = 'W' while tokens: token = tokens.pop(0) #print "on token: -%s-" % token # tag tokens eg: [Event "May 2013 Tourney"] m = re.match(r'\[(.*?) "(.*?)"\]', token) if m: self.tags[m.group(1)] = m.group(2) continue # comment tokens eg: { good move! also consider Rxe8 } m = re.match('^{(.*)}$', token) if m: # if we're in the moves section, comment applies to a move if self.moves: self.moves[-1].addComment(m.group(1)) # else it applies to the match comments else: self.comments.append(m.group(1)) continue # result tokens eg: 0-1 m = re.match(Common.regexResults, token) if m: self.result = token if tokens: raise Exception("result token was not the final token! next is: " + tokens[0]) continue # move number token eg: 34. m = re.match(r'(\d+)\.', token) if m: if moveNum == int(m.group(1)) + 1: raise Exception("out of order move number: " + token) moveNum += 1; player = 'W' # normal move (SAN) m = re.match(Common.regexSanChess, token) if m: move = ChessMove.ChessMove() move.moveNum = moveNum move.player = player move.san = token self.moves.append(move) player = {'W':'B', 'B':'W'}[player] # calculate all board states self.states = [self.initState] for move in self.moves: # exceptions (repeated moves due to time forfeiture, etc.) just carry state along... 
if 'TIME_FORFEIT' in move.flags: self.states.append(self.states[-1]) continue currState = self.states[-1] nextState = currState.transition(move.san) self.states.append(nextState) def __str__(self): answer = '' #answer = '%s[%s],%s[%s] vs %s[%s],%s[%s]\n' % ( \ # self.tags['WhiteA'], self.tags['WhiteAElo'], self.tags['BlackA'], self.tags['BlackAElo'], \ # self.tags['BlackB'], self.tags['BlackBElo'], self.tags['WhiteA'], self.tags['WhiteAElo'] \ #) answer += "TAGS:\n" for tag,value in self.tags.iteritems(): answer += "%s: \"%s\"\n" % (tag, value) answer += "COMMENTS:\n" for c in self.comments: answer += c + "\n" answer += "MOVES (%d total):\n" % len(self.moves) for m in self.moves: answer += str(m) + "\n" return answer ############################################################################### # PgnChessMatchIteratorFile # - return matches from file containing multiple matches # - basically, split the text around '[Event "..."]' tags ############################################################################### class PgnChessMatchIteratorFile: def __init__(self, path): self.path = path self.fp = open(path, 'r') self.lineNum = -1 def __iter__(self): self.fp.seek(0, 0) self.lineNum = -1 return self def peekLine(self, doStrip=1): line = self.fp.readline() self.fp.seek(-1*len(line), 1) if doStrip: line = line.rstrip() return line def readLine(self): self.lineNum += 1 temp = self.fp.readline() #print "read: %s" % temp return temp def consumeNewLines(self): while 1: line = self.peekLine(False) if not line: return False if not re.match(r'^\s+$', line): break self.readLine() return True # strategy here is simple: consume lines until an Event tag is found # in other words, Event tags delimit the matches def next(self): if not self.consumeNewLines(): raise StopIteration matchText = self.readLine() if not re.match(r'^\[Event', matchText): raise Exception(("expected Event tag at %s:%d\n" + \ "(instead got: %s)") % (self.path, self.lineNum, matchText)) # so long as the next 
line is not an Event tag, add to current match while 1: line = self.peekLine() if not re.match(r'^\[Event', line): matchText += '\n' + line if not self.readLine(): break else: break # return a match match = PgnChessMatch() match.path = self.path match.parsePgn(matchText) return match def __del__(self): if self.fp: self.fp.close() self.fp = None ############################################################################### # MatchIteratorDir # - return matches from a directory containing files # - basically, loop over MatchIteratorFile for every file in a directory ############################################################################### class PgnChessMatchIteratorDir: def __init__(self, path): self.walkObj = os.walk(path) self.matchIterFileObj = None self.filesList = [] def __iter__(self): return self def next(self): while 1: # first level: does the file iterator still have something left? if self.matchIterFileObj: try: return self.matchIterFileObj.next() except StopIteration: self.matchIterFileObj = None # second level, is current list of files exhausted? can we create a new # file iterator? if self.filesList: self.matchIterFileObj = PgnChessMatchIteratorFile(self.filesList.pop()) continue # third level: no file iterator, no files list, descend! 
# purposely don't trap exception: StopIterations should bubble up and tell # caller that we're done (root, subFolder, files) = self.walkObj.next() for f in files: (dummy, ext) = os.path.splitext(f) if ext == '.bpgn': self.filesList.append(os.path.join(root, f)) ############################################################################### # main() ############################################################################### if __name__ == '__main__': gamesCount = 0 goodGamesCount = 0 path = sys.argv[1] it = None if os.path.isfile(path): it = PgnChessMatchIteratorFile(path) elif os.path.isdir(path): it = PgnChessMatchIteratorDir(path) else: raise Exception("WTF?") for m in it: gamesCount += 1 try: m.sanityCheck() #except MatchMovesOOOException as e: # print "%s: skipping match due to out of order (or missing) moves\n%s\n%s" % (m.path, '\n'.join(m.comments), str(e)) # continue #except MatchZeroMovesException as e: # print "%s: skipping match due to it being empty (no moves whatsoever)\n%s\n%s" % (m.path, '\n'.join(m.comments), str(e)) # continue except Exception as e: print e for s in m.states: print s.occupancy() goodGamesCount += 1 #raw_input("hit enter for next game") print "%d/%d games are good (%02.2f%%)" % (goodGamesCount, gamesCount, 100.0*goodGamesCount/gamesCount)
thompGIT/chesster
ChessLogic/PgnParser.py
Python
mit
10,798
''' libnano.datasets For now this only contains lazy-loaded access to the restriction enzyme dataset, but it may act as the general access to libnano datasets should the need arise in the future. All datasets are provided in bytes (3.x) and str verions. The unicode versions have a "_U" suffix. ''' import json as _json import os as _os import sys as _sys from typing import ( Dict ) from libnano.helpers import jsonbytes as _jsonbytes from libnano.datasets.build_enzyme_dataset import qcEnzymeDataset from libnano.datasets.build_codon_freq_dataset import ( RNA_TO_AA, DNA_TO_AA, AA_TO_DNA, AA_TO_RNA, getOrganismFrequencies, updateCodonFreqDataset ) _LOCAL_DIR: str = _os.path.dirname(_os.path.realpath(__file__)) class DatasetContainer(object): ''' Supports lazy loading of datasets to minimize memory footprint ''' RNA_TO_AA = RNA_TO_AA DNA_TO_AA = DNA_TO_AA AA_TO_DNA = AA_TO_DNA AA_TO_RNA = AA_TO_RNA # ~~~~~~~~~~~~~~~~~~~~~~~ Codon frequency dataset ~~~~~~~~~~~~~~~~~~~~~~~ # def _loadCodonFreqDataset(self, as_unicode: bool = False) -> dict: jsonlib = _json if as_unicode else _jsonbytes dataset_fp = _os.path.join(_LOCAL_DIR, 'codon_freq_dataset.json') with open(dataset_fp) as fd: raw_data = jsonlib.load(fd) if as_unicode: self._CODON_FREQ_DATASET_U = raw_data else: self._CODON_FREQ_DATASET = raw_data @property def CODON_FREQ_DATASET(self) -> dict: try: return self._CODON_FREQ_DATASET except AttributeError: self._loadCodonFreqDataset() return self._CODON_FREQ_DATASET @property def CODON_FREQ_DATASET_U(self) -> dict: try: return self._CODON_FREQ_DATASET_U except AttributeError: self._loadCodonFreqDataset(as_unicode=True) return self._CODON_FREQ_DATASET_U def organismFrequencies(self, organism_id: int, organism_class: str) -> Dict[str, Dict[str, float]]: return getOrganismFrequencies(organism_id, organism_class) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Enzyme dataset ~~~~~~~~~~~~~~~~~~~~~~~~~~~ # def _loadEnzymeDataset(self, as_unicode: bool = False): jsonlib = _json if as_unicode else 
_jsonbytes dataset_fp = _os.path.join(_LOCAL_DIR, 'enzyme_dataset.json') with open(dataset_fp) as fd: raw_data = jsonlib.load(fd) if as_unicode: self._ENZYME_DATASET_U = raw_data[u'enzyme_data'] self._REBASE_VERSION = raw_data[u'rebase_version'] else: self._ENZYME_DATASET = raw_data[b'enzyme_data'] self._REBASE_VERSION = raw_data[b'rebase_version'] @property def ENZYME_DATASET(self) -> dict: try: return self._ENZYME_DATASET except AttributeError: self._loadEnzymeDataset() return self._ENZYME_DATASET @property def REBASE_VERSION(self) -> dict: try: return self._REBASE_VERSION except AttributeError: self._loadEnzymeDataset() return self._REBASE_VERSION @property def ENZYME_DATASET_U(self) -> dict: try: return self._ENZYME_DATASET_U except AttributeError: self._loadEnzymeDataset(as_unicode=True) return self._ENZYME_DATASET_U dataset_container = DatasetContainer() __all__ = ['qcEnzymeDataset', 'dataset_container']
libnano/libnano
libnano/datasets/__init__.py
Python
gpl-2.0
3,595
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * import socket import os import llnl.util.tty as tty from os import environ as env def cmake_cache_entry(name, value): """ Helper that creates CMake cache entry strings used in 'host-config' files. 
""" return 'set({0} "{1}" CACHE PATH "")\n\n'.format(name, value) class Ascent(Package): """Ascent is an open source many-core capable lightweight in situ visualization and analysis infrastructure for multi-physics HPC simulations.""" homepage = "https://github.com/Alpine-DAV/ascent" url = "https://github.com/Alpine-DAV/ascent" maintainers = ['cyrush'] version('develop', git='https://github.com/Alpine-DAV/ascent.git', branch='develop', submodules=True) ########################################################################### # package variants ########################################################################### variant("shared", default=True, description="Build Conduit as shared libs") variant("cmake", default=True, description="Build CMake (if off, attempt to use cmake from PATH)") variant("mpi", default=True, description="Build Ascent MPI Support") # variants for python support variant("python", default=True, description="Build Conduit Python support") # variants for runtime features variant("vtkh", default=True, description="Build VTK-h filter and rendering support") variant("tbb", default=True, description="Build tbb support") variant("cuda", default=False, description="Build cuda support") variant("adios", default=False, description="Build Adios filter support") # variants for dev-tools (docs, etc) variant("doc", default=False, description="Build Conduit's documentation") ########################################################################### # package dependencies ########################################################################### depends_on("cmake", when="+cmake") depends_on("conduit@master") ####################### # Python ####################### # we need a shared version of python b/c linking with static python lib # causes duplicate state issues when running compiled python modules. 
depends_on("python+shared") extends("python", when="+python") depends_on("py-numpy", when="+python", type=('build', 'run')) ####################### # MPI ####################### depends_on("mpi", when="+mpi") depends_on("py-mpi4py", when="+python+mpi") ############################# # TPLs for Runtime Features ############################# depends_on("vtkh", when="+vtkh") depends_on("vtkh+cuda", when="+vtkh+cuda") depends_on("adios", when="+adios") ####################### # Documentation related ####################### depends_on("py-sphinx", when="+python+doc", type='build') def install(self, spec, prefix): """ Build and install Conduit. """ with working_dir('spack-build', create=True): py_site_pkgs_dir = None if "+python" in spec: py_site_pkgs_dir = site_packages_dir host_cfg_fname = self.create_host_config(spec, prefix, py_site_pkgs_dir) cmake_args = [] # if we have a static build, we need to avoid any of # spack's default cmake settings related to rpaths # (see: https://github.com/LLNL/spack/issues/2658) if "+shared" in spec: cmake_args.extend(std_cmake_args) else: for arg in std_cmake_args: if arg.count("RPATH") == 0: cmake_args.append(arg) cmake_args.extend(["-C", host_cfg_fname, "../src"]) cmake(*cmake_args) make() make("install") # install copy of host config for provenance install(host_cfg_fname, prefix) def create_host_config(self, spec, prefix, py_site_pkgs_dir=None): """ This method creates a 'host-config' file that specifies all of the options used to configure and build ascent. For more details about 'host-config' files see: http://ascent.readthedocs.io/en/latest/BuildingAscent.html Note: The `py_site_pkgs_dir` arg exists to allow a package that subclasses this package provide a specific site packages dir when calling this function. `py_site_pkgs_dir` should be an absolute path or `None`. This is necessary because the spack `site_packages_dir` var will not exist in the base class. 
For more details on this issue see: https://github.com/spack/spack/issues/6261 """ ####################### # Compiler Info ####################### c_compiler = env["SPACK_CC"] cpp_compiler = env["SPACK_CXX"] f_compiler = None if self.compiler.fc: # even if this is set, it may not exist so do one more sanity check if os.path.isfile(env["SPACK_FC"]): f_compiler = env["SPACK_FC"] ####################################################################### # By directly fetching the names of the actual compilers we appear # to doing something evil here, but this is necessary to create a # 'host config' file that works outside of the spack install env. ####################################################################### sys_type = spec.architecture # if on llnl systems, we can use the SYS_TYPE if "SYS_TYPE" in env: sys_type = env["SYS_TYPE"] ############################################## # Find and record what CMake is used ############################################## if "+cmake" in spec: cmake_exe = spec['cmake'].command.path else: cmake_exe = which("cmake") if cmake_exe is None: msg = 'failed to find CMake (and cmake variant is off)' raise RuntimeError(msg) cmake_exe = cmake_exe.path host_cfg_fname = "%s-%s-%s-ascent.cmake" % (socket.gethostname(), sys_type, spec.compiler) cfg = open(host_cfg_fname, "w") cfg.write("##################################\n") cfg.write("# spack generated host-config\n") cfg.write("##################################\n") cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler)) cfg.write("##################################\n\n") # Include path to cmake for reference cfg.write("# cmake from spack \n") cfg.write("# cmake executable path: %s\n\n" % cmake_exe) ####################### # Compiler Settings ####################### cfg.write("#######\n") cfg.write("# using %s compiler spec\n" % spec.compiler) cfg.write("#######\n\n") cfg.write("# c compiler used by spack\n") cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler)) cfg.write("# cpp 
compiler used by spack\n") cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler)) cfg.write("# fortran compiler used by spack\n") if f_compiler is not None: cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "ON")) cfg.write(cmake_cache_entry("CMAKE_Fortran_COMPILER", f_compiler)) else: cfg.write("# no fortran compiler found\n\n") cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "OFF")) ####################################################################### # Core Dependencies ####################################################################### ####################### # Conduit ####################### cfg.write("# conduit from spack \n") cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix)) ####################################################################### # Optional Dependencies ####################################################################### ####################### # Python ####################### cfg.write("# Python Support\n") if "+python" in spec: cfg.write("# Enable python module builds\n") cfg.write(cmake_cache_entry("ENABLE_PYTHON", "ON")) cfg.write("# python from spack \n") cfg.write(cmake_cache_entry("PYTHON_EXECUTABLE", spec['python'].command.path)) # only set dest python site packages dir if passed if py_site_pkgs_dir: cfg.write(cmake_cache_entry("PYTHON_MODULE_INSTALL_PREFIX", py_site_pkgs_dir)) else: cfg.write(cmake_cache_entry("ENABLE_PYTHON", "OFF")) if "+doc" in spec: cfg.write(cmake_cache_entry("ENABLE_DOCS", "ON")) cfg.write("# sphinx from spack \n") sphinx_build_exe = join_path(spec['py-sphinx'].prefix.bin, "sphinx-build") cfg.write(cmake_cache_entry("SPHINX_EXECUTABLE", sphinx_build_exe)) else: cfg.write(cmake_cache_entry("ENABLE_DOCS", "OFF")) ####################### # MPI ####################### cfg.write("# MPI Support\n") if "+mpi" in spec: cfg.write(cmake_cache_entry("ENABLE_MPI", "ON")) cfg.write(cmake_cache_entry("MPI_C_COMPILER", spec['mpi'].mpicc)) cfg.write(cmake_cache_entry("MPI_CXX_COMPILER", 
spec['mpi'].mpicxx)) cfg.write(cmake_cache_entry("MPI_Fortran_COMPILER", spec['mpi'].mpifc)) mpiexe_bin = join_path(spec['mpi'].prefix.bin, 'mpiexec') if os.path.isfile(mpiexe_bin): # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE # vs the older versions which expect MPIEXEC if self.spec["cmake"].satisfies('@3.10:'): cfg.write(cmake_cache_entry("MPIEXEC_EXECUTABLE", mpiexe_bin)) else: cfg.write(cmake_cache_entry("MPIEXEC", mpiexe_bin)) else: cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF")) ####################### # CUDA ####################### cfg.write("# CUDA Support\n") if "+cuda" in spec: cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON")) else: cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF")) ####################### # VTK-h ####################### cfg.write("# vtk-h support \n") if "+vtkh" in spec: cfg.write("# tbb from spack\n") cfg.write(cmake_cache_entry("TBB_DIR", spec['tbb'].prefix)) cfg.write("# vtk-m from spack\n") cfg.write(cmake_cache_entry("VTKM_DIR", spec['vtkm'].prefix)) cfg.write("# vtk-h from spack\n") cfg.write(cmake_cache_entry("VTKH_DIR", spec['vtkh'].prefix)) else: cfg.write("# vtk-h not built by spack \n") ####################### # Adios ####################### cfg.write("# adios support\n") if "+adios" in spec: cfg.write(cmake_cache_entry("ADIOS_DIR", spec['adios'].prefix)) else: cfg.write("# adios not built by spack \n") cfg.write("##################################\n") cfg.write("# end spack generated host-config\n") cfg.write("##################################\n") cfg.close() host_cfg_fname = os.path.abspath(host_cfg_fname) tty.info("spack generated conduit host-config file: " + host_cfg_fname) return host_cfg_fname
EmreAtes/spack
var/spack/repos/builtin/packages/ascent/package.py
Python
lgpl-2.1
13,598
''' Helper methods for GotoNewest '''

CONFIG_FILE = '../config/gn.cnf'


def get_basepath(config_file=CONFIG_FILE):
    '''
    Read the basepath as saved in the config file and return it
    (or None if there is a problem).

    config_file: path of the config file to read. Defaults to
    CONFIG_FILE so existing no-argument callers are unaffected.

    The file is expected to contain "key=value" lines; lines starting
    with '#' and blank lines are ignored.  If "basepath" appears more
    than once, the last occurrence wins (same as the original behavior).
    '''
    basepath = None
    with open(config_file, 'r') as fhi:
        for line in fhi:
            line = line.strip()
            # Skip blank lines and comments
            if not line or line.startswith('#'):
                continue
            # Split on the FIRST '=' only, so values may themselves
            # contain '=' (the old split('=') truncated such values).
            key, sep, value = line.partition('=')
            # A line with no '=' at all (sep == '') is ignored instead
            # of raising IndexError as the old info[1] lookup did.
            if sep and key.strip() == 'basepath':
                # Strip surrounding whitespace so "basepath = /x" works.
                basepath = value.strip()
    return basepath
ambidextrousTx/GotoNewest
src/gn_helper.py
Python
bsd-2-clause
548
# coding: utf-8 import datetime import decimal import re import uuid import sqlalchemy as sa from sqlalchemy import any_ from sqlalchemy import ARRAY from sqlalchemy import cast from sqlalchemy import Column from sqlalchemy import column from sqlalchemy import DateTime from sqlalchemy import Enum from sqlalchemy import exc from sqlalchemy import Float from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import null from sqlalchemy import Numeric from sqlalchemy import REAL from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import testing from sqlalchemy import Text from sqlalchemy import text from sqlalchemy import type_coerce from sqlalchemy import TypeDecorator from sqlalchemy import types from sqlalchemy import Unicode from sqlalchemy import util from sqlalchemy.dialects import postgresql from sqlalchemy.dialects.postgresql import array from sqlalchemy.dialects.postgresql import DATERANGE from sqlalchemy.dialects.postgresql import HSTORE from sqlalchemy.dialects.postgresql import hstore from sqlalchemy.dialects.postgresql import INT4RANGE from sqlalchemy.dialects.postgresql import INT8RANGE from sqlalchemy.dialects.postgresql import JSON from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.dialects.postgresql import NUMRANGE from sqlalchemy.dialects.postgresql import TSRANGE from sqlalchemy.dialects.postgresql import TSTZRANGE from sqlalchemy.exc import CompileError from sqlalchemy.orm import declarative_base from sqlalchemy.orm import Session from sqlalchemy.sql import operators from sqlalchemy.sql import sqltypes from sqlalchemy.sql.type_api import Variant from sqlalchemy.testing import fixtures from sqlalchemy.testing.assertions import assert_raises from sqlalchemy.testing.assertions import assert_raises_message from sqlalchemy.testing.assertions import AssertsCompiledSQL from sqlalchemy.testing.assertions 
import AssertsExecutionResults from sqlalchemy.testing.assertions import ComparesTables from sqlalchemy.testing.assertions import eq_ from sqlalchemy.testing.assertions import is_ from sqlalchemy.testing.assertsql import RegexSQL from sqlalchemy.testing.schema import pep435_enum from sqlalchemy.testing.suite import test_types as suite from sqlalchemy.testing.util import round_decimal class FloatCoercionTest(fixtures.TablesTest, AssertsExecutionResults): __only_on__ = "postgresql" __dialect__ = postgresql.dialect() __backend__ = True @classmethod def define_tables(cls, metadata): Table( "data_table", metadata, Column("id", Integer, primary_key=True), Column("data", Integer), ) @classmethod def insert_data(cls, connection): data_table = cls.tables.data_table connection.execute( data_table.insert().values( [ {"data": 3}, {"data": 5}, {"data": 7}, {"data": 2}, {"data": 15}, {"data": 12}, {"data": 6}, {"data": 478}, {"data": 52}, {"data": 9}, ] ) ) def test_float_coercion(self, connection): data_table = self.tables.data_table for type_, result in [ (Numeric, decimal.Decimal("140.381230939")), (Float, 140.381230939), (Float(asdecimal=True), decimal.Decimal("140.381230939")), (Numeric(asdecimal=False), 140.381230939), ]: ret = connection.execute( select(func.stddev_pop(data_table.c.data, type_=type_)) ).scalar() eq_(round_decimal(ret, 9), result) ret = connection.execute( select(cast(func.stddev_pop(data_table.c.data), type_)) ).scalar() eq_(round_decimal(ret, 9), result) def test_arrays_pg(self, connection, metadata): t1 = Table( "t", metadata, Column("x", postgresql.ARRAY(Float)), Column("y", postgresql.ARRAY(REAL)), Column("z", postgresql.ARRAY(postgresql.DOUBLE_PRECISION)), Column("q", postgresql.ARRAY(Numeric)), ) metadata.create_all(connection) connection.execute( t1.insert(), dict(x=[5], y=[5], z=[6], q=[decimal.Decimal("6.4")]) ) row = connection.execute(t1.select()).first() eq_(row, ([5], [5], [6], [decimal.Decimal("6.4")])) def test_arrays_base(self, connection, 
metadata): t1 = Table( "t", metadata, Column("x", sqltypes.ARRAY(Float)), Column("y", sqltypes.ARRAY(REAL)), Column("z", sqltypes.ARRAY(postgresql.DOUBLE_PRECISION)), Column("q", sqltypes.ARRAY(Numeric)), ) metadata.create_all(connection) connection.execute( t1.insert(), dict(x=[5], y=[5], z=[6], q=[decimal.Decimal("6.4")]) ) row = connection.execute(t1.select()).first() eq_(row, ([5], [5], [6], [decimal.Decimal("6.4")])) class EnumTest(fixtures.TestBase, AssertsExecutionResults): __backend__ = True __only_on__ = "postgresql > 8.3" def test_native_enum_warnings(self): """test #6106""" with testing.expect_warnings( "the native_enum flag does not apply to the " "sqlalchemy.dialects.postgresql.ENUM datatype;" ): e1 = postgresql.ENUM("a", "b", "c", native_enum=False) e2 = postgresql.ENUM("a", "b", "c", native_enum=True) e3 = postgresql.ENUM("a", "b", "c") is_(e1.native_enum, True) is_(e2.native_enum, True) is_(e3.native_enum, True) def test_create_table(self, metadata, connection): metadata = self.metadata t1 = Table( "table", metadata, Column("id", Integer, primary_key=True), Column( "value", Enum("one", "two", "three", name="onetwothreetype") ), ) t1.create(connection) t1.create(connection, checkfirst=True) # check the create connection.execute(t1.insert(), dict(value="two")) connection.execute(t1.insert(), dict(value="three")) connection.execute(t1.insert(), dict(value="three")) eq_( connection.execute(t1.select().order_by(t1.c.id)).fetchall(), [(1, "two"), (2, "three"), (3, "three")], ) @testing.combinations(None, "foo", argnames="symbol_name") def test_create_table_schema_translate_map(self, connection, symbol_name): # note we can't use the fixture here because it will not drop # from the correct schema metadata = MetaData() t1 = Table( "table", metadata, Column("id", Integer, primary_key=True), Column( "value", Enum( "one", "two", "three", name="schema_enum", schema=symbol_name, ), ), schema=symbol_name, ) conn = connection.execution_options( 
schema_translate_map={symbol_name: testing.config.test_schema} ) t1.create(conn) assert "schema_enum" in [ e["name"] for e in inspect(conn).get_enums(schema=testing.config.test_schema) ] t1.create(conn, checkfirst=True) conn.execute(t1.insert(), dict(value="two")) conn.execute(t1.insert(), dict(value="three")) conn.execute(t1.insert(), dict(value="three")) eq_( conn.execute(t1.select().order_by(t1.c.id)).fetchall(), [(1, "two"), (2, "three"), (3, "three")], ) t1.drop(conn) assert "schema_enum" not in [ e["name"] for e in inspect(conn).get_enums(schema=testing.config.test_schema) ] t1.drop(conn, checkfirst=True) @testing.combinations( ("local_schema",), ("metadata_schema_only",), ("inherit_table_schema",), ("override_metadata_schema",), argnames="test_case", ) @testing.requires.schemas def test_schema_inheritance(self, test_case, metadata, connection): """test #6373""" metadata.schema = testing.config.test_schema if test_case == "metadata_schema_only": enum = Enum( "four", "five", "six", metadata=metadata, name="myenum" ) assert_schema = testing.config.test_schema elif test_case == "override_metadata_schema": enum = Enum( "four", "five", "six", metadata=metadata, schema=testing.config.test_schema_2, name="myenum", ) assert_schema = testing.config.test_schema_2 elif test_case == "inherit_table_schema": enum = Enum( "four", "five", "six", metadata=metadata, inherit_schema=True, name="myenum", ) assert_schema = testing.config.test_schema_2 elif test_case == "local_schema": enum = Enum("four", "five", "six", name="myenum") assert_schema = testing.config.db.dialect.default_schema_name Table( "t", metadata, Column("data", enum), schema=testing.config.test_schema_2, ) metadata.create_all(connection) eq_( inspect(connection).get_enums(schema=assert_schema), [ { "labels": ["four", "five", "six"], "name": "myenum", "schema": assert_schema, "visible": assert_schema == testing.config.db.dialect.default_schema_name, } ], ) def test_name_required(self, metadata, connection): 
etype = Enum("four", "five", "six", metadata=metadata) assert_raises(exc.CompileError, etype.create, connection) assert_raises( exc.CompileError, etype.compile, dialect=connection.dialect ) def test_unicode_labels(self, connection, metadata): t1 = Table( "table", metadata, Column("id", Integer, primary_key=True), Column( "value", Enum( util.u("réveillé"), util.u("drôle"), util.u("S’il"), name="onetwothreetype", ), ), ) metadata.create_all(connection) connection.execute(t1.insert(), dict(value=util.u("drôle"))) connection.execute(t1.insert(), dict(value=util.u("réveillé"))) connection.execute(t1.insert(), dict(value=util.u("S’il"))) eq_( connection.execute(t1.select().order_by(t1.c.id)).fetchall(), [ (1, util.u("drôle")), (2, util.u("réveillé")), (3, util.u("S’il")), ], ) m2 = MetaData() t2 = Table("table", m2, autoload_with=connection) eq_( t2.c.value.type.enums, [util.u("réveillé"), util.u("drôle"), util.u("S’il")], ) def test_non_native_enum(self, metadata, connection): metadata = self.metadata t1 = Table( "foo", metadata, Column( "bar", Enum( "one", "two", "three", name="myenum", create_constraint=True, native_enum=False, ), ), ) def go(): t1.create(connection) self.assert_sql( connection, go, [ ( "CREATE TABLE foo (\tbar " "VARCHAR(5), \tCONSTRAINT myenum CHECK " "(bar IN ('one', 'two', 'three')))", {}, ) ], ) connection.execute(t1.insert(), {"bar": "two"}) eq_(connection.scalar(select(t1.c.bar)), "two") def test_non_native_enum_w_unicode(self, metadata, connection): metadata = self.metadata t1 = Table( "foo", metadata, Column( "bar", Enum( "B", util.u("Ü"), name="myenum", create_constraint=True, native_enum=False, ), ), ) def go(): t1.create(connection) self.assert_sql( connection, go, [ ( util.u( "CREATE TABLE foo (\tbar " "VARCHAR(1), \tCONSTRAINT myenum CHECK " "(bar IN ('B', 'Ü')))" ), {}, ) ], ) connection.execute(t1.insert(), {"bar": util.u("Ü")}) eq_(connection.scalar(select(t1.c.bar)), util.u("Ü")) def test_disable_create(self, metadata, connection): 
metadata = self.metadata e1 = postgresql.ENUM( "one", "two", "three", name="myenum", create_type=False ) t1 = Table("e1", metadata, Column("c1", e1)) # table can be created separately # without conflict e1.create(bind=connection) t1.create(connection) t1.drop(connection) e1.drop(bind=connection) def test_dont_keep_checking(self, metadata, connection): metadata = self.metadata e1 = postgresql.ENUM("one", "two", "three", name="myenum") Table("t", metadata, Column("a", e1), Column("b", e1), Column("c", e1)) with self.sql_execution_asserter(connection) as asserter: metadata.create_all(connection) asserter.assert_( # check for table RegexSQL( "select relname from pg_class c join pg_namespace.*", dialect="postgresql", ), # check for enum, just once RegexSQL(r".*SELECT EXISTS ", dialect="postgresql"), RegexSQL("CREATE TYPE myenum AS ENUM .*", dialect="postgresql"), RegexSQL(r"CREATE TABLE t .*", dialect="postgresql"), ) with self.sql_execution_asserter(connection) as asserter: metadata.drop_all(connection) asserter.assert_( RegexSQL( "select relname from pg_class c join pg_namespace.*", dialect="postgresql", ), RegexSQL("DROP TABLE t", dialect="postgresql"), RegexSQL(r".*SELECT EXISTS ", dialect="postgresql"), RegexSQL("DROP TYPE myenum", dialect="postgresql"), ) def test_generate_multiple(self, metadata, connection): """Test that the same enum twice only generates once for the create_all() call, without using checkfirst. A 'memo' collection held by the DDL runner now handles this. """ e1 = Enum("one", "two", "three", name="myenum") Table("e1", metadata, Column("c1", e1)) Table("e2", metadata, Column("c1", e1)) metadata.create_all(connection, checkfirst=False) metadata.drop_all(connection, checkfirst=False) assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums() ] def test_generate_alone_on_metadata(self, connection, metadata): """Test that the same enum twice only generates once for the create_all() call, without using checkfirst. 
A 'memo' collection held by the DDL runner now handles this. """ Enum("one", "two", "three", name="myenum", metadata=metadata) metadata.create_all(connection, checkfirst=False) assert "myenum" in [e["name"] for e in inspect(connection).get_enums()] metadata.drop_all(connection, checkfirst=False) assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums() ] def test_generate_multiple_on_metadata(self, connection, metadata): e1 = Enum("one", "two", "three", name="myenum", metadata=metadata) t1 = Table("e1", metadata, Column("c1", e1)) t2 = Table("e2", metadata, Column("c1", e1)) metadata.create_all(connection, checkfirst=False) assert "myenum" in [e["name"] for e in inspect(connection).get_enums()] metadata.drop_all(connection, checkfirst=False) assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums() ] e1.create(connection) # creates ENUM t1.create(connection) # does not create ENUM t2.create(connection) # does not create ENUM def test_generate_multiple_schemaname_on_metadata( self, metadata, connection ): Enum("one", "two", "three", name="myenum", metadata=metadata) Enum( "one", "two", "three", name="myenum", metadata=metadata, schema="test_schema", ) metadata.create_all(connection, checkfirst=False) assert "myenum" in [e["name"] for e in inspect(connection).get_enums()] assert "myenum" in [ e["name"] for e in inspect(connection).get_enums(schema="test_schema") ] metadata.drop_all(connection, checkfirst=False) assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums() ] assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums(schema="test_schema") ] def test_drops_on_table(self, connection, metadata): e1 = Enum("one", "two", "three", name="myenum") table = Table("e1", metadata, Column("c1", e1)) table.create(connection) table.drop(connection) assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums() ] table.create(connection) assert "myenum" in [e["name"] for e in 
inspect(connection).get_enums()] table.drop(connection) assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums() ] def test_create_drop_schema_translate_map(self, connection): conn = connection.execution_options( schema_translate_map={None: testing.config.test_schema} ) e1 = Enum("one", "two", "three", name="myenum") assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums(testing.config.test_schema) ] e1.create(conn, checkfirst=True) e1.create(conn, checkfirst=True) assert "myenum" in [ e["name"] for e in inspect(connection).get_enums(testing.config.test_schema) ] s1 = conn.begin_nested() assert_raises(exc.ProgrammingError, e1.create, conn, checkfirst=False) s1.rollback() e1.drop(conn, checkfirst=True) e1.drop(conn, checkfirst=True) assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums(testing.config.test_schema) ] assert_raises(exc.ProgrammingError, e1.drop, conn, checkfirst=False) def test_remain_on_table_metadata_wide(self, metadata, future_connection): connection = future_connection e1 = Enum("one", "two", "three", name="myenum", metadata=metadata) table = Table("e1", metadata, Column("c1", e1)) # need checkfirst here, otherwise enum will not be created assert_raises_message( sa.exc.ProgrammingError, '.*type "myenum" does not exist', table.create, connection, ) connection.rollback() table.create(connection, checkfirst=True) table.drop(connection) table.create(connection, checkfirst=True) table.drop(connection) assert "myenum" in [e["name"] for e in inspect(connection).get_enums()] metadata.drop_all(connection) assert "myenum" not in [ e["name"] for e in inspect(connection).get_enums() ] def test_non_native_dialect(self, metadata, testing_engine): engine = testing_engine() engine.connect() engine.dialect.supports_native_enum = False t1 = Table( "foo", metadata, Column( "bar", Enum( "one", "two", "three", name="myenum", create_constraint=True, ), ), ) def go(): t1.create(engine) self.assert_sql( engine, go, 
[ ( "CREATE TABLE foo (bar " "VARCHAR(5), CONSTRAINT myenum CHECK " "(bar IN ('one', 'two', 'three')))", {}, ) ], ) def test_standalone_enum(self, connection, metadata): etype = Enum( "four", "five", "six", name="fourfivesixtype", metadata=metadata ) etype.create(connection) try: assert connection.dialect.has_type(connection, "fourfivesixtype") finally: etype.drop(connection) assert not connection.dialect.has_type( connection, "fourfivesixtype" ) metadata.create_all(connection) try: assert connection.dialect.has_type(connection, "fourfivesixtype") finally: metadata.drop_all(connection) assert not connection.dialect.has_type( connection, "fourfivesixtype" ) def test_no_support(self, testing_engine): def server_version_info(self): return (8, 2) e = testing_engine() dialect = e.dialect dialect._get_server_version_info = server_version_info assert dialect.supports_native_enum e.connect() assert not dialect.supports_native_enum # initialize is called again on new pool e.dispose() e.connect() assert not dialect.supports_native_enum def test_reflection(self, metadata, connection): etype = Enum( "four", "five", "six", name="fourfivesixtype", metadata=metadata ) Table( "table", metadata, Column("id", Integer, primary_key=True), Column( "value", Enum("one", "two", "three", name="onetwothreetype") ), Column("value2", etype), ) metadata.create_all(connection) m2 = MetaData() t2 = Table("table", m2, autoload_with=connection) eq_(t2.c.value.type.enums, ["one", "two", "three"]) eq_(t2.c.value.type.name, "onetwothreetype") eq_(t2.c.value2.type.enums, ["four", "five", "six"]) eq_(t2.c.value2.type.name, "fourfivesixtype") def test_schema_reflection(self, metadata, connection): etype = Enum( "four", "five", "six", name="fourfivesixtype", schema="test_schema", metadata=metadata, ) Table( "table", metadata, Column("id", Integer, primary_key=True), Column( "value", Enum( "one", "two", "three", name="onetwothreetype", schema="test_schema", ), ), Column("value2", etype), ) 
metadata.create_all(connection) m2 = MetaData() t2 = Table("table", m2, autoload_with=connection) eq_(t2.c.value.type.enums, ["one", "two", "three"]) eq_(t2.c.value.type.name, "onetwothreetype") eq_(t2.c.value2.type.enums, ["four", "five", "six"]) eq_(t2.c.value2.type.name, "fourfivesixtype") eq_(t2.c.value2.type.schema, "test_schema") def test_custom_subclass(self, metadata, connection): class MyEnum(TypeDecorator): impl = Enum("oneHI", "twoHI", "threeHI", name="myenum") cache_ok = True def process_bind_param(self, value, dialect): if value is not None: value += "HI" return value def process_result_value(self, value, dialect): if value is not None: value += "THERE" return value t1 = Table("table1", self.metadata, Column("data", MyEnum())) self.metadata.create_all(connection) connection.execute(t1.insert(), {"data": "two"}) eq_(connection.scalar(select(t1.c.data)), "twoHITHERE") def test_generic_w_pg_variant(self, metadata, connection): some_table = Table( "some_table", self.metadata, Column( "data", Enum( "one", "two", "three", native_enum=True # make sure this is True because # it should *not* take effect due to # the variant ).with_variant( postgresql.ENUM("four", "five", "six", name="my_enum"), "postgresql", ), ), ) assert "my_enum" not in [ e["name"] for e in inspect(connection).get_enums() ] self.metadata.create_all(connection) assert "my_enum" in [ e["name"] for e in inspect(connection).get_enums() ] connection.execute(some_table.insert(), {"data": "five"}) self.metadata.drop_all(connection) assert "my_enum" not in [ e["name"] for e in inspect(connection).get_enums() ] def test_generic_w_some_other_variant(self, metadata, connection): some_table = Table( "some_table", self.metadata, Column( "data", Enum( "one", "two", "three", name="my_enum", native_enum=True ).with_variant(Enum("four", "five", "six"), "mysql"), ), ) assert "my_enum" not in [ e["name"] for e in inspect(connection).get_enums() ] self.metadata.create_all(connection) assert "my_enum" in [ 
e["name"] for e in inspect(connection).get_enums() ] connection.execute(some_table.insert(), {"data": "two"}) self.metadata.drop_all(connection) assert "my_enum" not in [ e["name"] for e in inspect(connection).get_enums() ] class OIDTest(fixtures.TestBase): __only_on__ = "postgresql" __backend__ = True def test_reflection(self, connection, metadata): Table( "table", metadata, Column("x", Integer), Column("y", postgresql.OID), ) metadata.create_all(connection) m2 = MetaData() t2 = Table( "table", m2, autoload_with=connection, ) assert isinstance(t2.c.y.type, postgresql.OID) class RegClassTest(fixtures.TestBase): __only_on__ = "postgresql" __backend__ = True @testing.fixture() def scalar(self, connection): def go(expression): return connection.scalar(select(expression)) return go def test_cast_name(self, scalar): eq_(scalar(cast("pg_class", postgresql.REGCLASS)), "pg_class") def test_cast_path(self, scalar): eq_( scalar(cast("pg_catalog.pg_class", postgresql.REGCLASS)), "pg_class", ) def test_cast_oid(self, scalar): regclass = cast("pg_class", postgresql.REGCLASS) oid = scalar(cast(regclass, postgresql.OID)) assert isinstance(oid, int) eq_( scalar( cast(type_coerce(oid, postgresql.OID), postgresql.REGCLASS) ), "pg_class", ) def test_cast_whereclause(self, connection): pga = Table( "pg_attribute", MetaData(), Column("attrelid", postgresql.OID), Column("attname", String(64)), ) oid = connection.scalar( select(pga.c.attrelid).where( pga.c.attrelid == cast("pg_class", postgresql.REGCLASS) ) ) assert isinstance(oid, int) class NumericInterpretationTest(fixtures.TestBase): __only_on__ = "postgresql" __backend__ = True def test_numeric_codes(self): from sqlalchemy.dialects.postgresql import ( pg8000, pygresql, psycopg2, psycopg2cffi, base, ) dialects = ( pg8000.dialect(), pygresql.dialect(), psycopg2.dialect(), psycopg2cffi.dialect(), ) for dialect in dialects: typ = Numeric().dialect_impl(dialect) for code in ( base._INT_TYPES + base._FLOAT_TYPES + base._DECIMAL_TYPES ): 
proc = typ.result_processor(dialect, code) val = 23.7 if proc is not None: val = proc(val) assert val in (23.7, decimal.Decimal("23.7")) def test_numeric_default(self, connection, metadata): # pg8000 appears to fail when the value is 0, # returns an int instead of decimal. t = Table( "t", metadata, Column("id", Integer, primary_key=True), Column("nd", Numeric(asdecimal=True), default=1), Column("nf", Numeric(asdecimal=False), default=1), Column("fd", Float(asdecimal=True), default=1), Column("ff", Float(asdecimal=False), default=1), ) metadata.create_all(connection) connection.execute(t.insert()) row = connection.execute(t.select()).first() assert isinstance(row[1], decimal.Decimal) assert isinstance(row[2], float) assert isinstance(row[3], decimal.Decimal) assert isinstance(row[4], float) eq_(row, (1, decimal.Decimal("1"), 1, decimal.Decimal("1"), 1)) class PythonTypeTest(fixtures.TestBase): def test_interval(self): is_(postgresql.INTERVAL().python_type, datetime.timedelta) class TimezoneTest(fixtures.TablesTest): __backend__ = True """Test timezone-aware datetimes. psycopg will return a datetime with a tzinfo attached to it, if postgresql returns it. python then will not let you compare a datetime with a tzinfo to a datetime that doesn't have one. this test illustrates two ways to have datetime types with and without timezone info. 
""" __only_on__ = "postgresql" @classmethod def define_tables(cls, metadata): # current_timestamp() in postgresql is assumed to return # TIMESTAMP WITH TIMEZONE Table( "tztable", metadata, Column("id", Integer, primary_key=True), Column( "date", DateTime(timezone=True), onupdate=func.current_timestamp(), ), Column("name", String(20)), ) Table( "notztable", metadata, Column("id", Integer, primary_key=True), Column( "date", DateTime(timezone=False), onupdate=cast( func.current_timestamp(), DateTime(timezone=False) ), ), Column("name", String(20)), ) def test_with_timezone(self, connection): tztable, notztable = self.tables("tztable", "notztable") # get a date with a tzinfo somedate = connection.scalar(func.current_timestamp().select()) assert somedate.tzinfo connection.execute( tztable.insert(), dict(id=1, name="row1", date=somedate) ) row = connection.execute( select(tztable.c.date).where(tztable.c.id == 1) ).first() eq_(row[0], somedate) eq_( somedate.tzinfo.utcoffset(somedate), row[0].tzinfo.utcoffset(row[0]), ) result = connection.execute( tztable.update() .where(tztable.c.id == 1) .returning(tztable.c.date), dict( name="newname", ), ) row = result.first() assert row[0] >= somedate def test_without_timezone(self, connection): # get a date without a tzinfo tztable, notztable = self.tables("tztable", "notztable") somedate = datetime.datetime(2005, 10, 20, 11, 52, 0) assert not somedate.tzinfo connection.execute( notztable.insert(), dict(id=1, name="row1", date=somedate) ) row = connection.execute( select(notztable.c.date).where(notztable.c.id == 1) ).first() eq_(row[0], somedate) eq_(row[0].tzinfo, None) result = connection.execute( notztable.update() .where(notztable.c.id == 1) .returning(notztable.c.date), dict( name="newname", ), ) row = result.first() assert row[0] >= somedate class TimePrecisionCompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = postgresql.dialect() @testing.combinations( (postgresql.TIME(), "TIME WITHOUT TIME ZONE"), 
(postgresql.TIME(precision=5), "TIME(5) WITHOUT TIME ZONE"), ( postgresql.TIME(timezone=True, precision=5), "TIME(5) WITH TIME ZONE", ), (postgresql.TIMESTAMP(), "TIMESTAMP WITHOUT TIME ZONE"), (postgresql.TIMESTAMP(precision=5), "TIMESTAMP(5) WITHOUT TIME ZONE"), ( postgresql.TIMESTAMP(timezone=True, precision=5), "TIMESTAMP(5) WITH TIME ZONE", ), (postgresql.TIME(precision=0), "TIME(0) WITHOUT TIME ZONE"), (postgresql.TIMESTAMP(precision=0), "TIMESTAMP(0) WITHOUT TIME ZONE"), ) def test_compile(self, type_, expected): self.assert_compile(type_, expected) class TimePrecisionTest(fixtures.TestBase): __only_on__ = "postgresql" __backend__ = True def test_reflection(self, metadata, connection): t1 = Table( "t1", metadata, Column("c1", postgresql.TIME()), Column("c2", postgresql.TIME(precision=5)), Column("c3", postgresql.TIME(timezone=True, precision=5)), Column("c4", postgresql.TIMESTAMP()), Column("c5", postgresql.TIMESTAMP(precision=5)), Column("c6", postgresql.TIMESTAMP(timezone=True, precision=5)), ) t1.create(connection) m2 = MetaData() t2 = Table("t1", m2, autoload_with=connection) eq_(t2.c.c1.type.precision, None) eq_(t2.c.c2.type.precision, 5) eq_(t2.c.c3.type.precision, 5) eq_(t2.c.c4.type.precision, None) eq_(t2.c.c5.type.precision, 5) eq_(t2.c.c6.type.precision, 5) eq_(t2.c.c1.type.timezone, False) eq_(t2.c.c2.type.timezone, False) eq_(t2.c.c3.type.timezone, True) eq_(t2.c.c4.type.timezone, False) eq_(t2.c.c5.type.timezone, False) eq_(t2.c.c6.type.timezone, True) class ArrayTest(AssertsCompiledSQL, fixtures.TestBase): __dialect__ = "postgresql" def test_array_literal(self): obj = postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) self.assert_compile( obj, "ARRAY[%(param_1)s, %(param_2)s] || " "ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]", params={ "param_1": 1, "param_2": 2, "param_3": 3, "param_4": 4, "param_5": 5, }, ) self.assert_compile( obj[1], "(ARRAY[%(param_1)s, %(param_2)s] || ARRAY[%(param_3)s, " "%(param_4)s, %(param_5)s])[%(param_6)s]", 
params={ "param_1": 1, "param_2": 2, "param_3": 3, "param_4": 4, "param_5": 5, }, ) def test_array_literal_getitem_multidim(self): obj = postgresql.array( [postgresql.array([1, 2]), postgresql.array([3, 4])] ) self.assert_compile( obj, "ARRAY[ARRAY[%(param_1)s, %(param_2)s], " "ARRAY[%(param_3)s, %(param_4)s]]", ) self.assert_compile( obj[1], "(ARRAY[ARRAY[%(param_1)s, %(param_2)s], " "ARRAY[%(param_3)s, %(param_4)s]])[%(param_5)s]", ) self.assert_compile( obj[1][0], "(ARRAY[ARRAY[%(param_1)s, %(param_2)s], " "ARRAY[%(param_3)s, %(param_4)s]])[%(param_5)s][%(param_6)s]", ) def test_array_type_render_str(self): self.assert_compile(postgresql.ARRAY(Unicode(30)), "VARCHAR(30)[]") def test_array_type_render_str_collate(self): self.assert_compile( postgresql.ARRAY(Unicode(30, collation="en_US")), 'VARCHAR(30)[] COLLATE "en_US"', ) def test_array_type_render_str_multidim(self): self.assert_compile( postgresql.ARRAY(Unicode(30), dimensions=2), "VARCHAR(30)[][]" ) self.assert_compile( postgresql.ARRAY(Unicode(30), dimensions=3), "VARCHAR(30)[][][]" ) def test_array_type_render_str_collate_multidim(self): self.assert_compile( postgresql.ARRAY(Unicode(30, collation="en_US"), dimensions=2), 'VARCHAR(30)[][] COLLATE "en_US"', ) self.assert_compile( postgresql.ARRAY(Unicode(30, collation="en_US"), dimensions=3), 'VARCHAR(30)[][][] COLLATE "en_US"', ) def test_array_int_index(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col[3]), "SELECT x[%(x_1)s] AS anon_1", checkparams={"x_1": 3}, ) def test_array_any(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col.any(7, operator=operators.lt)), "SELECT %(param_1)s < ANY (x) AS anon_1", checkparams={"param_1": 7}, ) def test_array_all(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col.all(7, operator=operators.lt)), "SELECT %(param_1)s < ALL (x) AS anon_1", checkparams={"param_1": 7}, ) def test_array_contains(self): col = column("x", 
postgresql.ARRAY(Integer)) self.assert_compile( select(col.contains(array([4, 5, 6]))), "SELECT x @> ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] " "AS anon_1", checkparams={"param_1": 4, "param_3": 6, "param_2": 5}, ) def test_contains_override_raises(self): col = column("x", postgresql.ARRAY(Integer)) assert_raises_message( NotImplementedError, "Operator 'contains' is not supported on this expression", lambda: "foo" in col, ) def test_array_contained_by(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col.contained_by(array([4, 5, 6]))), "SELECT x <@ ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] " "AS anon_1", checkparams={"param_1": 4, "param_3": 6, "param_2": 5}, ) def test_array_overlap(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col.overlap(array([4, 5, 6]))), "SELECT x && ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] " "AS anon_1", checkparams={"param_1": 4, "param_3": 6, "param_2": 5}, ) def test_array_slice_index(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col[5:10]), "SELECT x[%(x_1)s:%(x_2)s] AS anon_1", checkparams={"x_2": 10, "x_1": 5}, ) def test_array_dim_index(self): col = column("x", postgresql.ARRAY(Integer, dimensions=2)) self.assert_compile( select(col[3][5]), "SELECT x[%(x_1)s][%(param_1)s] AS anon_1", checkparams={"x_1": 3, "param_1": 5}, ) def test_array_concat(self): col = column("x", postgresql.ARRAY(Integer)) literal = array([4, 5]) self.assert_compile( select(col + literal), "SELECT x || ARRAY[%(param_1)s, %(param_2)s] AS anon_1", checkparams={"param_1": 4, "param_2": 5}, ) def test_array_index_map_dimensions(self): col = column("x", postgresql.ARRAY(Integer, dimensions=3)) is_(col[5].type._type_affinity, ARRAY) assert isinstance(col[5].type, postgresql.ARRAY) eq_(col[5].type.dimensions, 2) is_(col[5][6].type._type_affinity, ARRAY) assert isinstance(col[5][6].type, postgresql.ARRAY) eq_(col[5][6].type.dimensions, 1) 
is_(col[5][6][7].type._type_affinity, Integer) def test_array_getitem_single_type(self): m = MetaData() arrtable = Table( "arrtable", m, Column("intarr", postgresql.ARRAY(Integer)), Column("strarr", postgresql.ARRAY(String)), ) is_(arrtable.c.intarr[1].type._type_affinity, Integer) is_(arrtable.c.strarr[1].type._type_affinity, String) def test_array_getitem_slice_type(self): m = MetaData() arrtable = Table( "arrtable", m, Column("intarr", postgresql.ARRAY(Integer)), Column("strarr", postgresql.ARRAY(String)), ) # type affinity is Array... is_(arrtable.c.intarr[1:3].type._type_affinity, ARRAY) is_(arrtable.c.strarr[1:3].type._type_affinity, ARRAY) # but the slice returns the actual type assert isinstance(arrtable.c.intarr[1:3].type, postgresql.ARRAY) assert isinstance(arrtable.c.strarr[1:3].type, postgresql.ARRAY) def test_array_functions_plus_getitem(self): """test parenthesizing of functions plus indexing, which seems to be required by PostgreSQL. """ stmt = select( func.array_cat( array([1, 2, 3]), array([4, 5, 6]), type_=postgresql.ARRAY(Integer), )[2:5] ) self.assert_compile( stmt, "SELECT (array_cat(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s], " "ARRAY[%(param_4)s, %(param_5)s, %(param_6)s]))" "[%(param_7)s:%(param_8)s] AS anon_1", ) self.assert_compile( func.array_cat( array([1, 2, 3]), array([4, 5, 6]), type_=postgresql.ARRAY(Integer), )[3], "(array_cat(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s], " "ARRAY[%(param_4)s, %(param_5)s, %(param_6)s]))[%(array_cat_1)s]", ) def test_array_agg_generic(self): expr = func.array_agg(column("q", Integer)) is_(expr.type.__class__, types.ARRAY) is_(expr.type.item_type.__class__, Integer) @testing.combinations( ("original", False, False), ("just_enum", True, False), ("just_order_by", False, True), ("issue_5989", True, True), id_="iaa", argnames="with_enum, using_aggregate_order_by", ) def test_array_agg_specific(self, with_enum, using_aggregate_order_by): from sqlalchemy.dialects.postgresql import aggregate_order_by from 
    @classmethod
    def define_tables(cls, metadata):
        """Create the array round-trip tables.

        ``ProcValue`` is a 2-dimensional array TypeDecorator that adds 5
        on the way in and subtracts 7 on the way out, so a full round
        trip visibly shifts every element by -2 -- proving that both the
        bind and result processors actually ran (see
        ``test_multi_dim_roundtrip``).
        """

        class ProcValue(TypeDecorator):
            impl = cls.ARRAY(Integer, dimensions=2)
            cache_ok = True

            def process_bind_param(self, value, dialect):
                if value is None:
                    return None
                # shift every element up by 5 before it goes to the DB
                return [[x + 5 for x in v] for v in value]

            def process_result_value(self, value, dialect):
                if value is None:
                    return None
                # shift every element down by 7 coming back out
                return [[x - 7 for x in v] for v in value]

        Table(
            "arrtable",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("intarr", cls.ARRAY(Integer)),
            Column("strarr", cls.ARRAY(Unicode())),
            Column("dimarr", ProcValue),
        )

        # same columns, but with dimensions=1 declared explicitly
        Table(
            "dim_arrtable",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("intarr", cls.ARRAY(Integer, dimensions=1)),
            Column("strarr", cls.ARRAY(Unicode(), dimensions=1)),
            Column("dimarr", ProcValue),
        )
t.create(connection) def test_array_agg(self, metadata, connection): values_table = Table("values", metadata, Column("value", Integer)) metadata.create_all(connection) connection.execute( values_table.insert(), [{"value": i} for i in range(1, 10)] ) stmt = select(func.array_agg(values_table.c.value)) eq_(connection.execute(stmt).scalar(), list(range(1, 10))) stmt = select(func.array_agg(values_table.c.value)[3]) eq_(connection.execute(stmt).scalar(), 3) stmt = select(func.array_agg(values_table.c.value)[2:4]) eq_(connection.execute(stmt).scalar(), [2, 3, 4]) def test_array_index_slice_exprs(self, connection): """test a variety of expressions that sometimes need parenthesizing""" stmt = select(array([1, 2, 3, 4])[2:3]) eq_(connection.execute(stmt).scalar(), [2, 3]) stmt = select(array([1, 2, 3, 4])[2]) eq_(connection.execute(stmt).scalar(), 2) stmt = select((array([1, 2]) + array([3, 4]))[2:3]) eq_(connection.execute(stmt).scalar(), [2, 3]) stmt = select(array([1, 2]) + array([3, 4])[2:3]) eq_(connection.execute(stmt).scalar(), [1, 2, 4]) stmt = select(array([1, 2])[2:3] + array([3, 4])) eq_(connection.execute(stmt).scalar(), [2, 3, 4]) stmt = select( func.array_cat( array([1, 2, 3]), array([4, 5, 6]), type_=self.ARRAY(Integer), )[2:5] ) eq_(connection.execute(stmt).scalar(), [2, 3, 4, 5]) def test_any_all_exprs_array(self, connection): stmt = select( 3 == any_( func.array_cat( array([1, 2, 3]), array([4, 5, 6]), type_=self.ARRAY(Integer), ) ) ) eq_(connection.execute(stmt).scalar(), True) def test_insert_array(self, connection): arrtable = self.tables.arrtable connection.execute( arrtable.insert(), dict( intarr=[1, 2, 3], strarr=[util.u("abc"), util.u("def")], ), ) results = connection.execute(arrtable.select()).fetchall() eq_(len(results), 1) eq_(results[0].intarr, [1, 2, 3]) eq_(results[0].strarr, [util.u("abc"), util.u("def")]) def test_insert_array_w_null(self, connection): arrtable = self.tables.arrtable connection.execute( arrtable.insert(), dict( intarr=[1, 
None, 3], strarr=[util.u("abc"), None], ), ) results = connection.execute(arrtable.select()).fetchall() eq_(len(results), 1) eq_(results[0].intarr, [1, None, 3]) eq_(results[0].strarr, [util.u("abc"), None]) def test_array_where(self, connection): arrtable = self.tables.arrtable connection.execute( arrtable.insert(), dict( intarr=[1, 2, 3], strarr=[util.u("abc"), util.u("def")], ), ) connection.execute( arrtable.insert(), dict(intarr=[4, 5, 6], strarr=util.u("ABC")) ) results = connection.execute( arrtable.select().where(arrtable.c.intarr == [1, 2, 3]) ).fetchall() eq_(len(results), 1) eq_(results[0].intarr, [1, 2, 3]) def test_array_concat(self, connection): arrtable = self.tables.arrtable connection.execute( arrtable.insert(), dict(intarr=[1, 2, 3], strarr=[util.u("abc"), util.u("def")]), ) results = connection.execute( select(arrtable.c.intarr + [4, 5, 6]) ).fetchall() eq_(len(results), 1) eq_(results[0][0], [1, 2, 3, 4, 5, 6]) def test_array_comparison(self, connection): arrtable = self.tables.arrtable connection.execute( arrtable.insert(), dict( id=5, intarr=[1, 2, 3], strarr=[util.u("abc"), util.u("def")] ), ) results = connection.execute( select(arrtable.c.id).where(arrtable.c.intarr < [4, 5, 6]) ).fetchall() eq_(len(results), 1) eq_(results[0][0], 5) def test_array_subtype_resultprocessor(self, connection): arrtable = self.tables.arrtable connection.execute( arrtable.insert(), dict( intarr=[4, 5, 6], strarr=[[util.ue("m\xe4\xe4")], [util.ue("m\xf6\xf6")]], ), ) connection.execute( arrtable.insert(), dict( intarr=[1, 2, 3], strarr=[util.ue("m\xe4\xe4"), util.ue("m\xf6\xf6")], ), ) results = connection.execute( arrtable.select().order_by(arrtable.c.intarr) ).fetchall() eq_(len(results), 2) eq_(results[0].strarr, [util.ue("m\xe4\xe4"), util.ue("m\xf6\xf6")]) eq_( results[1].strarr, [[util.ue("m\xe4\xe4")], [util.ue("m\xf6\xf6")]], ) def test_array_literal_roundtrip(self, connection): eq_( connection.scalar( select(postgresql.array([1, 2]) + 
postgresql.array([3, 4, 5])) ), [1, 2, 3, 4, 5], ) eq_( connection.scalar( select( (postgresql.array([1, 2]) + postgresql.array([3, 4, 5]))[3] ) ), 3, ) eq_( connection.scalar( select( (postgresql.array([1, 2]) + postgresql.array([3, 4, 5]))[ 2:4 ] ) ), [2, 3, 4], ) def test_array_literal_multidimensional_roundtrip(self, connection): eq_( connection.scalar( select( postgresql.array( [postgresql.array([1, 2]), postgresql.array([3, 4])] ) ) ), [[1, 2], [3, 4]], ) eq_( connection.scalar( select( postgresql.array( [postgresql.array([1, 2]), postgresql.array([3, 4])] )[2][1] ) ), 3, ) def test_array_literal_compare(self, connection): eq_( connection.scalar(select(postgresql.array([1, 2]) < [3, 4, 5])), True, ) def test_array_getitem_single_exec(self, connection): arrtable = self.tables.arrtable self._fixture_456(arrtable, connection) eq_(connection.scalar(select(arrtable.c.intarr[2])), 5) connection.execute(arrtable.update().values({arrtable.c.intarr[2]: 7})) eq_(connection.scalar(select(arrtable.c.intarr[2])), 7) def test_array_getitem_slice_exec(self, connection): arrtable = self.tables.arrtable connection.execute( arrtable.insert(), dict( intarr=[4, 5, 6], strarr=[util.u("abc"), util.u("def")], ), ) eq_(connection.scalar(select(arrtable.c.intarr[2:3])), [5, 6]) connection.execute( arrtable.update().values({arrtable.c.intarr[2:3]: [7, 8]}) ) eq_(connection.scalar(select(arrtable.c.intarr[2:3])), [7, 8]) def test_multi_dim_roundtrip(self, connection): arrtable = self.tables.arrtable connection.execute( arrtable.insert(), dict(dimarr=[[1, 2, 3], [4, 5, 6]]) ) eq_( connection.scalar(select(arrtable.c.dimarr)), [[-1, 0, 1], [2, 3, 4]], ) def test_array_any_exec(self, connection): arrtable = self.tables.arrtable connection.execute(arrtable.insert(), dict(intarr=[4, 5, 6])) eq_( connection.scalar( select(arrtable.c.intarr).where( postgresql.Any(5, arrtable.c.intarr) ) ), [4, 5, 6], ) def test_array_all_exec(self, connection): arrtable = self.tables.arrtable 
    def test_tuple_flag(self, connection, metadata):
        """``as_tuple=True`` returns array values as (nested) tuples.

        Tuples, unlike lists, are hashable, which is asserted at the end
        by collecting the returned values into a set.
        """
        t1 = Table(
            "t1",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", self.ARRAY(String(5), as_tuple=True)),
            Column(
                "data2", self.ARRAY(Numeric(asdecimal=False), as_tuple=True)
            ),
        )
        metadata.create_all(connection)
        connection.execute(
            t1.insert(), dict(id=1, data=["1", "2", "3"], data2=[5.4, 5.6])
        )
        connection.execute(
            t1.insert(), dict(id=2, data=["4", "5", "6"], data2=[1.0])
        )
        # row 3 supplies two-dimensional values; expected result nests
        # tuples one level deeper
        connection.execute(
            t1.insert(),
            dict(
                id=3,
                data=[["4", "5"], ["6", "7"]],
                data2=[[5.4, 5.6], [1.0, 1.1]],
            ),
        )
        r = connection.execute(t1.select().order_by(t1.c.id)).fetchall()
        eq_(
            r,
            [
                (1, ("1", "2", "3"), (5.4, 5.6)),
                (2, ("4", "5", "6"), (1.0,)),
                (3, (("4", "5"), ("6", "7")), ((5.4, 5.6), (1.0, 1.1))),
            ],
        )

        # hashable
        eq_(
            set(row[1] for row in r),
            set([("1", "2", "3"), ("4", "5", "6"), (("4", "5"), ("6", "7"))]),
        )
class money_str(str):
    # str subclass used as an expected-value wrapper for MONEY results:
    # equality strips every character except digits and "." from the
    # *other* operand (e.g. "$", ",") and compares numerically.
    def __eq__(self, other):
        stripped = re.sub(r"[^\d\.]", "", other)
        return float(self) == float(stripped)

    def __ne__(self, other):
        return not (self == other)
% x}, {"c": "3"}, {"c": "c2"}, {"d": "e"}, ], testing.requires.hstore, ), (postgresql.ENUM(AnEnum), enum_values), (sqltypes.Enum(AnEnum, native_enum=True), enum_values), (sqltypes.Enum(AnEnum, native_enum=False), enum_values), ] if not exclude_json: elements.extend( [ (sqltypes.JSON, json_values), (postgresql.JSON, json_values), ] ) return testing.combinations_list( elements, argnames="type_,gen", id_="na" ) @classmethod def _cls_type_combinations(cls, **kw): return ArrayRoundTripTest.__dict__["_type_combinations"](**kw) @testing.fixture(params=[True, False]) def type_specific_fixture(self, request, metadata, connection, type_): use_variant = request.param meta = MetaData() if use_variant: typ = self.ARRAY(type_).with_variant(String(), "other") else: typ = self.ARRAY(type_) table = Table( "foo", meta, Column("id", Integer), Column("bar", typ), ) meta.create_all(connection) def go(gen): connection.execute( table.insert(), [{"id": 1, "bar": gen(1)}, {"id": 2, "bar": gen(2)}], ) return table return go @_type_combinations() def test_type_specific_value_select( self, type_specific_fixture, connection, type_, gen ): table = type_specific_fixture(gen) rows = connection.execute( select(table.c.bar).order_by(table.c.id) ).all() eq_(rows, [(gen(1),), (gen(2),)]) @_type_combinations() def test_type_specific_value_update( self, type_specific_fixture, connection, type_, gen ): table = type_specific_fixture(gen) new_gen = gen(3) connection.execute( table.update().where(table.c.id == 2).values(bar=new_gen) ) eq_( new_gen, connection.scalar(select(table.c.bar).where(table.c.id == 2)), ) @_type_combinations() def test_type_specific_slice_update( self, type_specific_fixture, connection, type_, gen ): table = type_specific_fixture(gen) new_gen = gen(3) if isinstance(table.c.bar.type, Variant): # this is not likely to occur to users but we need to just # exercise this as far as we can expr = type_coerce(table.c.bar, ARRAY(type_))[1:3] else: expr = table.c.bar[1:3] connection.execute( 
table.update().where(table.c.id == 2).values({expr: new_gen[1:4]}) ) rows = connection.execute( select(table.c.bar).order_by(table.c.id) ).all() sliced_gen = gen(2) sliced_gen[0:3] = new_gen[1:4] eq_(rows, [(gen(1),), (sliced_gen,)]) @_type_combinations(exclude_json=True) def test_type_specific_value_delete( self, type_specific_fixture, connection, type_, gen ): table = type_specific_fixture(gen) new_gen = gen(2) connection.execute(table.delete().where(table.c.bar == new_gen)) eq_(connection.scalar(select(func.count(table.c.id))), 1) class CoreArrayRoundTripTest( ArrayRoundTripTest, fixtures.TablesTest, AssertsExecutionResults ): ARRAY = sqltypes.ARRAY class PGArrayRoundTripTest( ArrayRoundTripTest, fixtures.TablesTest, AssertsExecutionResults ): ARRAY = postgresql.ARRAY @ArrayRoundTripTest._cls_type_combinations(exclude_json=True) def test_type_specific_contains( self, type_specific_fixture, connection, type_, gen ): table = type_specific_fixture(gen) connection.execute( table.insert(), [{"id": 1, "bar": gen(1)}, {"id": 2, "bar": gen(2)}], ) id_, value = connection.execute( select(table).where(table.c.bar.contains(gen(1))) ).first() eq_(id_, 1) eq_(value, gen(1)) @testing.combinations( (set,), (list,), (lambda elem: (x for x in elem),), argnames="struct" ) def test_undim_array_contains_typed_exec(self, struct, connection): arrtable = self.tables.arrtable self._fixture_456(arrtable, connection) eq_( connection.scalar( select(arrtable.c.intarr).where( arrtable.c.intarr.contains(struct([4, 5])) ) ), [4, 5, 6], ) @testing.combinations( (set,), (list,), (lambda elem: (x for x in elem),), argnames="struct" ) def test_dim_array_contains_typed_exec(self, struct, connection): dim_arrtable = self.tables.dim_arrtable self._fixture_456(dim_arrtable, connection) eq_( connection.scalar( select(dim_arrtable.c.intarr).where( dim_arrtable.c.intarr.contains(struct([4, 5])) ) ), [4, 5, 6], ) def test_array_contained_by_exec(self, connection): arrtable = self.tables.arrtable 
class _ArrayOfEnum(TypeDecorator):
    """Historical workaround type for ARRAY-of-ENUM on psycopg2."""

    # previous workaround for array of enum
    impl = postgresql.ARRAY
    cache_ok = True

    def bind_expression(self, bindvalue):
        # render bound parameters with an explicit CAST so the server
        # coerces the textual value into the enum-array type
        return sa.cast(bindvalue, self)

    def result_processor(self, dialect, coltype):
        """Parse the raw ``{a,b,c}`` string form into a list, then hand
        the elements to the underlying ARRAY result processor."""
        super_rp = super(_ArrayOfEnum, self).result_processor(dialect, coltype)

        def handle_raw_string(value):
            # strip the surrounding braces; empty braces -> empty list
            inner = re.match(r"^{(.*)}$", value).group(1)
            return inner.split(",") if inner else []

        def process(value):
            # NULL column value passes through untouched
            if value is None:
                return None
            return super_rp(handle_raw_string(value))

        return process
(_ArrayOfEnum, testing.only_on("postgresql+psycopg2")), argnames="array_cls", ) def test_array_of_enums(self, array_cls, enum_cls, metadata, connection): tbl = Table( "enum_table", self.metadata, Column("id", Integer, primary_key=True), Column( "enum_col", array_cls(enum_cls("foo", "bar", "baz", name="an_enum")), ), ) if util.py3k: from enum import Enum class MyEnum(Enum): a = "aaa" b = "bbb" c = "ccc" tbl.append_column( Column( "pyenum_col", array_cls(enum_cls(MyEnum)), ), ) self.metadata.create_all(connection) connection.execute( tbl.insert(), [{"enum_col": ["foo"]}, {"enum_col": ["foo", "bar"]}] ) sel = select(tbl.c.enum_col).order_by(tbl.c.id) eq_( connection.execute(sel).fetchall(), [(["foo"],), (["foo", "bar"],)] ) if util.py3k: connection.execute(tbl.insert(), {"pyenum_col": [MyEnum.a]}) sel = select(tbl.c.pyenum_col).order_by(tbl.c.id.desc()) eq_(connection.scalar(sel), [MyEnum.a]) self.metadata.drop_all(connection) class ArrayJSON(fixtures.TestBase): __backend__ = True __only_on__ = "postgresql" __unsupported_on__ = ("postgresql+pg8000",) @testing.combinations( sqltypes.ARRAY, postgresql.ARRAY, argnames="array_cls" ) @testing.combinations( sqltypes.JSON, postgresql.JSON, postgresql.JSONB, argnames="json_cls" ) def test_array_of_json(self, array_cls, json_cls, metadata, connection): tbl = Table( "json_table", self.metadata, Column("id", Integer, primary_key=True), Column( "json_col", array_cls(json_cls), ), ) self.metadata.create_all(connection) connection.execute( tbl.insert(), [ {"id": 1, "json_col": ["foo"]}, {"id": 2, "json_col": [{"foo": "bar"}, [1]]}, {"id": 3, "json_col": [None]}, {"id": 4, "json_col": [42]}, {"id": 5, "json_col": [True]}, {"id": 6, "json_col": None}, ], ) sel = select(tbl.c.json_col).order_by(tbl.c.id) eq_( connection.execute(sel).fetchall(), [ (["foo"],), ([{"foo": "bar"}, [1]],), ([None],), ([42],), ([True],), (None,), ], ) eq_( connection.exec_driver_sql( """select json_col::text = array['"foo"']::json[]::text""" " from 
class HashableFlagORMTest(fixtures.TestBase):
    """test the various 'collection' types that they flip the
    'hashable' flag appropriately.  [ticket:3499]"""

    __only_on__ = "postgresql"

    @testing.combinations(
        (
            "ARRAY",
            postgresql.ARRAY(Text()),
            [["a", "b", "c"], ["d", "e", "f"]],
        ),
        (
            "JSON",
            postgresql.JSON(),
            [
                {"a": "1", "b": "2", "c": "3"},
                {
                    "d": "4",
                    "e": {"e1": "5", "e2": "6"},
                    "f": {"f1": [9, 10, 11]},
                },
            ],
        ),
        (
            "HSTORE",
            postgresql.HSTORE(),
            [{"a": "1", "b": "2", "c": "3"}, {"d": "4", "e": "5", "f": "6"}],
            testing.requires.hstore,
        ),
        (
            "JSONB",
            postgresql.JSONB(),
            [
                {"a": "1", "b": "2", "c": "3"},
                {
                    "d": "4",
                    "e": {"e1": "5", "e2": "6"},
                    "f": {"f1": [9, 10, 11]},
                },
            ],
            testing.requires.postgresql_jsonb,
        ),
        argnames="type_,data",
        id_="iaa",
    )
    def test_hashable_flag(self, metadata, connection, type_, data):
        # maps a one-column entity and queries (entity, column) pairs;
        # NOTE(review): presumably this exercises the ORM's handling of
        # unhashable column values per ticket 3499 -- confirm upstream
        Base = declarative_base(metadata=metadata)

        class A(Base):
            __tablename__ = "a1"
            id = Column(Integer, primary_key=True)
            data = Column(type_)

        Base.metadata.create_all(connection)
        s = Session(connection)
        s.add_all([A(data=elem) for elem in data])
        s.commit()
        eq_(
            [
                (obj.A.id, obj.data)
                for obj in s.query(A, A.data).order_by(A.id)
            ],
            list(enumerate(data, 1)),
        )
class SpecialTypesCompileTest(fixtures.TestBase, AssertsCompiledSQL):
    """DDL string compilation for PG-specific types; no database needed."""

    __dialect__ = "postgresql"

    @testing.combinations(
        (postgresql.BIT(), "BIT(1)"),
        (postgresql.BIT(5), "BIT(5)"),
        (postgresql.BIT(varying=True), "BIT VARYING"),
        (postgresql.BIT(5, varying=True), "BIT VARYING(5)"),
    )
    def test_bit_compile(self, type_, expected):
        # BIT with no length renders as BIT(1); BIT VARYING with no
        # length omits the parenthesized length entirely
        self.assert_compile(type_, expected)
postgresql.BIT), Column("bitstring", postgresql.BIT(4)), Column("addr", postgresql.INET), Column("addr2", postgresql.MACADDR), Column("price", postgresql.MONEY), Column("addr3", postgresql.CIDR), Column("doubleprec", postgresql.DOUBLE_PRECISION), Column("plain_interval", postgresql.INTERVAL), Column("year_interval", y2m()), Column("month_interval", d2s()), Column("precision_interval", postgresql.INTERVAL(precision=3)), Column("tsvector_document", postgresql.TSVECTOR), ) return table def test_reflection(self, special_types_table, connection): # cheat so that the "strict type check" # works special_types_table.c.year_interval.type = postgresql.INTERVAL() special_types_table.c.month_interval.type = postgresql.INTERVAL() m = MetaData() t = Table("sometable", m, autoload_with=connection) self.assert_tables_equal(special_types_table, t, strict_types=True) assert t.c.plain_interval.type.precision is None assert t.c.precision_interval.type.precision == 3 assert t.c.bitstring.type.length == 4 def test_tsvector_round_trip(self, connection, metadata): t = Table("t1", metadata, Column("data", postgresql.TSVECTOR)) t.create(connection) connection.execute(t.insert(), dict(data="a fat cat sat")) eq_(connection.scalar(select(t.c.data)), "'a' 'cat' 'fat' 'sat'") connection.execute( t.update(), dict(data="'a' 'cat' 'fat' 'mat' 'sat'") ) eq_( connection.scalar(select(t.c.data)), "'a' 'cat' 'fat' 'mat' 'sat'", ) def test_bit_reflection(self, metadata, connection): t1 = Table( "t1", metadata, Column("bit1", postgresql.BIT()), Column("bit5", postgresql.BIT(5)), Column("bitvarying", postgresql.BIT(varying=True)), Column("bitvarying5", postgresql.BIT(5, varying=True)), ) t1.create(connection) m2 = MetaData() t2 = Table("t1", m2, autoload_with=connection) eq_(t2.c.bit1.type.length, 1) eq_(t2.c.bit1.type.varying, False) eq_(t2.c.bit5.type.length, 5) eq_(t2.c.bit5.type.varying, False) eq_(t2.c.bitvarying.type.length, None) eq_(t2.c.bitvarying.type.varying, True) 
class UUIDTest(fixtures.TestBase):
    """Test the bind/return values of the UUID type."""

    __only_on__ = "postgresql >= 8.3"
    __backend__ = True

    @testing.combinations(
        (
            "not_as_uuid",
            postgresql.UUID(as_uuid=False),
            str(uuid.uuid4()),
            str(uuid.uuid4()),
        ),
        ("as_uuid", postgresql.UUID(as_uuid=True), uuid.uuid4(), uuid.uuid4()),
        id_="iaaa",
        argnames="datatype, value1, value2",
    )
    def test_round_trip(self, datatype, value1, value2, connection):
        # insert two distinct values, then select everything that is NOT
        # value1 -- exactly value2 must come back, showing bind and
        # result conversion are symmetric for both as_uuid modes
        utable = Table("utable", MetaData(), Column("data", datatype))
        utable.create(connection)
        connection.execute(utable.insert(), {"data": value1})
        connection.execute(utable.insert(), {"data": value2})
        r = connection.execute(
            select(utable.c.data).where(utable.c.data != value1)
        )
        eq_(r.fetchone()[0], value2)
        eq_(r.fetchone(), None)

    @testing.combinations(
        (
            "as_uuid",
            postgresql.ARRAY(postgresql.UUID(as_uuid=True)),
            [uuid.uuid4(), uuid.uuid4()],
            [uuid.uuid4(), uuid.uuid4()],
        ),
        (
            "not_as_uuid",
            postgresql.ARRAY(postgresql.UUID(as_uuid=False)),
            [str(uuid.uuid4()), str(uuid.uuid4())],
            [str(uuid.uuid4()), str(uuid.uuid4())],
        ),
        id_="iaaa",
        argnames="datatype, value1, value2",
    )
    # passes pg8000 as of 1.19.1
    def test_uuid_array(self, datatype, value1, value2, connection):
        # ARRAY-of-UUID reuses the scalar round-trip logic verbatim
        self.test_round_trip(datatype, value1, value2, connection)
eq_( proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])), '"key1"=>"value1", "key2"=>"value2"', ) def test_bind_serialize_with_slashes_and_quotes(self): dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_bind_processor(dialect) eq_(proc({'\\"a': '\\"1'}), '"\\\\\\"a"=>"\\\\\\"1"') def test_parse_error(self): dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_result_processor( dialect, None ) assert_raises_message( ValueError, r"""After u?'\[\.\.\.\], "key1"=>"value1", ', could not parse """ r"""residual at position 36: u?'crapcrapcrap, "key3"\[\.\.\.\]""", proc, '"key2"=>"value2", "key1"=>"value1", ' 'crapcrapcrap, "key3"=>"value3"', ) def test_result_deserialize_default(self): dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_result_processor( dialect, None ) eq_( proc('"key2"=>"value2", "key1"=>"value1"'), {"key1": "value1", "key2": "value2"}, ) def test_result_deserialize_with_slashes_and_quotes(self): dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_result_processor( dialect, None ) eq_(proc('"\\\\\\"a"=>"\\\\\\"1"'), {'\\"a': '\\"1'}) def test_bind_serialize_psycopg2(self): from sqlalchemy.dialects.postgresql import psycopg2 dialect = psycopg2.PGDialect_psycopg2() dialect._has_native_hstore = True proc = self.test_table.c.hash.type._cached_bind_processor(dialect) is_(proc, None) dialect = psycopg2.PGDialect_psycopg2() dialect._has_native_hstore = False proc = self.test_table.c.hash.type._cached_bind_processor(dialect) eq_( proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])), '"key1"=>"value1", "key2"=>"value2"', ) def test_result_deserialize_psycopg2(self): from sqlalchemy.dialects.postgresql import psycopg2 dialect = psycopg2.PGDialect_psycopg2() dialect._has_native_hstore = True proc = self.test_table.c.hash.type._cached_result_processor( dialect, 
    def test_where_has_key(self):
        """HSTORE ``has_key`` compiles to the PG ``?`` operator.

        ``getattr`` is used instead of a direct attribute access so that
        the 2to3 fixer does not rewrite ``has_key`` (a py2 dict method).
        """
        self._test_where(
            # hide from 2to3
            getattr(self.hashcol, "has_key")("foo"),
            "test_table.hash ? %(hash_1)s",
        )
), ( lambda self: self.hashcol.slice(postgresql.array(["1", "2"])), ( "slice(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) " "AS slice_1" ), True, ), ( lambda self: hstore("foo", "3")["foo"], "hstore(%(hstore_1)s, %(hstore_2)s) -> %(hstore_3)s AS anon_1", False, ), ( lambda self: hstore( postgresql.array(["1", "2"]), postgresql.array(["3", None]) )["1"], ( "hstore(ARRAY[%(param_1)s, %(param_2)s], " "ARRAY[%(param_3)s, NULL]) -> %(hstore_1)s AS anon_1" ), False, ), ( lambda self: hstore(postgresql.array(["1", "2", "3", None]))["3"], ( "hstore(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s, NULL]) " "-> %(hstore_1)s AS anon_1" ), False, ), ( lambda self: self.hashcol.concat( hstore(cast(self.test_table.c.id, Text), "3") ), ( "test_table.hash || hstore(CAST(test_table.id AS TEXT), " "%(hstore_1)s) AS anon_1" ), True, ), ( lambda self: hstore("foo", "bar") + self.hashcol, "hstore(%(hstore_1)s, %(hstore_2)s) || test_table.hash AS anon_1", True, ), ( lambda self: (self.hashcol + self.hashcol)["foo"], "(test_table.hash || test_table.hash) -> %(param_1)s AS anon_1", True, ), ( lambda self: self.hashcol["foo"] != None, # noqa "(test_table.hash -> %(hash_1)s) IS NOT NULL AS anon_1", True, ), ( # hide from 2to3 lambda self: getattr(self.hashcol, "keys")(), "akeys(test_table.hash) AS akeys_1", True, ), ( lambda self: self.hashcol.vals(), "avals(test_table.hash) AS avals_1", True, ), ( lambda self: self.hashcol.array(), "hstore_to_array(test_table.hash) AS hstore_to_array_1", True, ), ( lambda self: self.hashcol.matrix(), "hstore_to_matrix(test_table.hash) AS hstore_to_matrix_1", True, ), ) def test_cols(self, colclause_fn, expected, from_): colclause = colclause_fn(self) stmt = select(colclause) self.assert_compile( stmt, ("SELECT %s" + (" FROM test_table" if from_ else "")) % expected, ) class HStoreRoundTripTest(fixtures.TablesTest): __requires__ = ("hstore",) __dialect__ = "postgresql" __backend__ = True @classmethod def define_tables(cls, metadata): Table( "data_table", 
metadata, Column("id", Integer, primary_key=True), Column("name", String(30), nullable=False), Column("data", HSTORE), ) def _fixture_data(self, connection): data_table = self.tables.data_table connection.execute( data_table.insert(), [ {"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}}, {"name": "r2", "data": {"k1": "r2v1", "k2": "r2v2"}}, {"name": "r3", "data": {"k1": "r3v1", "k2": "r3v2"}}, {"name": "r4", "data": {"k1": "r4v1", "k2": "r4v2"}}, {"name": "r5", "data": {"k1": "r5v1", "k2": "r5v2"}}, ], ) def _assert_data(self, compare, conn): data = conn.execute( select(self.tables.data_table.c.data).order_by( self.tables.data_table.c.name ) ).fetchall() eq_([d for d, in data], compare) def _test_insert(self, connection): connection.execute( self.tables.data_table.insert(), {"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}}, ) self._assert_data([{"k1": "r1v1", "k2": "r1v2"}], connection) @testing.fixture def non_native_hstore_connection(self, testing_engine): local_engine = testing.requires.psycopg2_native_hstore.enabled if local_engine: engine = testing_engine(options=dict(use_native_hstore=False)) else: engine = testing.db conn = engine.connect() trans = conn.begin() yield conn try: trans.rollback() finally: conn.close() def test_reflect(self, connection): insp = inspect(connection) cols = insp.get_columns("data_table") assert isinstance(cols[2]["type"], HSTORE) def test_literal_round_trip(self, connection): # in particular, this tests that the array index # operator against the function is handled by PG; with some # array functions it requires outer parenthezisation on the left and # we may not be doing that here expr = hstore( postgresql.array(["1", "2"]), postgresql.array(["3", None]) )["1"] eq_(connection.scalar(select(expr)), "3") @testing.requires.psycopg2_native_hstore def test_insert_native(self, connection): self._test_insert(connection) def test_insert_python(self, non_native_hstore_connection): self._test_insert(non_native_hstore_connection) 
@testing.requires.psycopg2_native_hstore def test_criterion_native(self, connection): self._fixture_data(connection) self._test_criterion(connection) def test_criterion_python(self, non_native_hstore_connection): self._fixture_data(non_native_hstore_connection) self._test_criterion(non_native_hstore_connection) def _test_criterion(self, connection): data_table = self.tables.data_table result = connection.execute( select(data_table.c.data).where(data_table.c.data["k1"] == "r3v1") ).first() eq_(result, ({"k1": "r3v1", "k2": "r3v2"},)) def _test_fixed_round_trip(self, connection): s = select( hstore( array(["key1", "key2", "key3"]), array(["value1", "value2", "value3"]), ) ) eq_( connection.scalar(s), {"key1": "value1", "key2": "value2", "key3": "value3"}, ) def test_fixed_round_trip_python(self, non_native_hstore_connection): self._test_fixed_round_trip(non_native_hstore_connection) @testing.requires.psycopg2_native_hstore def test_fixed_round_trip_native(self, connection): self._test_fixed_round_trip(connection) def _test_unicode_round_trip(self, connection): s = select( hstore( array([util.u("réveillé"), util.u("drôle"), util.u("S’il")]), array([util.u("réveillé"), util.u("drôle"), util.u("S’il")]), ) ) eq_( connection.scalar(s), { util.u("réveillé"): util.u("réveillé"), util.u("drôle"): util.u("drôle"), util.u("S’il"): util.u("S’il"), }, ) @testing.requires.psycopg2_native_hstore def test_unicode_round_trip_python(self, non_native_hstore_connection): self._test_unicode_round_trip(non_native_hstore_connection) @testing.requires.psycopg2_native_hstore def test_unicode_round_trip_native(self, connection): self._test_unicode_round_trip(connection) def test_escaped_quotes_round_trip_python( self, non_native_hstore_connection ): self._test_escaped_quotes_round_trip(non_native_hstore_connection) @testing.requires.psycopg2_native_hstore def test_escaped_quotes_round_trip_native(self, connection): self._test_escaped_quotes_round_trip(connection) def 
_test_escaped_quotes_round_trip(self, connection): connection.execute( self.tables.data_table.insert(), {"name": "r1", "data": {r"key \"foo\"": r'value \"bar"\ xyz'}}, ) self._assert_data([{r"key \"foo\"": r'value \"bar"\ xyz'}], connection) def test_orm_round_trip(self, registry): class Data(object): def __init__(self, name, data): self.name = name self.data = data registry.map_imperatively(Data, self.tables.data_table) with fixtures.fixture_session() as s: d = Data( name="r1", data={"key1": "value1", "key2": "value2", "key3": "value3"}, ) s.add(d) eq_(s.query(Data.data, Data).all(), [(d.data, d)]) class _RangeTypeCompilation(AssertsCompiledSQL, fixtures.TestBase): __dialect__ = "postgresql" # operator tests @classmethod def setup_test_class(cls): table = Table( "data_table", MetaData(), Column("range", cls._col_type, primary_key=True), ) cls.col = table.c.range def _test_clause(self, colclause, expected, type_): self.assert_compile(colclause, expected) is_(colclause.type._type_affinity, type_._type_affinity) def test_where_equal(self): self._test_clause( self.col == self._data_str(), "data_table.range = %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_where_not_equal(self): self._test_clause( self.col != self._data_str(), "data_table.range <> %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_where_is_null(self): self._test_clause( self.col == None, "data_table.range IS NULL", sqltypes.BOOLEANTYPE ) def test_where_is_not_null(self): self._test_clause( self.col != None, "data_table.range IS NOT NULL", sqltypes.BOOLEANTYPE, ) def test_where_less_than(self): self._test_clause( self.col < self._data_str(), "data_table.range < %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_where_greater_than(self): self._test_clause( self.col > self._data_str(), "data_table.range > %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_where_less_than_or_equal(self): self._test_clause( self.col <= self._data_str(), "data_table.range <= %(range_1)s", sqltypes.BOOLEANTYPE, ) def 
test_where_greater_than_or_equal(self): self._test_clause( self.col >= self._data_str(), "data_table.range >= %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_contains(self): self._test_clause( self.col.contains(self._data_str()), "data_table.range @> %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_contained_by(self): self._test_clause( self.col.contained_by(self._data_str()), "data_table.range <@ %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_overlaps(self): self._test_clause( self.col.overlaps(self._data_str()), "data_table.range && %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_strictly_left_of(self): self._test_clause( self.col << self._data_str(), "data_table.range << %(range_1)s", sqltypes.BOOLEANTYPE, ) self._test_clause( self.col.strictly_left_of(self._data_str()), "data_table.range << %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_strictly_right_of(self): self._test_clause( self.col >> self._data_str(), "data_table.range >> %(range_1)s", sqltypes.BOOLEANTYPE, ) self._test_clause( self.col.strictly_right_of(self._data_str()), "data_table.range >> %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_not_extend_right_of(self): self._test_clause( self.col.not_extend_right_of(self._data_str()), "data_table.range &< %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_not_extend_left_of(self): self._test_clause( self.col.not_extend_left_of(self._data_str()), "data_table.range &> %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_adjacent_to(self): self._test_clause( self.col.adjacent_to(self._data_str()), "data_table.range -|- %(range_1)s", sqltypes.BOOLEANTYPE, ) def test_union(self): self._test_clause( self.col + self.col, "data_table.range + data_table.range", self.col.type, ) def test_intersection(self): self._test_clause( self.col * self.col, "data_table.range * data_table.range", self.col.type, ) def test_different(self): self._test_clause( self.col - self.col, "data_table.range - data_table.range", self.col.type, ) class _RangeTypeRoundTrip(fixtures.TablesTest): 
__requires__ = "range_types", "psycopg2_compatibility" __backend__ = True def extras(self): # done this way so we don't get ImportErrors with # older psycopg2 versions. if testing.against("postgresql+psycopg2cffi"): from psycopg2cffi import extras else: from psycopg2 import extras return extras @classmethod def define_tables(cls, metadata): # no reason ranges shouldn't be primary keys, # so lets just use them as such table = Table( "data_table", metadata, Column("range", cls._col_type, primary_key=True), ) cls.col = table.c.range def test_actual_type(self): eq_(str(self._col_type()), self._col_str) def test_reflect(self, connection): from sqlalchemy import inspect insp = inspect(connection) cols = insp.get_columns("data_table") assert isinstance(cols[0]["type"], self._col_type) def _assert_data(self, conn): data = conn.execute(select(self.tables.data_table.c.range)).fetchall() eq_(data, [(self._data_obj(),)]) def test_insert_obj(self, connection): connection.execute( self.tables.data_table.insert(), {"range": self._data_obj()} ) self._assert_data(connection) def test_insert_text(self, connection): connection.execute( self.tables.data_table.insert(), {"range": self._data_str()} ) self._assert_data(connection) def test_union_result(self, connection): # insert connection.execute( self.tables.data_table.insert(), {"range": self._data_str()} ) # select range_ = self.tables.data_table.c.range data = connection.execute(select(range_ + range_)).fetchall() eq_(data, [(self._data_obj(),)]) def test_intersection_result(self, connection): # insert connection.execute( self.tables.data_table.insert(), {"range": self._data_str()} ) # select range_ = self.tables.data_table.c.range data = connection.execute(select(range_ * range_)).fetchall() eq_(data, [(self._data_obj(),)]) def test_difference_result(self, connection): # insert connection.execute( self.tables.data_table.insert(), {"range": self._data_str()} ) # select range_ = self.tables.data_table.c.range data = 
connection.execute(select(range_ - range_)).fetchall() eq_(data, [(self._data_obj().__class__(empty=True),)]) class _Int4RangeTests(object): _col_type = INT4RANGE _col_str = "INT4RANGE" def _data_str(self): return "[1,2)" def _data_obj(self): return self.extras().NumericRange(1, 2) class _Int8RangeTests(object): _col_type = INT8RANGE _col_str = "INT8RANGE" def _data_str(self): return "[9223372036854775806,9223372036854775807)" def _data_obj(self): return self.extras().NumericRange( 9223372036854775806, 9223372036854775807 ) class _NumRangeTests(object): _col_type = NUMRANGE _col_str = "NUMRANGE" def _data_str(self): return "[1.0,2.0)" def _data_obj(self): return self.extras().NumericRange( decimal.Decimal("1.0"), decimal.Decimal("2.0") ) class _DateRangeTests(object): _col_type = DATERANGE _col_str = "DATERANGE" def _data_str(self): return "[2013-03-23,2013-03-24)" def _data_obj(self): return self.extras().DateRange( datetime.date(2013, 3, 23), datetime.date(2013, 3, 24) ) class _DateTimeRangeTests(object): _col_type = TSRANGE _col_str = "TSRANGE" def _data_str(self): return "[2013-03-23 14:30,2013-03-23 23:30)" def _data_obj(self): return self.extras().DateTimeRange( datetime.datetime(2013, 3, 23, 14, 30), datetime.datetime(2013, 3, 23, 23, 30), ) class _DateTimeTZRangeTests(object): _col_type = TSTZRANGE _col_str = "TSTZRANGE" # make sure we use one, steady timestamp with timezone pair # for all parts of all these tests _tstzs = None def tstzs(self): if self._tstzs is None: with testing.db.connect() as connection: lower = connection.scalar(func.current_timestamp().select()) upper = lower + datetime.timedelta(1) self._tstzs = (lower, upper) return self._tstzs def _data_str(self): return "[%s,%s)" % self.tstzs() def _data_obj(self): return self.extras().DateTimeTZRange(*self.tstzs()) class Int4RangeCompilationTest(_Int4RangeTests, _RangeTypeCompilation): pass class Int4RangeRoundTripTest(_Int4RangeTests, _RangeTypeRoundTrip): pass class 
Int8RangeCompilationTest(_Int8RangeTests, _RangeTypeCompilation): pass class Int8RangeRoundTripTest(_Int8RangeTests, _RangeTypeRoundTrip): pass class NumRangeCompilationTest(_NumRangeTests, _RangeTypeCompilation): pass class NumRangeRoundTripTest(_NumRangeTests, _RangeTypeRoundTrip): pass class DateRangeCompilationTest(_DateRangeTests, _RangeTypeCompilation): pass class DateRangeRoundTripTest(_DateRangeTests, _RangeTypeRoundTrip): pass class DateTimeRangeCompilationTest(_DateTimeRangeTests, _RangeTypeCompilation): pass class DateTimeRangeRoundTripTest(_DateTimeRangeTests, _RangeTypeRoundTrip): pass class DateTimeTZRangeCompilationTest( _DateTimeTZRangeTests, _RangeTypeCompilation ): pass class DateTimeTZRangeRoundTripTest(_DateTimeTZRangeTests, _RangeTypeRoundTrip): pass class JSONTest(AssertsCompiledSQL, fixtures.TestBase): __dialect__ = "postgresql" def setup_test(self): metadata = MetaData() self.test_table = Table( "test_table", metadata, Column("id", Integer, primary_key=True), Column("test_column", JSON), ) self.jsoncol = self.test_table.c.test_column @testing.combinations( ( lambda self: self.jsoncol["bar"] == None, # noqa "(test_table.test_column -> %(test_column_1)s) IS NULL", ), ( lambda self: self.jsoncol[("foo", 1)] == None, # noqa "(test_table.test_column #> %(test_column_1)s) IS NULL", ), ( lambda self: self.jsoncol["bar"].astext == None, # noqa "(test_table.test_column ->> %(test_column_1)s) IS NULL", ), ( lambda self: self.jsoncol["bar"].astext.cast(Integer) == 5, "CAST((test_table.test_column ->> %(test_column_1)s) AS INTEGER) " "= %(param_1)s", ), ( lambda self: self.jsoncol["bar"].cast(Integer) == 5, "CAST((test_table.test_column -> %(test_column_1)s) AS INTEGER) " "= %(param_1)s", ), ( lambda self: self.jsoncol[("foo", 1)].astext == None, # noqa "(test_table.test_column #>> %(test_column_1)s) IS NULL", ), ) def test_where(self, whereclause_fn, expected): whereclause = whereclause_fn(self) stmt = select(self.test_table).where(whereclause) 
self.assert_compile( stmt, "SELECT test_table.id, test_table.test_column FROM test_table " "WHERE %s" % expected, ) def test_path_typing(self): col = column("x", JSON()) is_(col["q"].type._type_affinity, types.JSON) is_(col[("q",)].type._type_affinity, types.JSON) is_(col["q"]["p"].type._type_affinity, types.JSON) is_(col[("q", "p")].type._type_affinity, types.JSON) def test_custom_astext_type(self): class MyType(types.UserDefinedType): pass col = column("x", JSON(astext_type=MyType)) is_(col["q"].astext.type.__class__, MyType) is_(col[("q", "p")].astext.type.__class__, MyType) is_(col["q"]["p"].astext.type.__class__, MyType) @testing.combinations( ( lambda self: self.jsoncol["foo"], "test_table.test_column -> %(test_column_1)s AS anon_1", True, ) ) def test_cols(self, colclause_fn, expected, from_): colclause = colclause_fn(self) stmt = select(colclause) self.assert_compile( stmt, ("SELECT %s" + (" FROM test_table" if from_ else "")) % expected, ) class JSONRoundTripTest(fixtures.TablesTest): __only_on__ = ("postgresql >= 9.3",) __backend__ = True data_type = JSON @classmethod def define_tables(cls, metadata): Table( "data_table", metadata, Column("id", Integer, primary_key=True), Column("name", String(30), nullable=False), Column("data", cls.data_type), Column("nulldata", cls.data_type(none_as_null=True)), ) def _fixture_data(self, connection): data_table = self.tables.data_table data = [ {"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}}, {"name": "r2", "data": {"k1": "r2v1", "k2": "r2v2"}}, {"name": "r3", "data": {"k1": "r3v1", "k2": "r3v2"}}, {"name": "r4", "data": {"k1": "r4v1", "k2": "r4v2"}}, {"name": "r5", "data": {"k1": "r5v1", "k2": "r5v2", "k3": 5}}, {"name": "r6", "data": {"k1": {"r6v1": {"subr": [1, 2, 3]}}}}, ] connection.execute(data_table.insert(), data) return data def _assert_data(self, compare, conn, column="data"): col = self.tables.data_table.c[column] data = conn.execute( select(col).order_by(self.tables.data_table.c.name) ).fetchall() 
eq_([d for d, in data], compare) def _assert_column_is_NULL(self, conn, column="data"): col = self.tables.data_table.c[column] data = conn.execute(select(col).where(col.is_(null()))).fetchall() eq_([d for d, in data], [None]) def _assert_column_is_JSON_NULL(self, conn, column="data"): col = self.tables.data_table.c[column] data = conn.execute( select(col).where(cast(col, String) == "null") ).fetchall() eq_([d for d, in data], [None]) def test_reflect(self, connection): insp = inspect(connection) cols = insp.get_columns("data_table") assert isinstance(cols[2]["type"], self.data_type) def test_insert(self, connection): connection.execute( self.tables.data_table.insert(), {"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}}, ) self._assert_data([{"k1": "r1v1", "k2": "r1v2"}], connection) def test_insert_nulls(self, connection): connection.execute( self.tables.data_table.insert(), {"name": "r1", "data": null()} ) self._assert_data([None], connection) def test_insert_none_as_null(self, connection): connection.execute( self.tables.data_table.insert(), {"name": "r1", "nulldata": None}, ) self._assert_column_is_NULL(connection, column="nulldata") def test_insert_nulljson_into_none_as_null(self, connection): connection.execute( self.tables.data_table.insert(), {"name": "r1", "nulldata": JSON.NULL}, ) self._assert_column_is_JSON_NULL(connection, column="nulldata") def test_custom_serialize_deserialize(self, testing_engine): import json def loads(value): value = json.loads(value) value["x"] = value["x"] + "_loads" return value def dumps(value): value = dict(value) value["x"] = "dumps_y" return json.dumps(value) engine = testing_engine( options=dict(json_serializer=dumps, json_deserializer=loads) ) s = select(cast({"key": "value", "x": "q"}, self.data_type)) with engine.begin() as conn: eq_(conn.scalar(s), {"key": "value", "x": "dumps_y_loads"}) def test_criterion(self, connection): self._fixture_data(connection) data_table = self.tables.data_table result = connection.execute( 
select(data_table.c.data).where( data_table.c.data["k1"].astext == "r3v1" ) ).first() eq_(result, ({"k1": "r3v1", "k2": "r3v2"},)) result = connection.execute( select(data_table.c.data).where( data_table.c.data["k1"].astext.cast(String) == "r3v1" ) ).first() eq_(result, ({"k1": "r3v1", "k2": "r3v2"},)) def test_path_query(self, connection): self._fixture_data(connection) data_table = self.tables.data_table result = connection.execute( select(data_table.c.name).where( data_table.c.data[("k1", "r6v1", "subr")].astext == "[1, 2, 3]" ) ) eq_(result.scalar(), "r6") @testing.fails_on( "postgresql < 9.4", "Improvement in PostgreSQL behavior?" ) def test_multi_index_query(self, connection): self._fixture_data(connection) data_table = self.tables.data_table result = connection.execute( select(data_table.c.name).where( data_table.c.data["k1"]["r6v1"]["subr"].astext == "[1, 2, 3]" ) ) eq_(result.scalar(), "r6") def test_query_returned_as_text(self, connection): self._fixture_data(connection) data_table = self.tables.data_table result = connection.execute( select(data_table.c.data["k1"].astext) ).first() if connection.dialect.returns_unicode_strings: assert isinstance(result[0], util.text_type) else: assert isinstance(result[0], util.string_types) def test_query_returned_as_int(self, connection): self._fixture_data(connection) data_table = self.tables.data_table result = connection.execute( select(data_table.c.data["k3"].astext.cast(Integer)).where( data_table.c.name == "r5" ) ).first() assert isinstance(result[0], int) def test_fixed_round_trip(self, connection): s = select( cast( {"key": "value", "key2": {"k1": "v1", "k2": "v2"}}, self.data_type, ) ) eq_( connection.scalar(s), {"key": "value", "key2": {"k1": "v1", "k2": "v2"}}, ) def test_unicode_round_trip(self, connection): s = select( cast( { util.u("réveillé"): util.u("réveillé"), "data": {"k1": util.u("drôle")}, }, self.data_type, ) ) eq_( connection.scalar(s), { util.u("réveillé"): util.u("réveillé"), "data": {"k1": 
util.u("drôle")}, }, ) def test_eval_none_flag_orm(self, connection): Base = declarative_base() class Data(Base): __table__ = self.tables.data_table with Session(connection) as s: d1 = Data(name="d1", data=None, nulldata=None) s.add(d1) s.commit() s.bulk_insert_mappings( Data, [{"name": "d2", "data": None, "nulldata": None}] ) eq_( s.query( cast(self.tables.data_table.c.data, String), cast(self.tables.data_table.c.nulldata, String), ) .filter(self.tables.data_table.c.name == "d1") .first(), ("null", None), ) eq_( s.query( cast(self.tables.data_table.c.data, String), cast(self.tables.data_table.c.nulldata, String), ) .filter(self.tables.data_table.c.name == "d2") .first(), ("null", None), ) def test_literal(self, connection): exp = self._fixture_data(connection) result = connection.exec_driver_sql( "select data from data_table order by name" ) res = list(result) eq_(len(res), len(exp)) for row, expected in zip(res, exp): eq_(row[0], expected["data"]) class JSONBTest(JSONTest): def setup_test(self): metadata = MetaData() self.test_table = Table( "test_table", metadata, Column("id", Integer, primary_key=True), Column("test_column", JSONB), ) self.jsoncol = self.test_table.c.test_column @testing.combinations( ( # hide from 2to3 lambda self: getattr(self.jsoncol, "has_key")("data"), "test_table.test_column ? 
%(test_column_1)s", ), ( lambda self: self.jsoncol.has_all( {"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}} ), "test_table.test_column ?& %(test_column_1)s", ), ( lambda self: self.jsoncol.has_any( postgresql.array(["name", "data"]) ), "test_table.test_column ?| ARRAY[%(param_1)s, %(param_2)s]", ), ( lambda self: self.jsoncol.contains({"k1": "r1v1"}), "test_table.test_column @> %(test_column_1)s", ), ( lambda self: self.jsoncol.contained_by({"foo": "1", "bar": None}), "test_table.test_column <@ %(test_column_1)s", ), ) def test_where(self, whereclause_fn, expected): super(JSONBTest, self).test_where(whereclause_fn, expected) class JSONBRoundTripTest(JSONRoundTripTest): __requires__ = ("postgresql_jsonb",) data_type = JSONB @testing.requires.postgresql_utf8_server_encoding def test_unicode_round_trip(self, connection): super(JSONBRoundTripTest, self).test_unicode_round_trip(connection) class JSONBSuiteTest(suite.JSONTest): __requires__ = ("postgresql_jsonb",) datatype = JSONB class JSONBCastSuiteTest(suite.JSONLegacyStringCastIndexTest): __requires__ = ("postgresql_jsonb",) datatype = JSONB
monetate/sqlalchemy
test/dialect/postgresql/test_types.py
Python
mit
121,945
import glob


def handle(userToken, _):
    """Toggle the ready status of the user's slot in their current match.

    Args:
        userToken: token of the user toggling ready; must expose ``userID``
            and ``matchID`` attributes.
        _: unused packet payload (kept for the event-handler signature).

    Returns:
        None. Silently does nothing if the match no longer exists or the
        user has no slot in it.
    """
    userID = userToken.userID

    # The match may have been disposed between the client packet and this
    # handler running, so re-check it is still registered.
    matchID = userToken.matchID
    if matchID not in glob.matches.matches:
        return
    match = glob.matches.matches[matchID]

    # Slot 0 is a valid slot, so compare against None explicitly
    # (PEP 8: use `is not None`, never `!= None`).
    slotID = match.getUserSlotID(userID)
    if slotID is not None:
        match.toggleSlotReady(slotID)
osuripple/ripple
c.ppy.sh/matchReadyEvent.py
Python
mit
370
#-*- coding: utf-8 -*-
# Copyright 2022 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Job utility methods for Turbinia."""

import logging

from turbinia import config
from turbinia.jobs import manager as job_manager

log = logging.getLogger('turbinia')

config.LoadConfig()


def register_job_timeouts(dependencies):
  """Registers a timeout for each job.

  Jobs whose dependency entry carries no usable integer ``timeout`` value
  are registered with a default of 3600 seconds.

  Args:
    dependencies(dict): dependencies to grab timeout value from.
  """
  log.info('Registering job timeouts.')
  default_timeout = 3600
  known_jobs = set(job_manager.JobsManager.GetJobNames())
  for job_name, job_values in dependencies.items():
    # Skip dependency entries that do not correspond to a registered job.
    if job_name not in known_jobs:
      continue
    configured = job_values.get('timeout')
    if isinstance(configured, int):
      timeout = configured
    else:
      log.warning(
          'No timeout found for job: {0:s}. Setting default timeout of {1:d} seconds.'
          .format(job_name, default_timeout))
      timeout = default_timeout
    job_manager.JobsManager.RegisterTimeout(job_name, timeout)
google/turbinia
turbinia/job_utils.py
Python
apache-2.0
1,506
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import hashlib import tempfile import unittest import pytest # type: ignore from _pytest.monkeypatch import MonkeyPatch # type: ignore from monty.json import MontyDecoder from pymatgen.core import SETTINGS from pymatgen.core import Lattice, Species, Structure from pymatgen.core.surface import SlabGenerator from pymatgen.io.vasp.inputs import Kpoints, Poscar from pymatgen.io.vasp.outputs import Vasprun from pymatgen.io.vasp.sets import * from pymatgen.util.testing import PymatgenTest MODULE_DIR = Path(__file__).resolve().parent dec = MontyDecoder() class SetChangeCheckTest(PymatgenTest): def test_sets_changed(self): # WARNING! # These tests will fail when you change an input set. # They are included as a sanity check: if you want to change # an input set, please make sure to notify the users for that set. # For sets starting with "MVL" this is @shyuep, for sets starting # with "MP" this is @shyuep and @mkhorton. os.chdir(MODULE_DIR / "..") input_sets = glob.glob("*.yaml") hashes = {} for input_set in input_sets: with open(input_set, "r") as f: hashes[input_set] = hashlib.sha1(f.read().encode("utf-8")).hexdigest() known_hashes = { "MVLGWSet.yaml": "f4df9516cf7dd923b37281172c662a70fa32bebc", "MVLRelax52Set.yaml": "eb538ffb45c0cd13f13df48afc1e71c44d2e34b2", "MPHSERelaxSet.yaml": "2bb969e64b57ff049077c8ec10e64f94c9c97f42", "VASPIncarBase.yaml": "19762515f8deefb970f2968fca48a0d67f7964d4", "MPSCANRelaxSet.yaml": "2604952d387f6531bfc37641ac3b1ffcce9f1bc1", "MPRelaxSet.yaml": "4ea97d776fbdc7e168036f73e9176012a56c0a45", "MITRelaxSet.yaml": "1a0970f8cad9417ec810f7ab349dc854eaa67010", "vdW_parameters.yaml": "66541f58b221c8966109156f4f651b2ca8aa76da", } self.assertDictEqual( hashes, known_hashes, "These tests will fail when you change an input set. 
" "They are included as a sanity check: if you want to " "change an input set, please make sure to notify the " "users for that set. " 'For sets starting with "MVL" this is @shyuep, ' 'for sets starting with "MP" this is @shyuep and @mkhorton.', ) class MITMPRelaxSetTest(PymatgenTest): @classmethod def setUpClass(cls): cls.monkeypatch = MonkeyPatch() filepath = cls.TEST_FILES_DIR / "POSCAR" poscar = Poscar.from_file(filepath) cls.structure = poscar.structure cls.coords = [[0, 0, 0], [0.75, 0.5, 0.75]] cls.lattice = Lattice( [ [3.8401979337, 0.00, 0.00], [1.9200989668, 3.3257101909, 0.00], [0.00, -2.2171384943, 3.1355090603], ] ) cls.mitset = MITRelaxSet(cls.structure) cls.mitset_unsorted = MITRelaxSet(cls.structure, sort_structure=False) cls.mpset = MPRelaxSet(cls.structure) def setUp(self): warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_metal_check(self): structure = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Cu"], [[0, 0, 0]]) with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Trigger a warning. 
vis = MITRelaxSet(structure) incar = vis.incar # Verify some things self.assertIn("ISMEAR", str(w[-1].message)) def test_poscar(self): structure = Structure(self.lattice, ["Fe", "Mn"], self.coords) mitparamset = MITRelaxSet(structure, sort_structure=False) s_unsorted = mitparamset.poscar.structure mitparamset = MITRelaxSet(structure, sort_structure=True) s_sorted = mitparamset.poscar.structure self.assertEqual(s_unsorted[0].specie.symbol, "Fe") self.assertEqual(s_sorted[0].specie.symbol, "Mn") def test_potcar_symbols(self): coords = list() coords.append([0, 0, 0]) coords.append([0.75, 0.5, 0.75]) coords.append([0.75, 0.25, 0.75]) lattice = Lattice( [ [3.8401979337, 0.00, 0.00], [1.9200989668, 3.3257101909, 0.00], [0.00, -2.2171384943, 3.1355090603], ] ) structure = Structure(lattice, ["P", "Fe", "O"], coords) mitparamset = MITRelaxSet(structure) syms = mitparamset.potcar_symbols self.assertEqual(syms, ["Fe", "P", "O"]) paramset = MPRelaxSet(structure, sort_structure=False) syms = paramset.potcar_symbols self.assertEqual(syms, ["P", "Fe_pv", "O"]) def test_potcar_validation(self): structure = Structure(self.lattice, ["P", "Fe"], self.coords) # Use pytest's monkeypatch to temporarily point pymatgen to a directory # containing the wrong POTCARs (LDA potcars in a PBE directory) with self.monkeypatch.context() as m: m.setitem(SETTINGS, "PMG_VASP_PSP_DIR", str(self.TEST_FILES_DIR / "wrong_potcars")) with pytest.warns(BadInputSetWarning, match="not known by pymatgen"): MITRelaxSet(structure).potcar def test_lda_potcar(self): structure = Structure(self.lattice, ["P", "Fe"], self.coords) p = MITRelaxSet(structure, user_potcar_functional="LDA").potcar self.assertEqual(p.functional, "LDA") def test_nelect(self): coords = [[0] * 3, [0.5] * 3, [0.75] * 3] lattice = Lattice.cubic(4) s = Structure(lattice, ["Si", "Si", "Fe"], coords) self.assertAlmostEqual(MITRelaxSet(s).nelect, 16) # Test estimate of number of bands (function of nelect) with nmag>0 
self.assertAlmostEqual(MITRelaxSet(s).estimate_nbands(), 13) self.assertAlmostEqual(MPRelaxSet(s).estimate_nbands(), 17) # Test estimate of number of bands (function of nelect) with nmag==0 s = Structure(lattice, ["Si", "Si", "Si"], coords) self.assertAlmostEqual(MITRelaxSet(s).estimate_nbands(), 11) self.assertAlmostEqual(MPRelaxSet(s).estimate_nbands(), 11) # Check that it works even when oxidation states are present. Was a bug # previously. s = Structure(lattice, ["Si4+", "Si4+", "Fe2+"], coords) self.assertAlmostEqual(MITRelaxSet(s).nelect, 16) self.assertAlmostEqual(MPRelaxSet(s).nelect, 22) # Check that it works for disordered structure. Was a bug previously s = Structure(lattice, ["Si4+", "Fe2+", "Si4+"], coords) self.assertAlmostEqual(MITRelaxSet(s).nelect, 16) self.assertAlmostEqual(MPRelaxSet(s).nelect, 22) def test_get_incar(self): incar = self.mpset.incar self.assertEqual(incar["LDAUU"], [5.3, 0, 0]) self.assertAlmostEqual(incar["EDIFF"], 0.0012) incar = self.mitset.incar self.assertEqual(incar["LDAUU"], [4.0, 0, 0]) self.assertAlmostEqual(incar["EDIFF"], 1e-5) si = 14 coords = list() coords.append(np.array([0, 0, 0])) coords.append(np.array([0.75, 0.5, 0.75])) # Silicon structure for testing. 
latt = Lattice( np.array( [ [3.8401979337, 0.00, 0.00], [1.9200989668, 3.3257101909, 0.00], [0.00, -2.2171384943, 3.1355090603], ] ) ) struct = Structure(latt, [si, si], coords) incar = MPRelaxSet(struct).incar self.assertNotIn("LDAU", incar) coords = list() coords.append([0, 0, 0]) coords.append([0.75, 0.5, 0.75]) lattice = Lattice( [ [3.8401979337, 0.00, 0.00], [1.9200989668, 3.3257101909, 0.00], [0.00, -2.2171384943, 3.1355090603], ] ) struct = Structure(lattice, ["Fe", "Mn"], coords) incar = MPRelaxSet(struct).incar self.assertNotIn("LDAU", incar) # check fluorides struct = Structure(lattice, ["Fe", "F"], coords) incar = MPRelaxSet(struct).incar self.assertEqual(incar["LDAUU"], [5.3, 0]) self.assertEqual(incar["MAGMOM"], [5, 0.6]) struct = Structure(lattice, ["Fe", "F"], coords) incar = MITRelaxSet(struct).incar self.assertEqual(incar["LDAUU"], [4.0, 0]) # Make sure this works with species. struct = Structure(lattice, ["Fe2+", "O2-"], coords) incar = MPRelaxSet(struct).incar self.assertEqual(incar["LDAUU"], [5.3, 0]) struct = Structure(lattice, ["Fe", "Mn"], coords, site_properties={"magmom": (5.2, -4.5)}) incar = MPRelaxSet(struct).incar self.assertEqual(incar["MAGMOM"], [-4.5, 5.2]) incar = MITRelaxSet(struct, sort_structure=False).incar self.assertEqual(incar["MAGMOM"], [5.2, -4.5]) struct = Structure(lattice, [Species("Fe", 2, {"spin": 4.1}), "Mn"], coords) incar = MPRelaxSet(struct).incar self.assertEqual(incar["MAGMOM"], [5, 4.1]) struct = Structure(lattice, ["Mn3+", "Mn4+"], coords) incar = MITRelaxSet(struct).incar self.assertEqual(incar["MAGMOM"], [4, 3]) userset = MPRelaxSet(struct, user_incar_settings={"MAGMOM": {"Fe": 10, "S": -5, "Mn3+": 100}}) self.assertEqual(userset.incar["MAGMOM"], [100, 0.6]) noencutset = MPRelaxSet(struct, user_incar_settings={"ENCUT": None}) self.assertNotIn("ENCUT", noencutset.incar) # sulfide vs sulfate test coords = list() coords.append([0, 0, 0]) coords.append([0.75, 0.5, 0.75]) coords.append([0.25, 0.5, 0]) struct = 
Structure(lattice, ["Fe", "Fe", "S"], coords) incar = MITRelaxSet(struct).incar self.assertEqual(incar["LDAUU"], [1.9, 0]) # Make sure Matproject sulfides are ok. self.assertNotIn("LDAUU", MPRelaxSet(struct).incar) struct = Structure(lattice, ["Fe", "S", "O"], coords) incar = MITRelaxSet(struct).incar self.assertEqual(incar["LDAUU"], [4.0, 0, 0]) # Make sure Matproject sulfates are ok. self.assertEqual(MPRelaxSet(struct).incar["LDAUU"], [5.3, 0, 0]) # test for default LDAUU value userset_ldauu_fallback = MPRelaxSet(struct, user_incar_settings={"LDAUU": {"Fe": 5.0, "S": 0}}) self.assertEqual(userset_ldauu_fallback.incar["LDAUU"], [5.0, 0, 0]) # Expected to be oxide (O is the most electronegative atom) s = Structure(lattice, ["Fe", "O", "S"], coords) incar = MITRelaxSet(s).incar self.assertEqual(incar["LDAUU"], [4.0, 0, 0]) # Expected to be chloride (Cl is the most electronegative atom) s = Structure(lattice, ["Fe", "Cl", "S"], coords) incar = MITRelaxSet(s, user_incar_settings={"LDAU": True}).incar self.assertFalse("LDAUU" in incar) # LDAU = False # User set a compound to be sulfide by specifing values of "LDAUL" etc. s = Structure(lattice, ["Fe", "Cl", "S"], coords) incar = MITRelaxSet( s, user_incar_settings={ "LDAU": True, "LDAUL": {"Fe": 3}, "LDAUU": {"Fe": 1.8}, }, ).incar self.assertEqual(incar["LDAUL"], [3.0, 0, 0]) self.assertEqual(incar["LDAUU"], [1.8, 0, 0]) # test that van-der-Waals parameters are parsed correctly incar = MITRelaxSet(struct, vdw="optB86b").incar self.assertEqual(incar["GGA"], "Mk") self.assertEqual(incar["LUSE_VDW"], True) self.assertEqual(incar["PARAM1"], 0.1234) # Test that NELECT is updated when a charge is present si = 14 coords = list() coords.append(np.array([0, 0, 0])) coords.append(np.array([0.75, 0.5, 0.75])) # Silicon structure for testing. 
latt = Lattice( np.array( [ [3.8401979337, 0.00, 0.00], [1.9200989668, 3.3257101909, 0.00], [0.00, -2.2171384943, 3.1355090603], ] ) ) struct = Structure(latt, [si, si], coords, charge=1) mpr = MPRelaxSet(struct, use_structure_charge=True) self.assertEqual(mpr.incar["NELECT"], 7, "NELECT not properly set for nonzero charge") # test that NELECT does not get set when use_structure_charge = False mpr = MPRelaxSet(struct, use_structure_charge=False) self.assertFalse( "NELECT" in mpr.incar.keys(), "NELECT should not be set when " "use_structure_charge is False", ) struct = Structure(latt, ["Co", "O"], coords) mpr = MPRelaxSet(struct) self.assertEqual(mpr.incar["MAGMOM"], [0.6, 0.6]) struct = Structure(latt, ["Co4+", "O"], coords) mpr = MPRelaxSet(struct) self.assertEqual(mpr.incar["MAGMOM"], [1, 0.6]) # test passing user_incar_settings and user_kpoint_settings of None sets = [ MPRelaxSet(struct, user_incar_settings=None, user_kpoints_settings=None), MPStaticSet(struct, user_incar_settings=None, user_kpoints_settings=None), MPNonSCFSet(struct, user_incar_settings=None, user_kpoints_settings=None), ] for mp_set in sets: self.assertNotEqual(mp_set.kpoints, None) self.assertNotEqual(mp_set.incar, None) def test_get_kpoints(self): kpoints = MPRelaxSet(self.structure).kpoints self.assertEqual(kpoints.kpts, [[2, 4, 5]]) self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma) kpoints = MPRelaxSet(self.structure, user_kpoints_settings={"reciprocal_density": 1000}).kpoints self.assertEqual(kpoints.kpts, [[6, 10, 13]]) self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma) kpoints_obj = Kpoints(kpts=[[3, 3, 3]]) kpoints_return = MPRelaxSet(self.structure, user_kpoints_settings=kpoints_obj).kpoints self.assertEqual(kpoints_return.kpts, [[3, 3, 3]]) kpoints = self.mitset.kpoints self.assertEqual(kpoints.kpts, [[25]]) self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic) recip_paramset = MPRelaxSet(self.structure, force_gamma=True) 
recip_paramset.kpoints_settings = {"reciprocal_density": 40} kpoints = recip_paramset.kpoints self.assertEqual(kpoints.kpts, [[2, 4, 5]]) self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma) def test_get_vasp_input(self): d = self.mitset.get_vasp_input() self.assertEqual(d["INCAR"]["ISMEAR"], -5) s = self.structure.copy() s.make_supercell(4) paramset = MPRelaxSet(s) d = paramset.get_vasp_input() self.assertEqual(d["INCAR"]["ISMEAR"], 0) def test_MPMetalRelaxSet(self): mpmetalset = MPMetalRelaxSet(self.get_structure("Sn")) incar = mpmetalset.incar self.assertEqual(incar["ISMEAR"], 1) self.assertEqual(incar["SIGMA"], 0.2) kpoints = mpmetalset.kpoints self.assertArrayAlmostEqual(kpoints.kpts[0], [5, 5, 5]) def test_as_from_dict(self): mitset = MITRelaxSet(self.structure) mpset = MPRelaxSet(self.structure) mpuserset = MPRelaxSet( self.structure, user_incar_settings={"MAGMOM": {"Fe": 10, "S": -5, "Mn3+": 100}}, ) d = mitset.as_dict() v = dec.process_decoded(d) self.assertEqual(v._config_dict["INCAR"]["LDAUU"]["O"]["Fe"], 4) d = mpset.as_dict() v = dec.process_decoded(d) self.assertEqual(v._config_dict["INCAR"]["LDAUU"]["O"]["Fe"], 5.3) d = mpuserset.as_dict() v = dec.process_decoded(d) # self.assertEqual(type(v), MPVaspInputSet) self.assertEqual(v.user_incar_settings["MAGMOM"], {"Fe": 10, "S": -5, "Mn3+": 100}) def test_hubbard_off_and_ediff_override(self): p = MPRelaxSet(self.structure, user_incar_settings={"LDAU": False, "EDIFF": 1e-10}) self.assertNotIn("LDAUU", p.incar) self.assertEqual(p.incar["EDIFF"], 1e-10) def test_write_input(self): self.mitset.write_input(".", make_dir_if_not_present=True) for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR"]: self.assertTrue(os.path.exists(f)) self.assertFalse(os.path.exists("Fe4P4O16.cif")) self.mitset.write_input(".", make_dir_if_not_present=True, include_cif=True) self.assertTrue(os.path.exists("Fe4P4O16.cif")) for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR", "Fe4P4O16.cif"]: os.remove(f) 
self.mitset.write_input(".", make_dir_if_not_present=True, potcar_spec=True) for f in ["INCAR", "KPOINTS", "POSCAR"]: self.assertTrue(os.path.exists(f)) self.assertFalse(os.path.exists("POTCAR")) self.assertTrue(os.path.exists("POTCAR.spec")) for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR.spec"]: os.remove(f) def test_user_potcar_settings(self): vis = MPRelaxSet(self.structure, user_potcar_settings={"Fe": "Fe"}) potcar = vis.potcar self.assertEqual(potcar.symbols, ["Fe", "P", "O"]) def test_valid_magmom_struct(self): # First test the helper function struct = self.structure.copy() get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="v") props = [isite.properties for isite in struct.sites] self.assertEquals(props, [{"magmom": [1.0, 1.0, 1.0]}] * len(props)) struct = self.structure.copy() get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="s") props = [isite.properties for isite in struct.sites] self.assertEquals(props, [{"magmom": 1.0}] * len(props)) struct.insert(0, "Li", [0, 0, 0]) get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="a") props = [isite.properties for isite in struct.sites] self.assertEquals(props, [{"magmom": 1.0}] * len(props)) struct = self.structure.copy() get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="v") struct.insert(0, "Li", [0, 0, 0], properties={"magmom": 10.0}) with self.assertRaises(TypeError) as context: get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="a") self.assertTrue("Magmom type conflict" in str(context.exception)) # Test the behavior of MPRelaxSet to atomacically fill in the missing magmom struct = self.structure.copy() get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="s") struct.insert(0, "Li", [0, 0, 0]) vis = MPRelaxSet(struct, user_potcar_settings={"Fe": "Fe"}, validate_magmom=False) with self.assertRaises(TypeError) as context: print(vis.get_vasp_input()) self.assertTrue("argument must be a string" in str(context.exception)) 
vis = MPRelaxSet(struct, user_potcar_settings={"Fe": "Fe"}, validate_magmom=True) self.assertEqual(vis.get_vasp_input()["INCAR"]["MAGMOM"], [1.0] * len(struct)) class MPStaticSetTest(PymatgenTest): def setUp(self): self.tmp = tempfile.mkdtemp() warnings.simplefilter("ignore") def test_init(self): prev_run = self.TEST_FILES_DIR / "relaxation" vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run) self.assertEqual(vis.incar["NSW"], 0) # Check that the ENCUT has been inherited. self.assertEqual(vis.incar["ENCUT"], 600) self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst) # Check as from dict. vis = MPStaticSet.from_dict(vis.as_dict()) self.assertEqual(vis.incar["NSW"], 0) # Check that the ENCUT has been inherited. self.assertEqual(vis.incar["ENCUT"], 600) self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst) non_prev_vis = MPStaticSet(vis.structure, user_incar_settings={"LORBIT": 12, "LWAVE": True}) self.assertEqual(non_prev_vis.incar["NSW"], 0) # Check that the ENCUT and Kpoints style has NOT been inherited. self.assertEqual(non_prev_vis.incar["ENCUT"], 520) # Check that user incar settings are applied. self.assertEqual(non_prev_vis.incar["LORBIT"], 12) self.assertTrue(non_prev_vis.incar["LWAVE"]) self.assertEqual(non_prev_vis.kpoints.style, Kpoints.supported_modes.Gamma) v2 = MPStaticSet.from_dict(non_prev_vis.as_dict()) self.assertEqual(v2.incar["ENCUT"], 520) # Check that user incar settings are applied. 
self.assertEqual(v2.incar["LORBIT"], 12) leps_vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run, lepsilon=True) self.assertTrue(leps_vis.incar["LEPSILON"]) self.assertEqual(leps_vis.incar["IBRION"], 8) self.assertNotIn("NPAR", leps_vis.incar) self.assertNotIn("NSW", leps_vis.incar) self.assertEqual(non_prev_vis.kpoints.kpts, [[11, 10, 10]]) non_prev_vis = MPStaticSet(vis.structure, reciprocal_density=200) self.assertEqual(non_prev_vis.kpoints.kpts, [[14, 12, 12]]) # Check LCALCPOL flag lcalcpol_vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run, lcalcpol=True) self.assertTrue(lcalcpol_vis.incar["LCALCPOL"]) def test_user_incar_kspacing(self): # Make sure user KSPACING settings properly overrides KPOINTS. si = self.get_structure("Si") vis = MPRelaxSet(si, user_incar_settings={"KSPACING": 0.22}) self.assertEqual(vis.incar["KSPACING"], 0.22) self.assertEqual(vis.kpoints, None) def test_kspacing_override(self): # If KSPACING is set and user_kpoints_settings are given, # make sure the user_kpoints_settings override KSPACING si = self.get_structure("Si") vis = MPRelaxSet( si, user_incar_settings={"KSPACING": 0.22}, user_kpoints_settings={"reciprocal_density": 1000}, ) self.assertEqual(vis.incar.get("KSPACING"), None) self.assertIsInstance(vis.kpoints, Kpoints) def test_override_from_prev_calc(self): # test override_from_prev prev_run = self.TEST_FILES_DIR / "relaxation" vis = MPStaticSet(_dummy_structure) vis.override_from_prev_calc(prev_calc_dir=prev_run) self.assertEqual(vis.incar["NSW"], 0) self.assertEqual(vis.incar["ENCUT"], 600) self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst) # Check LCALCPOL flag lcalcpol_vis = MPStaticSet(_dummy_structure, lcalcpol=True) lcalcpol_vis = lcalcpol_vis.override_from_prev_calc(prev_calc_dir=prev_run) self.assertTrue(lcalcpol_vis.incar["LCALCPOL"]) def test_standardize_structure(self): sga = SpacegroupAnalyzer(self.get_structure("Si")) original_structure = sga.get_conventional_standard_structure() sm = 
StructureMatcher(primitive_cell=False, scale=False) vis = MPStaticSet(original_structure) self.assertTrue(sm.fit(vis.structure, original_structure)) vis = MPStaticSet(original_structure, standardize=True) self.assertFalse(sm.fit(vis.structure, original_structure)) def test_write_input_zipped(self): vis = MPStaticSet(self.get_structure("Si")) vis.write_input(output_dir=".", potcar_spec=True, zip_output=True) self.assertTrue(os.path.exists("MPStaticSet.zip")) with ZipFile("MPStaticSet.zip", "r") as zip: contents = zip.namelist() self.assertSetEqual(set(contents), {"INCAR", "POSCAR", "POTCAR.spec", "KPOINTS"}) spec = zip.open("POTCAR.spec", "r").read().decode() self.assertEqual(spec, "Si") os.remove("MPStaticSet.zip") def test_conflicting_arguments(self): with pytest.raises(ValueError, match="deprecated"): si = self.get_structure("Si") vis = MPStaticSet(si, potcar_functional="PBE", user_potcar_functional="PBE") def test_grid_size_from_struct(self): # TODO grab a bunch_of_calculations store as a list of tuples # (structure, ngx, ..., ngxf, ...) 
where all the grid size values are generated by vasp # check that the code produces the same grid sizes fname = self.TEST_FILES_DIR / "grid_data_files" / "vasp_inputs_for_grid_check.json" parsed_vasp_data = loadfn(fname) for tt in parsed_vasp_data: ng = [tt["input"]["parameters"][ik] for ik in ["NGX", "NGY", "NGZ"]] ngf = [tt["input"]["parameters"][ik] for ik in ["NGXF", "NGYF", "NGZF"]] struct = tt["input"]["structure"] static_set = MPStaticSet(struct) matched = static_set.calculate_ng() == (ng, ngf) self.assertTrue(matched) def tearDown(self): shutil.rmtree(self.tmp) warnings.simplefilter("default") class MPNonSCFSetTest(PymatgenTest): def setUp(self): self.tmp = tempfile.mkdtemp() warnings.simplefilter("ignore") def test_init(self): prev_run = self.TEST_FILES_DIR / "relaxation" # check boltztrap mode vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Boltztrap") self.assertEqual(vis.incar["ISMEAR"], 0) # check uniform mode vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Uniform") self.assertEqual(vis.incar["ISMEAR"], -5) self.assertEqual(vis.incar["ISYM"], 2) # check uniform mode with automatic nedos vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Uniform", nedos=0) self.assertEqual(vis.incar["NEDOS"], 12217) # test line mode vis = MPNonSCFSet.from_prev_calc( prev_calc_dir=prev_run, mode="Line", copy_chgcar=False, user_incar_settings={"SIGMA": 0.025}, ) self.assertEqual(vis.incar["NSW"], 0) # Check that the ENCUT has been inherited. self.assertEqual(vis.incar["ENCUT"], 600) # Check that the user_incar_settings works self.assertEqual(vis.incar["SIGMA"], 0.025) self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal) # Check as from dict. vis = MPNonSCFSet.from_dict(vis.as_dict()) self.assertEqual(vis.incar["NSW"], 0) # Check that the ENCUT has been inherited. 
        self.assertEqual(vis.incar["ENCUT"], 600)
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal)

        vis.write_input(self.tmp)
        # copy_chgcar=False -> no CHGCAR is copied over.
        self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))

        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Line", copy_chgcar=True)
        # check ISMEAR set correctly for line mode
        self.assertEqual(vis.incar["ISMEAR"], 0)
        vis.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
        os.remove(os.path.join(self.tmp, "CHGCAR"))

        # Standardizing the structure invalidates the CHGCAR, so it must
        # not be copied even when copy_chgcar=True.
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, standardize=True, mode="Line", copy_chgcar=True)
        vis.write_input(self.tmp)
        self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))

    def test_override_from_prev(self):
        prev_run = self.TEST_FILES_DIR / "relaxation"

        # test override_from_prev
        vis = MPNonSCFSet(_dummy_structure, mode="Boltztrap")
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["ISMEAR"], 0)

        vis = MPNonSCFSet(_dummy_structure, mode="Uniform")
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["ISMEAR"], -5)
        self.assertEqual(vis.incar["ISYM"], 2)

        vis = MPNonSCFSet(_dummy_structure, mode="Uniform", nedos=0)
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["NEDOS"], 12217)

        # test line mode
        vis = MPNonSCFSet(
            _dummy_structure,
            mode="Line",
            copy_chgcar=False,
            user_incar_settings={"SIGMA": 0.025},
        )
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["ENCUT"], 600)
        self.assertEqual(vis.incar["SIGMA"], 0.025)
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal)

        vis = MPNonSCFSet(_dummy_structure, mode="Line", copy_chgcar=True)
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["ISMEAR"], 0)
        vis.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
        os.remove(os.path.join(self.tmp, "CHGCAR"))

        vis = MPNonSCFSet(_dummy_structure, standardize=True, mode="Line", copy_chgcar=True)
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        vis.write_input(self.tmp)
        self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))

    def test_kpoints(self):
        # test k-points are generated in the correct format
        prev_run = self.TEST_FILES_DIR / "relaxation"
        # Uniform mode: a single mesh row; Line/Boltztrap: explicit lists.
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Uniform", copy_chgcar=False)
        self.assertEqual(np.array(vis.kpoints.kpts).shape, (1, 3))

        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Line", copy_chgcar=False)
        self.assertNotEqual(np.array(vis.kpoints.kpts).shape, (1, 3))

        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Boltztrap", copy_chgcar=False)
        self.assertNotEqual(np.array(vis.kpoints.kpts).shape, (1, 3))

    def test_optics(self):
        prev_run = self.TEST_FILES_DIR / "relaxation"
        vis = MPNonSCFSet.from_prev_calc(
            prev_calc_dir=prev_run,
            copy_chgcar=False,
            optics=True,
            mode="Uniform",
            nedos=2001,
        )

        self.assertEqual(vis.incar["NSW"], 0)
        # Check that the ENCUT has been inherited.
self.assertEqual(vis.incar["ENCUT"], 600) # check NEDOS and ISMEAR set correctly self.assertEqual(vis.incar["NEDOS"], 2001) self.assertEqual(vis.incar["ISMEAR"], -5) self.assertEqual(vis.incar["ISYM"], 2) self.assertTrue(vis.incar["LOPTICS"]) self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Gamma) def test_user_kpoint_override(self): user_kpoints_override = Kpoints( style=Kpoints.supported_modes.Gamma, kpts=((1, 1, 1),) ) # the default kpoints style is reciprocal prev_run = self.TEST_FILES_DIR / "relaxation" vis = MPNonSCFSet.from_prev_calc( prev_calc_dir=prev_run, copy_chgcar=False, optics=True, mode="Uniform", nedos=2001, user_kpoints_settings=user_kpoints_override, ) self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Gamma) def tearDown(self): shutil.rmtree(self.tmp) warnings.simplefilter("default") class MagmomLdauTest(PymatgenTest): def setUp(self): warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_structure_from_prev_run(self): vrun = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.magmom_ldau") structure = vrun.final_structure poscar = Poscar(structure) structure_decorated = get_structure_from_prev_run(vrun) ldau_ans = {"LDAUU": [5.3, 0.0], "LDAUL": [2, 0], "LDAUJ": [0.0, 0.0]} magmom_ans = [5.0, 5.0, 5.0, 5.0, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6] ldau_dict = {} for key in ("LDAUU", "LDAUJ", "LDAUL"): if hasattr(structure_decorated[0], key.lower()): m = dict([(site.specie.symbol, getattr(site, key.lower())) for site in structure_decorated]) ldau_dict[key] = [m[sym] for sym in poscar.site_symbols] magmom = [site.magmom for site in structure_decorated] self.assertEqual(ldau_dict, ldau_ans) self.assertEqual(magmom, magmom_ans) def test_ln_magmom(self): YAML_PATH = os.path.join(os.path.dirname(__file__), "../VASPIncarBase.yaml") MAGMOM_SETTING = loadfn(YAML_PATH)["INCAR"]["MAGMOM"] structure = Structure.from_file(self.TEST_FILES_DIR / "La4Fe4O12.cif") structure.add_oxidation_state_by_element({"La": +3, "Fe": 
+3, "O": -2}) for ion in MAGMOM_SETTING: s = structure.copy() s.replace_species({"La3+": ion}) vis = MPRelaxSet(s) fe_pos = vis.poscar.comment.index("Fe") if fe_pos == 0: magmom_ans = [5] * 4 + [MAGMOM_SETTING[ion]] * 4 + [0.6] * 12 else: magmom_ans = [MAGMOM_SETTING[ion]] * 4 + [5] * 4 + [0.6] * 12 self.assertEqual(vis.incar["MAGMOM"], magmom_ans) class MITMDSetTest(PymatgenTest): def setUp(self): filepath = self.TEST_FILES_DIR / "POSCAR" poscar = Poscar.from_file(filepath) self.struct = poscar.structure self.mitmdparam = MITMDSet(self.struct, 300, 1200, 10000) warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_params(self): param = self.mitmdparam syms = param.potcar_symbols self.assertEqual(syms, ["Fe", "P", "O"]) incar = param.incar self.assertNotIn("LDAUU", incar) self.assertAlmostEqual(incar["EDIFF"], 1e-5) kpoints = param.kpoints self.assertEqual(kpoints.kpts, [(1, 1, 1)]) self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma) def test_as_from_dict(self): d = self.mitmdparam.as_dict() v = dec.process_decoded(d) self.assertEqual(type(v), MITMDSet) self.assertEqual(v._config_dict["INCAR"]["TEBEG"], 300) self.assertEqual(v._config_dict["INCAR"]["PREC"], "Low") class MVLNPTMDSetTest(PymatgenTest): def setUp(self): file_path = self.TEST_FILES_DIR / "POSCAR" poscar = Poscar.from_file(file_path) self.struct = poscar.structure self.mvl_npt_set = MVLNPTMDSet(self.struct, start_temp=0, end_temp=300, nsteps=1000) warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_incar(self): npt_set = self.mvl_npt_set syms = npt_set.potcar_symbols self.assertEqual(syms, ["Fe", "P", "O"]) incar = npt_set.incar self.assertNotIn("LDAUU", incar) self.assertAlmostEqual(incar["EDIFF"], 1e-5) self.assertEqual(incar["LANGEVIN_GAMMA_L"], 1) self.assertEqual(incar["LANGEVIN_GAMMA"], [10, 10, 10]) enmax = max([npt_set.potcar[i].keywords["ENMAX"] for i in range(self.struct.ntypesp)]) 
self.assertAlmostEqual(incar["ENCUT"], 1.5 * enmax) self.assertEqual(incar["IALGO"], 48) self.assertEqual(incar["ISIF"], 3) self.assertEqual(incar["MDALGO"], 3) self.assertEqual(incar["SMASS"], 0) self.assertEqual(incar["PREC"], "Low") kpoints = npt_set.kpoints self.assertEqual(kpoints.kpts, [(1, 1, 1)]) self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma) def test_as_from_dict(self): d = self.mvl_npt_set.as_dict() v = dec.process_decoded(d) self.assertEqual(type(v), MVLNPTMDSet) self.assertEqual(v._config_dict["INCAR"]["NSW"], 1000) class MITNEBSetTest(PymatgenTest): def setUp(self): c1 = [[0.5] * 3, [0.9] * 3] c2 = [[0.5] * 3, [0.9, 0.1, 0.1]] s1 = Structure(Lattice.cubic(5), ["Si", "Si"], c1) s2 = Structure(Lattice.cubic(5), ["Si", "Si"], c2) structs = [] for s in s1.interpolate(s2, 3, pbc=True): structs.append(Structure.from_sites(s.sites, to_unit_cell=True)) self.structures = structs self.vis = MITNEBSet(self.structures) warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_potcar_symbols(self): syms = self.vis.potcar_symbols self.assertEqual(syms, ["Si"]) def test_incar(self): incar = self.vis.incar self.assertNotIn("LDAUU", incar) self.assertAlmostEqual(incar["EDIFF"], 0.00001) def test_kpoints(self): kpoints = self.vis.kpoints self.assertEqual(kpoints.kpts, [[25]]) self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic) def test_as_from_dict(self): d = self.vis.as_dict() v = dec.process_decoded(d) self.assertEqual(v._config_dict["INCAR"]["IMAGES"], 2) def test_write_input(self): self.vis.write_input(".", write_cif=True, write_endpoint_inputs=True, write_path_cif=True) self.assertTrue(os.path.exists("INCAR")) self.assertTrue(os.path.exists("KPOINTS")) self.assertTrue(os.path.exists("POTCAR")) self.assertTrue(os.path.exists("00/POSCAR")) self.assertTrue(os.path.exists("01/POSCAR")) self.assertTrue(os.path.exists("02/POSCAR")) self.assertTrue(os.path.exists("03/POSCAR")) 
self.assertFalse(os.path.exists("04/POSCAR")) self.assertTrue(os.path.exists("00/INCAR")) self.assertTrue(os.path.exists("path.cif")) for d in ["00", "01", "02", "03"]: shutil.rmtree(d) for f in ["INCAR", "KPOINTS", "POTCAR", "path.cif"]: os.remove(f) class MPSOCSetTest(PymatgenTest): def setUp(self): warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_from_prev_calc(self): prev_run = self.TEST_FILES_DIR / "fe_monomer" vis = MPSOCSet.from_prev_calc( prev_calc_dir=prev_run, magmom=[3], saxis=(1, 0, 0), user_incar_settings={"SIGMA": 0.025}, ) self.assertEqual(vis.incar["ISYM"], -1) self.assertTrue(vis.incar["LSORBIT"]) self.assertEqual(vis.incar["ICHARG"], 11) self.assertEqual(vis.incar["SAXIS"], [1, 0, 0]) self.assertEqual(vis.incar["MAGMOM"], [[0, 0, 3]]) self.assertEqual(vis.incar["SIGMA"], 0.025) def test_override_from_prev_calc(self): # test override_from_prev_calc prev_run = self.TEST_FILES_DIR / "fe_monomer" vis = MPSOCSet( _dummy_structure, magmom=[3], saxis=(1, 0, 0), user_incar_settings={"SIGMA": 0.025}, ) vis.override_from_prev_calc(prev_calc_dir=prev_run) self.assertEqual(vis.incar["ISYM"], -1) self.assertTrue(vis.incar["LSORBIT"]) self.assertEqual(vis.incar["ICHARG"], 11) self.assertEqual(vis.incar["SAXIS"], [1, 0, 0]) self.assertEqual(vis.incar["MAGMOM"], [[0, 0, 3]]) self.assertEqual(vis.incar["SIGMA"], 0.025) class MPNMRSetTest(PymatgenTest): def setUp(self): warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_incar(self): filepath = self.TEST_FILES_DIR / "Li.cif" structure = Structure.from_file(filepath) vis = MPNMRSet(structure) self.assertTrue(vis.incar.get("LCHIMAG", None)) self.assertEqual(vis.incar.get("QUAD_EFG", None), None) vis = MPNMRSet(structure, mode="efg") self.assertFalse(vis.incar.get("LCHIMAG", None)) self.assertEqual(vis.incar.get("QUAD_EFG", None), [-0.808]) vis = MPNMRSet(structure, mode="efg", isotopes=["Li-7"]) 
        self.assertFalse(vis.incar.get("LCHIMAG", None))
        # Li-7 has a different quadrupole moment than the default isotope.
        self.assertEqual(vis.incar.get("QUAD_EFG", None), [-40.1])


class MVLSlabSetTest(PymatgenTest):
    """Tests for the MVL slab input set (bulk, slab and auto-dipole modes)."""

    def setUp(self):
        s = self.get_structure("Li2O")
        gen = SlabGenerator(s, (1, 0, 0), 10, 10)
        self.slab = gen.get_slab()
        self.bulk = self.slab.oriented_unit_cell

        vis_bulk = MVLSlabSet(self.bulk, bulk=True)
        vis = MVLSlabSet(self.slab)
        vis_dipole = MVLSlabSet(self.slab, auto_dipole=True)

        self.d_bulk = vis_bulk.get_vasp_input()
        self.d_slab = vis.get_vasp_input()
        self.d_dipole = vis_dipole.get_vasp_input()
        self.vis = vis
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_user_incar_settings(self):
        # Make sure user incar settings properly override AMIX.
        si = self.get_structure("Si")
        vis = MVLSlabSet(si, user_incar_settings={"AMIX": 0.1})
        self.assertEqual(vis.incar["AMIX"], 0.1)

    def test_bulk(self):
        incar_bulk = self.d_bulk["INCAR"]
        poscar_bulk = self.d_bulk["POSCAR"]

        self.assertEqual(incar_bulk["ISIF"], 3)
        self.assertEqual(incar_bulk["EDIFF"], 1e-4)
        self.assertEqual(incar_bulk["EDIFFG"], -0.02)
        self.assertEqual(poscar_bulk.structure.formula, self.bulk.formula)

    def test_slab(self):
        incar_slab = self.d_slab["INCAR"]
        poscar_slab = self.d_slab["POSCAR"]
        potcar_slab = self.d_slab["POTCAR"]

        self.assertEqual(incar_slab["AMIN"], 0.01)
        self.assertEqual(incar_slab["AMIX"], 0.2)
        self.assertEqual(incar_slab["BMIX"], 0.001)
        self.assertEqual(incar_slab["NELMIN"], 8)
        # No volume relaxation during slab calculations
        self.assertEqual(incar_slab["ISIF"], 2)
        self.assertEqual(potcar_slab.functional, "PBE")
        self.assertEqual(potcar_slab.symbols[1], "O")
        self.assertEqual(potcar_slab.symbols[0], "Li_sv")
        self.assertEqual(poscar_slab.structure.formula, self.slab.formula)

        # Test auto-dipole
        dipole_incar = self.d_dipole["INCAR"]
        self.assertTrue(dipole_incar["LDIPOL"])
        self.assertArrayAlmostEqual(dipole_incar["DIPOL"], [0.2323, 0.2323, 0.2165], decimal=4)
        self.assertEqual(dipole_incar["IDIPOL"], 3)

    def test_kpoints(self):
        kpoints_slab = self.d_slab["KPOINTS"].kpts[0]
        kpoints_bulk = self.d_bulk["KPOINTS"].kpts[0]

        self.assertEqual(kpoints_bulk[0], kpoints_slab[0])
        self.assertEqual(kpoints_bulk[1], kpoints_slab[1])
        self.assertEqual(kpoints_bulk[0], 15)
        self.assertEqual(kpoints_bulk[1], 15)
        self.assertEqual(kpoints_bulk[2], 15)
        # The last kpoint in a slab should always be 1
        self.assertEqual(kpoints_slab[2], 1)

    def test_as_dict(self):
        # Smoke test: the set must round-trip through as_dict/from_dict.
        vis_dict = self.vis.as_dict()
        new = MVLSlabSet.from_dict(vis_dict)


class MVLElasticSetTest(PymatgenTest):
    """Tests for the MVL elastic-constant input set."""

    def setUp(self):
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        mvlparam = MVLElasticSet(self.get_structure("Graphite"))
        incar = mvlparam.incar
        self.assertEqual(incar["IBRION"], 6)
        self.assertEqual(incar["NFREE"], 2)
        self.assertEqual(incar["POTIM"], 0.015)
        self.assertNotIn("NPAR", incar)


class MVLGWSetTest(PymatgenTest):
    """Tests for the MVL GW input set (static, diag, GW and BSE modes)."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        self.s = PymatgenTest.get_structure("Li2O")
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")
        shutil.rmtree(self.tmp)

    def test_static(self):
        mvlgwsc = MVLGWSet(self.s)
        incar = mvlgwsc.incar
        self.assertEqual(incar["SIGMA"], 0.01)
        kpoints = mvlgwsc.kpoints
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        symbols = mvlgwsc.potcar.symbols
        # GW runs use the dedicated *_GW potcars.
        self.assertEqual(symbols, ["Li_sv_GW", "O_GW"])

    def test_diag(self):
        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwdiag = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=True, mode="diag")
        mvlgwdiag.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
        self.assertEqual(mvlgwdiag.incar["NBANDS"], 32)
        self.assertEqual(mvlgwdiag.incar["ALGO"], "Exact")
        self.assertTrue(mvlgwdiag.incar["LOPTICS"])

        # test override_from_prev_calc
        mvlgwdiag = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="diag")
        mvlgwdiag.override_from_prev_calc(prev_calc_dir=prev_run)
        mvlgwdiag.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
        self.assertEqual(mvlgwdiag.incar["NBANDS"], 32)
        self.assertEqual(mvlgwdiag.incar["ALGO"], "Exact")
        self.assertTrue(mvlgwdiag.incar["LOPTICS"])

    def test_bse(self):
        prev_run = self.TEST_FILES_DIR / "relaxation"
        # BSE mode with copy_wavecar: both WAVECAR and WAVEDER are copied.
        mvlgwgbse = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=True, mode="BSE")
        mvlgwgbse.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVEDER")))

        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwgbse = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=False, mode="GW")
        self.assertEqual(mvlgwgbse.incar["NOMEGA"], 80)
        self.assertEqual(mvlgwgbse.incar["ENCUTGW"], 250)
        self.assertEqual(mvlgwgbse.incar["ALGO"], "GW0")

        mvlgwgbse1 = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=False, mode="BSE")
        self.assertEqual(mvlgwgbse1.incar["ANTIRES"], 0)
        self.assertEqual(mvlgwgbse1.incar["NBANDSO"], 20)
        self.assertEqual(mvlgwgbse1.incar["ALGO"], "BSE")

        # test override_from_prev_calc
        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwgbse = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="BSE")
        mvlgwgbse.override_from_prev_calc(prev_calc_dir=prev_run)
        mvlgwgbse.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVEDER")))

        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwgbse = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="GW")
        mvlgwgbse.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(mvlgwgbse.incar["NOMEGA"], 80)
        self.assertEqual(mvlgwgbse.incar["ENCUTGW"], 250)
        self.assertEqual(mvlgwgbse.incar["ALGO"], "GW0")

        mvlgwgbse1 = MVLGWSet(_dummy_structure, copy_wavecar=False, mode="BSE")
        mvlgwgbse1.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(mvlgwgbse1.incar["ANTIRES"], 0)
        self.assertEqual(mvlgwgbse1.incar["NBANDSO"], 20)
        self.assertEqual(mvlgwgbse1.incar["ALGO"], "BSE")


class MPHSEBSTest(PymatgenTest):
    """Tests for the HSE band-structure set (uniform, gap and line modes)."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_init(self):
        prev_run = self.TEST_FILES_DIR / "static_silicon"
        vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="uniform")
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(len(vis.kpoints.kpts), 16)

        # Gap mode adds the VBM/CBM kpoints to the uniform mesh.
        vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="gap")
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(len(vis.kpoints.kpts), 18)

        vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="line")
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(vis.incar["HFSCREEN"], 0.2)
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["ISYM"], 3)
        self.assertEqual(len(vis.kpoints.kpts), 180)

    def test_override_from_prev_calc(self):
        prev_run = self.TEST_FILES_DIR / "static_silicon"
        vis = MPHSEBSSet(_dummy_structure, mode="uniform")
        vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(len(vis.kpoints.kpts), 16)

        vis = MPHSEBSSet(_dummy_structure, mode="gap")
        vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(len(vis.kpoints.kpts), 18)

        vis = MPHSEBSSet(_dummy_structure, mode="line")
        vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(vis.incar["HFSCREEN"], 0.2)
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["ISYM"], 3)
        self.assertEqual(len(vis.kpoints.kpts), 180)


class MVLScanRelaxSetTest(PymatgenTest):
    """Tests for the MVL SCAN relaxation set."""

    def setUp(self):
        file_path = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(file_path)
        self.struct = poscar.structure
        self.mvl_scan_set = MVLScanRelaxSet(
            self.struct, user_potcar_functional="PBE_52", user_incar_settings={"NSW": 500}
        )
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        incar = self.mvl_scan_set.incar
        self.assertIn("METAGGA", incar)
        self.assertIn("LASPH", incar)
        self.assertIn("ADDGRID", incar)
        self.assertEqual(incar["NSW"], 500)

        # Test SCAN+rVV10
        scan_rvv10_set = MVLScanRelaxSet(self.struct, vdw="rVV10")
        self.assertEqual(scan_rvv10_set.incar["BPARAM"], 15.7)

    def test_potcar(self):
        self.assertEqual(self.mvl_scan_set.potcar.functional, "PBE_52")

        test_potcar_set_1 = MVLScanRelaxSet(self.struct, user_potcar_functional="PBE_54")
        self.assertEqual(test_potcar_set_1.potcar.functional, "PBE_54")

        # SCAN requires a PBE_52/PBE_54 potcar set; plain PBE must raise.
        self.assertRaises(ValueError, MVLScanRelaxSet, self.struct, user_potcar_functional="PBE")

    def test_as_from_dict(self):
        d = self.mvl_scan_set.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(type(v), MVLScanRelaxSet)
        self.assertEqual(v._config_dict["INCAR"]["METAGGA"], "SCAN")
        self.assertEqual(v.user_incar_settings["NSW"], 500)


class MPScanRelaxSetTest(PymatgenTest):
    """Tests for the MP r2SCAN relaxation set."""

    def setUp(self):
        file_path = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(file_path)
        self.struct = poscar.structure
        self.mp_scan_set = MPScanRelaxSet(
            self.struct, user_potcar_functional="PBE_52", user_incar_settings={"NSW": 500}
        )
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        incar = self.mp_scan_set.incar
        self.assertEqual(incar["METAGGA"], "R2scan")
        self.assertEqual(incar["LASPH"], True)
        self.assertEqual(incar["ENAUG"], 1360)
        self.assertEqual(incar["ENCUT"], 680)
        self.assertEqual(incar["NSW"], 500)
        # the default POTCAR contains metals
        self.assertEqual(incar["KSPACING"], 0.22)
        self.assertEqual(incar["ISMEAR"], 2)
        self.assertEqual(incar["SIGMA"], 0.2)

    def test_scan_substitute(self):
        mp_scan_sub = MPScanRelaxSet(
            self.struct,
            user_potcar_functional="PBE_52",
            user_incar_settings={"METAGGA": "SCAN"},
        )
        incar = mp_scan_sub.incar
        self.assertEqual(incar["METAGGA"], "Scan")

    def test_nonmetal(self):
        # Test that KSPACING and ISMEAR change with a nonmetal structure
        file_path = self.TEST_FILES_DIR / "POSCAR.O2"
        struct = Poscar.from_file(file_path, check_for_POTCAR=False).structure
        scan_nonmetal_set = MPScanRelaxSet(struct, bandgap=1.1)
        incar = scan_nonmetal_set.incar
        self.assertAlmostEqual(incar["KSPACING"], 0.29125, places=5)
        self.assertEqual(incar["ISMEAR"], -5)
        self.assertEqual(incar["SIGMA"], 0.05)

    def test_incar_overrides(self):
        # use 'user_incar_settings' to override the KSPACING, ISMEAR, and SIGMA
        # parameters that MPScanSet normally determines
        mp_scan_set2 = MPScanRelaxSet(
            self.struct,
            user_incar_settings={"KSPACING": 0.5, "ISMEAR": 0, "SIGMA": 0.05},
        )
        incar = mp_scan_set2.incar
        self.assertEqual(incar["KSPACING"], 0.5)
        self.assertEqual(incar["ISMEAR"], 0)
        self.assertEqual(incar["SIGMA"], 0.05)

    # Test SCAN+rVV10
    def test_rvv10(self):
        scan_rvv10_set = MPScanRelaxSet(self.struct, vdw="rVV10")
        self.assertIn("LUSE_VDW", scan_rvv10_set.incar)
        self.assertEqual(scan_rvv10_set.incar["BPARAM"], 15.7)

    def test_other_vdw(self):
        # should raise a warning.
        # IVDW key should not be present in the incar
        with pytest.warns(UserWarning, match=r"not supported at this time"):
            scan_vdw_set = MPScanRelaxSet(self.struct, vdw="DFTD3")
            self.assertNotIn("LUSE_VDW", scan_vdw_set.incar)
            self.assertNotIn("IVDW", scan_vdw_set.incar)

    def test_potcar(self):
        self.assertEqual(self.mp_scan_set.potcar.functional, "PBE_52")

        # the default functional should be PBE_54
        test_potcar_set_1 = MPScanRelaxSet(self.struct)
        self.assertEqual(test_potcar_set_1.potcar.functional, "PBE_54")

        self.assertRaises(ValueError, MPScanRelaxSet, self.struct, user_potcar_functional="PBE")

    def test_as_from_dict(self):
        d = self.mp_scan_set.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(type(v), MPScanRelaxSet)
        self.assertEqual(v._config_dict["INCAR"]["METAGGA"], "R2SCAN")
        self.assertEqual(v.user_incar_settings["NSW"], 500)

    def test_write_input(self):
        # KSPACING replaces an explicit KPOINTS file for SCAN sets.
        self.mp_scan_set.write_input(".")
        self.assertTrue(os.path.exists("INCAR"))
        self.assertFalse(os.path.exists("KPOINTS"))
        self.assertTrue(os.path.exists("POTCAR"))
self.assertTrue(os.path.exists("POSCAR")) for f in ["INCAR", "POSCAR", "POTCAR"]: os.remove(f) class MPScanStaticSetTest(PymatgenTest): def setUp(self): self.tmp = tempfile.mkdtemp() warnings.simplefilter("ignore") def test_init(self): # test inheriting from a previous SCAN relaxation prev_run = self.TEST_FILES_DIR / "scan_relaxation" vis = MPScanStaticSet.from_prev_calc(prev_calc_dir=prev_run) # check that StaticSet settings were applied self.assertEqual(vis.incar["NSW"], 0) self.assertEqual(vis.incar["LREAL"], False) self.assertEqual(vis.incar["LORBIT"], 11) self.assertEqual(vis.incar["LVHAR"], True) self.assertEqual(vis.incar["ISMEAR"], -5) # Check that ENCUT and other INCAR settings were inherited. self.assertEqual(vis.incar["ENCUT"], 680) self.assertEqual(vis.incar["METAGGA"], "R2scan") self.assertEqual(vis.incar["KSPACING"], 0.34292842) # Check as from dict. # check that StaticSet settings were applied self.assertEqual(vis.incar["NSW"], 0) self.assertEqual(vis.incar["LREAL"], False) self.assertEqual(vis.incar["LORBIT"], 11) self.assertEqual(vis.incar["LVHAR"], True) # Check that ENCUT and KSPACING were inherited. self.assertEqual(vis.incar["ENCUT"], 680) self.assertEqual(vis.incar["METAGGA"], "R2scan") self.assertEqual(vis.incar["KSPACING"], 0.34292842) non_prev_vis = MPScanStaticSet( vis.structure, user_incar_settings={"ENCUT": 800, "LORBIT": 12, "LWAVE": True}, ) # check that StaticSet settings were applied self.assertEqual(non_prev_vis.incar["NSW"], 0) self.assertEqual(non_prev_vis.incar["LREAL"], False) self.assertEqual(non_prev_vis.incar["LVHAR"], True) self.assertEqual(vis.incar["ISMEAR"], -5) # Check that ENCUT and other INCAR settings were inherited. self.assertEqual(non_prev_vis.incar["METAGGA"], "R2scan") # the KSPACING will have the default value here, since no previous calc self.assertEqual(non_prev_vis.incar["KSPACING"], 0.22) # Check that user incar settings are applied. 
self.assertEqual(non_prev_vis.incar["ENCUT"], 800) self.assertEqual(non_prev_vis.incar["LORBIT"], 12) self.assertTrue(non_prev_vis.incar["LWAVE"]) v2 = MPScanStaticSet.from_dict(non_prev_vis.as_dict()) # Check that user incar settings are applied. self.assertEqual(v2.incar["ENCUT"], 800) self.assertEqual(v2.incar["LORBIT"], 12) self.assertTrue(non_prev_vis.incar["LWAVE"]) # Check LCALCPOL flag lcalcpol_vis = MPScanStaticSet.from_prev_calc(prev_calc_dir=prev_run, lcalcpol=True) self.assertTrue(lcalcpol_vis.incar["LCALCPOL"]) # Check LEPSILON flag lepsilon_vis = MPScanStaticSet.from_prev_calc(prev_calc_dir=prev_run, lepsilon=True) self.assertTrue(lepsilon_vis.incar["LEPSILON"]) self.assertTrue(lepsilon_vis.incar["LPEAD"]) self.assertEqual(lepsilon_vis.incar["IBRION"], 8) self.assertIsNone(lepsilon_vis.incar.get("NSW")) self.assertIsNone(lepsilon_vis.incar.get("NPAR")) def test_override_from_prev_calc(self): # test override_from_prev prev_run = self.TEST_FILES_DIR / "scan_relaxation" vis = MPScanStaticSet(_dummy_structure) vis.override_from_prev_calc(prev_calc_dir=prev_run) # check that StaticSet settings were applied self.assertEqual(vis.incar["NSW"], 0) self.assertEqual(vis.incar["LREAL"], False) self.assertEqual(vis.incar["LORBIT"], 11) self.assertEqual(vis.incar["LVHAR"], True) self.assertEqual(vis.incar["ISMEAR"], -5) # Check that ENCUT and other INCAR settings were inherited. 
self.assertEqual(vis.incar["ENCUT"], 680) self.assertEqual(vis.incar["METAGGA"], "R2scan") self.assertEqual(vis.incar["KSPACING"], 0.34292842) # Check LCALCPOL flag lcalcpol_vis = MPScanStaticSet(_dummy_structure, lcalcpol=True) lcalcpol_vis = lcalcpol_vis.override_from_prev_calc(prev_calc_dir=prev_run) self.assertTrue(lcalcpol_vis.incar["LCALCPOL"]) # Check LEPSILON flag lepsilon_vis = MPScanStaticSet(_dummy_structure, lepsilon=True) lepsilon_vis = lepsilon_vis.override_from_prev_calc(prev_calc_dir=prev_run) self.assertTrue(lepsilon_vis.incar["LEPSILON"]) self.assertTrue(lepsilon_vis.incar["LPEAD"]) self.assertEqual(lepsilon_vis.incar["IBRION"], 8) self.assertIsNone(lepsilon_vis.incar.get("NSW")) self.assertIsNone(lepsilon_vis.incar.get("NPAR")) def test_conflicting_arguments(self): with pytest.raises(ValueError, match="deprecated"): si = self.get_structure("Si") vis = MPScanStaticSet(si, potcar_functional="PBE", user_potcar_functional="PBE") def tearDown(self): shutil.rmtree(self.tmp) warnings.simplefilter("default") class FuncTest(PymatgenTest): def test_batch_write_input(self): structures = [ PymatgenTest.get_structure("Li2O"), PymatgenTest.get_structure("LiFePO4"), ] batch_write_input(structures) for d in ["Li4Fe4P4O16_1", "Li2O1_0"]: for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR"]: self.assertTrue(os.path.exists(os.path.join(d, f))) for d in ["Li4Fe4P4O16_1", "Li2O1_0"]: shutil.rmtree(d) class MVLGBSetTest(PymatgenTest): def setUp(self): filepath = self.TEST_FILES_DIR / "Li.cif" self.s = Structure.from_file(filepath) self.bulk = MVLGBSet(self.s) self.slab = MVLGBSet(self.s, slab_mode=True) self.d_bulk = self.bulk.get_vasp_input() self.d_slab = self.slab.get_vasp_input() warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_bulk(self): incar_bulk = self.d_bulk["INCAR"] self.assertEqual(incar_bulk["ISIF"], 3) def test_slab(self): incar_slab = self.d_slab["INCAR"] self.assertEqual(incar_slab["ISIF"], 2) def 
test_kpoints(self): kpoints = self.d_slab["KPOINTS"] k_a = int(40 / (self.s.lattice.abc[0]) + 0.5) k_b = int(40 / (self.s.lattice.abc[1]) + 0.5) self.assertEqual(kpoints.kpts, [[k_a, k_b, 1]]) class MVLRelax52SetTest(PymatgenTest): def setUp(self): file_path = self.TEST_FILES_DIR / "POSCAR" poscar = Poscar.from_file(file_path) self.struct = poscar.structure self.mvl_rlx_set = MVLRelax52Set(self.struct, user_potcar_functional="PBE_54", user_incar_settings={"NSW": 500}) warnings.simplefilter("ignore") def tearDown(self): warnings.simplefilter("default") def test_incar(self): incar = self.mvl_rlx_set.incar self.assertIn("NSW", incar) self.assertEqual(incar["LREAL"], "Auto") def test_potcar(self): self.assertEqual(self.mvl_rlx_set.potcar.functional, "PBE_54") self.assertIn("Fe", self.mvl_rlx_set.potcar.symbols) self.struct.remove_species(["Fe"]) test_potcar_set_1 = MVLRelax52Set(self.struct, user_potcar_functional="PBE_52") self.assertEqual(test_potcar_set_1.potcar.functional, "PBE_52") self.assertRaises(ValueError, MVLRelax52Set, self.struct, user_potcar_functional="PBE") def test_potcar_functional_warning(self): with pytest.warns(FutureWarning, match="argument is deprecated"): test_potcar_set_1 = MVLRelax52Set(self.struct, potcar_functional="PBE_52") def test_as_from_dict(self): d = self.mvl_rlx_set.as_dict() v = dec.process_decoded(d) self.assertEqual(type(v), MVLRelax52Set) self.assertEqual(v.incar["NSW"], 500) class LobsterSetTest(PymatgenTest): def setUp(self): file_path = self.TEST_FILES_DIR / "POSCAR" poscar = Poscar.from_file(file_path) self.struct = poscar.structure # test for different parameters! 
self.lobsterset1 = LobsterSet(self.struct, isym=-1, ismear=-5) self.lobsterset2 = LobsterSet(self.struct, isym=0, ismear=0) # only allow isym=-1 and isym=0 with self.assertRaises(ValueError): self.lobsterset_new = LobsterSet(self.struct, isym=2, ismear=0) # only allow ismear=-5 and ismear=0 with self.assertRaises(ValueError): self.lobsterset_new = LobsterSet(self.struct, isym=-1, ismear=2) # test if one can still hand over grid density of kpoints self.lobsterset3 = LobsterSet(self.struct, isym=0, ismear=0, user_kpoints_settings={"grid_density": 6000}) # check if users can overwrite settings in this class with the help of user_incar_settings self.lobsterset4 = LobsterSet(self.struct, user_incar_settings={"ALGO": "Fast"}) # use basis functions supplied by user self.lobsterset5 = LobsterSet( self.struct, user_supplied_basis={"Fe": "3d 3p 4s", "P": "3p 3s", "O": "2p 2s"}, ) with self.assertRaises(ValueError): self.lobsterset6 = LobsterSet(self.struct, user_supplied_basis={"Fe": "3d 3p 4s", "P": "3p 3s"}) self.lobsterset7 = LobsterSet( self.struct, address_basis_file=os.path.join(MODULE_DIR, "../../lobster/lobster_basis/BASIS_PBE_54_standard.yaml"), ) with pytest.warns(BadInputSetWarning, match="Overriding the POTCAR"): self.lobsterset6 = LobsterSet(self.struct) def test_incar(self): incar1 = self.lobsterset1.incar self.assertIn("NBANDS", incar1) self.assertEqual(incar1["NBANDS"], 116) self.assertEqual(incar1["NSW"], 0) self.assertEqual(incar1["ISMEAR"], -5) self.assertEqual(incar1["ISYM"], -1) self.assertEqual(incar1["ALGO"], "Normal") self.assertEqual(incar1["EDIFF"], 1e-6) incar2 = self.lobsterset2.incar self.assertEqual(incar2["ISYM"], 0) self.assertEqual(incar2["ISMEAR"], 0) incar4 = self.lobsterset4.incar self.assertEqual(incar4["ALGO"], "Fast") def test_kpoints(self): kpoints1 = self.lobsterset1.kpoints self.assertTrue(kpoints1.comment.split(" ")[6], 6138) kpoints2 = self.lobsterset2.kpoints self.assertTrue(kpoints2.comment.split(" ")[6], 6138) kpoints3 = 
self.lobsterset3.kpoints self.assertTrue(kpoints3.comment.split(" ")[6], 6000) def test_potcar(self): # PBE_54 is preferred at the moment self.assertEqual(self.lobsterset1.potcar_functional, "PBE_54") def test_as_from_dict(self): dict_here = self.lobsterset1.as_dict() lobsterset_new = LobsterSet.from_dict(dict_here) # test relevant parts again incar1 = lobsterset_new.incar self.assertIn("NBANDS", incar1) self.assertEqual(incar1["NBANDS"], 116) self.assertEqual(incar1["NSW"], 0) self.assertEqual(incar1["NSW"], 0) self.assertEqual(incar1["ISMEAR"], -5) self.assertEqual(incar1["ISYM"], -1) self.assertEqual(incar1["ALGO"], "Normal") kpoints1 = lobsterset_new.kpoints self.assertTrue(kpoints1.comment.split(" ")[6], 6138) self.assertEqual(lobsterset_new.potcar_functional, "PBE_54") _dummy_structure = Structure( [1, 0, 0, 0, 1, 0, 0, 0, 1], ["I"], [[0, 0, 0]], site_properties={"magmom": [[0, 0, 1]]}, ) if __name__ == "__main__": unittest.main()
richardtran415/pymatgen
pymatgen/io/vasp/tests/test_sets.py
Python
mit
66,049
# -*- coding: utf8 -*- # Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv> # This program is Free Software see LICENSE file for details import urllib import xbmc import xbmcaddon import xbmcgui import xbmcvfs import xbmcplugin import urllib2 import os import time import hashlib import simplejson import re import threading import datetime import codecs from functools import wraps ADDON = xbmcaddon.Addon() ADDON_ID = ADDON.getAddonInfo('id') ADDON_ICON = ADDON.getAddonInfo('icon') ADDON_NAME = ADDON.getAddonInfo('name') ADDON_PATH = ADDON.getAddonInfo('path').decode("utf-8") ADDON_VERSION = ADDON.getAddonInfo('version') ADDON_DATA_PATH = xbmc.translatePath("special://profile/addon_data/%s" % ADDON_ID).decode("utf-8") HOME = xbmcgui.Window(10000) SETTING = ADDON.getSetting def LANG(label_id): if 31000 <= label_id <= 33000: return ADDON.getLocalizedString(label_id) else: return xbmc.getLocalizedString(label_id) def run_async(func): """ Decorator to run a function in a separate thread """ @wraps(func) def async_func(*args, **kwargs): func_hl = threading.Thread(target=func, args=args, kwargs=kwargs) func_hl.start() return func_hl return async_func def busy_dialog(func): """ Decorator to show busy dialog while function is running Only one of the decorated functions may run simultaniously """ @wraps(func) def decorator(self, *args, **kwargs): xbmc.executebuiltin("ActivateWindow(busydialog)") result = func(self, *args, **kwargs) xbmc.executebuiltin("Dialog.Close(busydialog)") return result return decorator def dictfind(lst, key, value): for i, dic in enumerate(lst): if dic[key] == value: return dic return "" def format_time(time, format=None): """ get formatted time format = h, m or None """ try: intTime = int(time) except: return time hour = str(intTime / 60) minute = str(intTime % 60).zfill(2) if format == "h": return hour elif format == "m": return minute elif intTime >= 60: return hour + " h " + minute + " min" else: return minute + " min" def 
url_quote(input_string): """ get url-quoted string """ try: return urllib.quote_plus(input_string.encode('utf8', 'ignore')) except: return urllib.quote_plus(unicode(input_string, "utf-8").encode("utf-8")) def merge_dicts(*dict_args): ''' Given any number of dicts, shallow copy and merge into a new dict, precedence goes to key value pairs in latter dicts. ''' result = {} for dictionary in dict_args: result.update(dictionary) return result def check_version(): """ check version, open TextViewer if update detected """ from WindowManager import wm if not SETTING("changelog_version") == ADDON_VERSION: wm.open_textviewer(header=LANG(24036), text=read_from_file(os.path.join(ADDON_PATH, "changelog.txt"), True)) ADDON.setSetting("changelog_version", ADDON_VERSION) if not SETTING("first_start_infodialog"): ADDON.setSetting("first_start_infodialog", "True") xbmcgui.Dialog().ok(heading=ADDON_NAME, line1=LANG(32140), line2=LANG(32141)) def get_autocomplete_items(search_str): """ get dict list with autocomplete labels from google """ if SETTING("autocomplete_provider") == "youtube": return get_google_autocomplete_items(search_str, True) elif SETTING("autocomplete_provider") == "google": return get_google_autocomplete_items(search_str) else: return get_common_words_autocomplete_items(search_str) def get_google_autocomplete_items(search_str, youtube=False): """ get dict list with autocomplete labels from google """ if not search_str: return [] listitems = [] headers = {'User-agent': 'Mozilla/5.0'} base_url = "http://clients1.google.com/complete/" url = "search?hl=%s&q=%s&json=t&client=serp" % (SETTING("autocomplete_lang"), urllib.quote_plus(search_str)) if youtube: url += "&ds=yt" result = get_JSON_response(url=base_url + url, headers=headers, folder="Google") if result and len(result) > 1: for item in result[1]: li = {"label": item, "path": "plugin://script.extendedinfo/?info=selectautocomplete&&id=%s" % item} listitems.append(li) return listitems def 
get_common_words_autocomplete_items(search_str): """ get dict list with autocomplete labels from locally saved lists """ listitems = [] k = search_str.rfind(" ") if k >= 0: search_str = search_str[k + 1:] path = os.path.join(ADDON_PATH, "resources", "data", "common_%s.txt" % SETTING("autocomplete_lang_local")) with codecs.open(path, encoding="utf8") as f: for i, line in enumerate(f.readlines()): if not line.startswith(search_str) or len(line) <= 2: continue li = {"label": line, "path": "plugin://script.extendedinfo/?info=selectautocomplete&&id=%s" % line} listitems.append(li) if len(listitems) > 10: break return listitems def widget_selectdialog(filter=None, string_prefix="widget"): """ show dialog including all video media lists (for widget selection) and set strings PREFIX.path and PREFIX.label with chosen values """ movie = {"intheaters": "%s [I](RottenTomatoes)[/I]" % LANG(32042), "boxoffice": "%s [I](RottenTomatoes)[/I]" % LANG(32055), "opening": "%s [I](RottenTomatoes)[/I]" % LANG(32048), "comingsoon": "%s [I](RottenTomatoes)[/I]" % LANG(32043), "toprentals": "%s [I](RottenTomatoes)[/I]" % LANG(32056), "currentdvdreleases": "%s [I](RottenTomatoes)[/I]" % LANG(32049), "newdvdreleases": "%s [I](RottenTomatoes)[/I]" % LANG(32053), "upcomingdvds": "%s [I](RottenTomatoes)[/I]" % LANG(32054), # tmdb "incinemas": "%s [I](TheMovieDB)[/I]" % LANG(32042), "upcoming": "%s [I](TheMovieDB)[/I]" % LANG(32043), "topratedmovies": "%s [I](TheMovieDB)[/I]" % LANG(32046), "popularmovies": "%s [I](TheMovieDB)[/I]" % LANG(32044), "accountlists": "%s [I](TheMovieDB)[/I]" % LANG(32045), # trakt "trendingmovies": "%s [I](Trakt.tv)[/I]" % LANG(32047), # tmdb "starredmovies": "%s [I](TheMovieDB)[/I]" % LANG(32134), "ratedmovies": "%s [I](TheMovieDB)[/I]" % LANG(32135), } tvshow = {"airingshows": "%s [I](Trakt.tv)[/I]" % LANG(32028), "premiereshows": "%s [I](Trakt.tv)[/I]" % LANG(32029), "trendingshows": "%s [I](Trakt.tv)[/I]" % LANG(32032), "airingtodaytvshows": "%s 
[I](TheMovieDB)[/I]" % LANG(32038), "onairtvshows": "%s [I](TheMovieDB)[/I]" % LANG(32039), "topratedtvshows": "%s [I](TheMovieDB)[/I]" % LANG(32040), "populartvshows": "%s [I](TheMovieDB)[/I]" % LANG(32041), "starredtvshows": "%s [I](TheMovieDB)[/I]" % LANG(32144), "ratedtvshows": "%s [I](TheMovieDB)[/I]" % LANG(32145), } image = {"xkcd": "XKCD webcomics", "cyanide": "Cyanide & Happiness webcomics", "dailybabe": "%s" % LANG(32057), "dailybabes": "%s" % LANG(32058), } # popularpeople artist = {"topartists": "LastFM: Top artists", "hypedartists": "LastFM: Hyped artists" } event = {} if True: listitems = merge_dicts(movie, tvshow, image, artist, event) keywords = [key for key in listitems.keys()] labels = [label for label in listitems.values()] ret = xbmcgui.Dialog().select(LANG(32151), labels) if ret > -1: notify(keywords[ret]) xbmc.executebuiltin("Skin.SetString(%s.path,plugin://script.extendedinfo?info=%s)" % (string_prefix, keywords[ret])) xbmc.executebuiltin("Skin.SetString(%s.label,%s)" % (string_prefix, labels[ret])) class SettingsMonitor(xbmc.Monitor): def __init__(self): xbmc.Monitor.__init__(self) def onSettingsChanged(self): xbmc.sleep(300) def calculate_age(born, died=False): """ calculate age based on born / died display notification for birthday return death age when already dead """ if died: ref_day = died.split("-") elif born: date = datetime.date.today() ref_day = [date.year, date.month, date.day] else: return "" actor_born = born.split("-") base_age = int(ref_day[0]) - int(actor_born[0]) if len(actor_born) > 1: diff_months = int(ref_day[1]) - int(actor_born[1]) diff_days = int(ref_day[2]) - int(actor_born[2]) if diff_months < 0 or (diff_months == 0 and diff_days < 0): base_age -= 1 elif diff_months == 0 and diff_days == 0 and not died: notify("%s (%i)" % (LANG(32158), base_age)) return base_age def get_playlist_stats(path): start_index = -1 end_index = -1 if (".xsp" in path) and ("special://" in path): start_index = path.find("special://") end_index 
= path.find(".xsp") + 4 elif ("library://" in path): start_index = path.find("library://") end_index = path.rfind("/") + 1 elif ("videodb://" in path): start_index = path.find("videodb://") end_index = path.rfind("/") + 1 if (start_index > 0) and (end_index > 0): playlist_path = path[start_index:end_index] json_response = get_kodi_json(method="Files.GetDirectory", params='{"directory": "%s", "media": "video", "properties": ["playcount", "resume"]}' % playlist_path) if "result" in json_response: played = 0 in_progress = 0 numitems = json_response["result"]["limits"]["total"] for item in json_response["result"]["files"]: if "playcount" in item: if item["playcount"] > 0: played += 1 if item["resume"]["position"] > 0: in_progress += 1 HOME.setProperty('PlaylistWatched', str(played)) HOME.setProperty('PlaylistUnWatched', str(numitems - played)) HOME.setProperty('PlaylistInProgress', str(in_progress)) HOME.setProperty('PlaylistCount', str(numitems)) def get_sort_letters(path, focused_letter): """ create string including all sortletters and put it into home window property "LetterList" """ listitems = [] letter_list = [] HOME.clearProperty("LetterList") if SETTING("FolderPath") == path: letter_list = SETTING("LetterList").split() elif path: json_response = get_kodi_json(method="Files.GetDirectory", params='{"directory": "%s", "media": "files"}' % path) if "result" in json_response and "files" in json_response["result"]: for movie in json_response["result"]["files"]: cleaned_label = movie["label"].replace("The ", "") if cleaned_label: sortletter = cleaned_label[0] if sortletter not in letter_list: letter_list.append(sortletter) ADDON.setSetting("LetterList", " ".join(letter_list)) ADDON.setSetting("FolderPath", path) HOME.setProperty("LetterList", "".join(letter_list)) if not letter_list or not focused_letter: return None start_ord = ord("A") for i in range(0, 26): letter = chr(start_ord + i) if letter == focused_letter: label = "[B][COLOR FFFF3333]%s[/COLOR][/B]" % letter 
elif letter in letter_list: label = letter else: label = "[COLOR 55FFFFFF]%s[/COLOR]" % letter listitems.append({"label": label}) return listitems def millify(n): """ make large numbers human-readable, return string """ millnames = [' ', '.000', ' ' + LANG(32000), ' ' + LANG(32001), ' ' + LANG(32002)] if not n or n <= 100: return "" n = float(n) char_count = len(str(n)) millidx = (char_count / 3) - 1 if millidx == 3 or char_count == 9: return '%.2f%s' % (n / 10 ** (3 * millidx), millnames[millidx]) else: return '%.0f%s' % (n / 10 ** (3 * millidx), millnames[millidx]) def media_streamdetails(filename, streamdetails): info = {} video = streamdetails['video'] audio = streamdetails['audio'] info['VideoCodec'] = '' info['VideoAspect'] = '' info['VideoResolution'] = '' info['AudioCodec'] = '' info['AudioChannels'] = '' if video: if (video[0]['width'] <= 720 and video[0]['height'] <= 480): info['VideoResolution'] = "480" elif (video[0]['width'] <= 768 and video[0]['height'] <= 576): info['VideoResolution'] = "576" elif (video[0]['width'] <= 960 and video[0]['height'] <= 544): info['VideoResolution'] = "540" elif (video[0]['width'] <= 1280 and video[0]['height'] <= 720): info['VideoResolution'] = "720" elif (video[0]['width'] >= 1281 or video[0]['height'] >= 721): info['VideoResolution'] = "1080" else: info['videoresolution'] = "" info['VideoCodec'] = str(video[0]['codec']) if (video[0]['aspect'] < 1.4859): info['VideoAspect'] = "1.33" elif (video[0]['aspect'] < 1.7190): info['VideoAspect'] = "1.66" elif (video[0]['aspect'] < 1.8147): info['VideoAspect'] = "1.78" elif (video[0]['aspect'] < 2.0174): info['VideoAspect'] = "1.85" elif (video[0]['aspect'] < 2.2738): info['VideoAspect'] = "2.20" else: info['VideoAspect'] = "2.35" elif (('dvd') in filename and not ('hddvd' or 'hd-dvd') in filename) or (filename.endswith('.vob' or '.ifo')): info['VideoResolution'] = '576' elif (('bluray' or 'blu-ray' or 'brrip' or 'bdrip' or 'hddvd' or 'hd-dvd') in filename): 
info['VideoResolution'] = '1080' if audio: info['AudioCodec'] = audio[0]['codec'] info['AudioChannels'] = audio[0]['channels'] return info def fetch(dictionary, key): if key in dictionary: if dictionary[key] is not None: return dictionary[key] return "" def get_year(year_string): """ return last 4 chars of string """ if year_string and len(year_string) > 3: return year_string[:4] else: return "" def fetch_musicbrainz_id(artist, artist_id=-1): """ fetches MusicBrainz ID for given *artist and returns it uses musicbrainz.org """ base_url = "http://musicbrainz.org/ws/2/artist/?fmt=json" url = '&query=artist:%s' % urllib.quote_plus(artist) results = get_JSON_response(url=base_url + url, cache_days=30, folder="MusicBrainz") if results and len(results["artists"]) > 0: log("found artist id for %s: %s" % (artist.decode("utf-8"), results["artists"][0]["id"])) return results["artists"][0]["id"] else: return None def get_http(url=None, headers=False): """ fetches data from *url, returns it as a string """ succeed = 0 if not headers: headers = {'User-agent': 'XBMC/14.0 ( phil65@kodi.tv )'} request = urllib2.Request(url) for (key, value) in headers.iteritems(): request.add_header(key, value) while (succeed < 2) and (not xbmc.abortRequested): try: response = urllib2.urlopen(request, timeout=3) data = response.read() return data except: log("get_http: could not get data from %s" % url) xbmc.sleep(1000) succeed += 1 return None def get_JSON_response(url="", cache_days=7.0, folder=False, headers=False): """ get JSON response for *url, makes use of prop and file cache. 
""" now = time.time() hashed_url = hashlib.md5(url).hexdigest() if folder: cache_path = xbmc.translatePath(os.path.join(ADDON_DATA_PATH, folder)) else: cache_path = xbmc.translatePath(os.path.join(ADDON_DATA_PATH)) path = os.path.join(cache_path, hashed_url + ".txt") cache_seconds = int(cache_days * 86400.0) prop_time = HOME.getProperty(hashed_url + "_timestamp") if prop_time and now - float(prop_time) < cache_seconds: try: prop = simplejson.loads(HOME.getProperty(hashed_url)) log("prop load for %s. time: %f" % (url, time.time() - now)) return prop except: log("could not load prop data for %s" % url) if xbmcvfs.exists(path) and ((now - os.path.getmtime(path)) < cache_seconds): results = read_from_file(path) log("loaded file for %s. time: %f" % (url, time.time() - now)) else: response = get_http(url, headers) try: results = simplejson.loads(response) log("download %s. time: %f" % (url, time.time() - now)) save_to_file(results, hashed_url, cache_path) except: log("Exception: Could not get new JSON data from %s. 
Tryin to fallback to cache" % url) log(response) if xbmcvfs.exists(path): results = read_from_file(path) else: results = [] HOME.setProperty(hashed_url + "_timestamp", str(now)) HOME.setProperty(hashed_url, simplejson.dumps(results)) return results class FunctionThread(threading.Thread): def __init__(self, function=None, param=None): threading.Thread.__init__(self) self.function = function self.param = param self.setName(self.function.__name__) log("init " + self.function.__name__) def run(self): self.listitems = self.function(self.param) return True class GetFileThread(threading.Thread): def __init__(self, url): threading.Thread.__init__(self) self.url = url def run(self): self.file = get_file(self.url) def get_file(url): clean_url = xbmc.translatePath(urllib.unquote(url)).replace("image://", "") if clean_url.endswith("/"): clean_url = clean_url[:-1] cached_thumb = xbmc.getCacheThumbName(clean_url) vid_cache_file = os.path.join("special://profile/Thumbnails/Video", cached_thumb[0], cached_thumb) cache_file_jpg = os.path.join("special://profile/Thumbnails/", cached_thumb[0], cached_thumb[:-4] + ".jpg").replace("\\", "/") cache_file_png = cache_file_jpg[:-4] + ".png" if xbmcvfs.exists(cache_file_jpg): log("cache_file_jpg Image: " + url + "-->" + cache_file_jpg) return xbmc.translatePath(cache_file_jpg) elif xbmcvfs.exists(cache_file_png): log("cache_file_png Image: " + url + "-->" + cache_file_png) return cache_file_png elif xbmcvfs.exists(vid_cache_file): log("vid_cache_file Image: " + url + "-->" + vid_cache_file) return vid_cache_file try: request = urllib2.Request(url) request.add_header('Accept-encoding', 'gzip') response = urllib2.urlopen(request, timeout=3) data = response.read() response.close() log('image downloaded: ' + url) except: log('image download failed: ' + url) return "" if not data: return "" if url.endswith(".png"): image = cache_file_png else: image = cache_file_jpg try: with open(xbmc.translatePath(image), "wb") as f: f.write(data) return 
xbmc.translatePath(image) except: log('failed to save image ' + url) return "" def get_favs_by_type(fav_type): """ returns dict list containing favourites with type *fav_type """ favs = get_favs() return [fav for fav in favs if fav["Type"] == fav_type] def get_fav_path(fav): if fav["type"] == "media": return "PlayMedia(%s)" % (fav["path"]) elif fav["type"] == "script": return "RunScript(%s)" % (fav["path"]) elif "window" in fav and "windowparameter" in fav: return "ActivateWindow(%s,%s)" % (fav["window"], fav["windowparameter"]) else: log("error parsing favs") def get_favs(): """ returns dict list containing favourites """ items = [] json_response = get_kodi_json(method="Favourites.GetFavourites", params='{"type": null, "properties": ["path", "thumbnail", "window", "windowparameter"]}') if "result" not in json_response or json_response["result"]["limits"]["total"] == 0: return [] for fav in json_response["result"]["favourites"]: path = get_fav_path(fav) newitem = {'Label': fav["title"], 'thumb': fav["thumbnail"], 'Type': fav["type"], 'Builtin': path, 'path': "plugin://script.extendedinfo/?info=action&&id=" + path} items.append(newitem) return items def get_icon_panel(number): """ get icon panel with index *number, returns dict list based on skin strings """ items = [] offset = number * 5 - 5 for i in range(1, 6): infopanel_path = xbmc.getInfoLabel("Skin.String(IconPanelItem%i.Path)" % (i + offset)) newitem = {'Label': xbmc.getInfoLabel("Skin.String(IconPanelItem%i.Label)" % (i + offset)).decode("utf-8"), 'path': "plugin://script.extendedinfo/?info=action&&id=" + infopanel_path.decode("utf-8"), 'thumb': xbmc.getInfoLabel("Skin.String(IconPanelItem%i.Icon)" % (i + offset)).decode("utf-8"), 'id': "IconPanelitem%i" % (i + offset), 'Type': xbmc.getInfoLabel("Skin.String(IconPanelItem%i.Type)" % (i + offset)).decode("utf-8")} items.append(newitem) return items def get_weather_images(): items = [] for i in range(1, 6): newitem = {'Label': "bla", 'path': 
"plugin://script.extendedinfo/?info=action&&id=SetFocus(22222)", 'thumb': xbmc.getInfoLabel("Window(weather).Property(Map.%i.Area)" % i), 'Layer': xbmc.getInfoLabel("Window(weather).Property(Map.%i.Layer)" % i), 'Legend': xbmc.getInfoLabel("Window(weather).Property(Map.%i.Legend)" % i)} items.append(newitem) return items def log(txt): if isinstance(txt, str): txt = txt.decode("utf-8", 'ignore') message = u'%s: %s' % (ADDON_ID, txt) xbmc.log(msg=message.encode("utf-8", 'ignore'), level=xbmc.LOGDEBUG) def get_browse_dialog(default="", heading=LANG(1024), dlg_type=3, shares="files", mask="", use_thumbs=False, treat_as_folder=False): dialog = xbmcgui.Dialog() value = dialog.browse(dlg_type, heading, shares, mask, use_thumbs, treat_as_folder, default) return value def save_to_file(content, filename, path=""): """ dump json and save to *filename in *path """ if path == "": text_file_path = get_browse_dialog() + filename + ".txt" else: if not xbmcvfs.exists(path): xbmcvfs.mkdirs(path) text_file_path = os.path.join(path, filename + ".txt") now = time.time() text_file = xbmcvfs.File(text_file_path, "w") simplejson.dump(content, text_file) text_file.close() log("saved textfile %s. Time: %f" % (text_file_path, time.time() - now)) return True def read_from_file(path="", raw=False): """ return data from file with *path """ if path == "": path = get_browse_dialog(dlg_type=1) if not xbmcvfs.exists(path): return False try: with open(path) as f: log("opened textfile %s." 
% (path)) if not raw: result = simplejson.load(f) else: result = f.read() return result except: log("failed to load textfile: " + path) return False def convert_youtube_url(raw_string): youtube_id = extract_youtube_id(raw_string) if youtube_id: return 'plugin://script.extendedinfo/?info=youtubevideo&&id=%s' % youtube_id return "" def extract_youtube_id(raw_string): vid_ids = None if raw_string and 'youtube.com/v' in raw_string: vid_ids = re.findall('http://www.youtube.com/v/(.{11})\??', raw_string, re.DOTALL) elif raw_string and 'youtube.com/watch' in raw_string: vid_ids = re.findall('youtube.com/watch\?v=(.{11})\??', raw_string, re.DOTALL) if vid_ids: return vid_ids[0] else: return "" def notify(header="", message="", icon=ADDON_ICON, time=5000, sound=True): xbmcgui.Dialog().notification(heading=header, message=message, icon=icon, time=time, sound=sound) def get_kodi_json(method, params): json_query = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "%s", "params": %s, "id": 1}' % (method, params)) json_query = unicode(json_query, 'utf-8', errors='ignore') return simplejson.loads(json_query) def prettyprint(string): log(simplejson.dumps(string, sort_keys=True, indent=4, separators=(',', ': '))) def pass_dict_to_skin(data=None, prefix="", debug=False, precache=False, window_id=10000): window = xbmcgui.Window(window_id) if not data: return None threads = [] image_requests = [] for (key, value) in data.iteritems(): if not value: continue value = unicode(value) if precache: if value.startswith("http") and (value.endswith(".jpg") or value.endswith(".png")): if value not in image_requests and value: thread = GetFileThread(value) threads += [thread] thread.start() image_requests.append(value) window.setProperty('%s%s' % (prefix, str(key)), value) if debug: log('%s%s' % (prefix, str(key)) + value) for x in threads: x.join() def merge_dict_lists(items, key="job"): crew_id_list = [] crew_list = [] for item in items: if item["id"] not in crew_id_list: 
crew_id_list.append(item["id"]) crew_list.append(item) else: index = crew_id_list.index(item["id"]) if key in crew_list[index]: crew_list[index][key] = crew_list[index][key] + " / " + item[key] return crew_list def pass_list_to_skin(name="", data=[], prefix="", handle=None, limit=False): if data and limit and int(limit) < len(data): data = data[:int(limit)] if not handle: set_window_props(name, data, prefix) return None HOME.clearProperty(name) if data: HOME.setProperty(name + ".Count", str(len(data))) items = create_listitems(data) itemlist = [(item.getProperty("path"), item, bool(item.getProperty("directory"))) for item in items] xbmcplugin.addDirectoryItems(handle=handle, items=itemlist, totalItems=len(itemlist)) xbmcplugin.endOfDirectory(handle) def set_window_props(name, data, prefix="", debug=False): if not data: HOME.setProperty('%s%s.Count' % (prefix, name), '0') log("%s%s.Count = None" % (prefix, name)) return None for (count, result) in enumerate(data): if debug: log("%s%s.%i = %s" % (prefix, name, count + 1, str(result))) for (key, value) in result.iteritems(): value = unicode(value) HOME.setProperty('%s%s.%i.%s' % (prefix, name, count + 1, str(key)), value) if key.lower() in ["poster", "banner", "fanart", "clearart", "clearlogo", "landscape", "discart", "characterart", "tvshow.fanart", "tvshow.poster", "tvshow.banner", "tvshow.clearart", "tvshow.characterart"]: HOME.setProperty('%s%s.%i.Art(%s)' % (prefix, name, count + 1, str(key)), value) if debug: log('%s%s.%i.%s --> ' % (prefix, name, count + 1, str(key)) + value) HOME.setProperty('%s%s.Count' % (prefix, name), str(len(data))) def create_listitems(data=None, preload_images=0): INT_INFOLABELS = ["year", "episode", "season", "top250", "tracknumber", "playcount", "overlay"] FLOAT_INFOLABELS = ["rating"] STRING_INFOLABELS = ["genre", "director", "mpaa", "plot", "plotoutline", "title", "originaltitle", "sorttitle", "duration", "studio", "tagline", "writer", "tvshowtitle", "premiered", "status", "code", 
"aired", "credits", "lastplayed", "album", "votes", "trailer", "dateadded"] if not data: return [] itemlist = [] threads = [] image_requests = [] for (count, result) in enumerate(data): listitem = xbmcgui.ListItem('%s' % (str(count))) for (key, value) in result.iteritems(): if not value: continue value = unicode(value) if count < preload_images: if value.startswith("http://") and (value.endswith(".jpg") or value.endswith(".png")): if value not in image_requests: thread = GetFileThread(value) threads += [thread] thread.start() image_requests.append(value) if key.lower() in ["name", "label"]: listitem.setLabel(value) elif key.lower() in ["label2"]: listitem.setLabel2(value) elif key.lower() in ["title"]: listitem.setLabel(value) listitem.setInfo('video', {key.lower(): value}) elif key.lower() in ["thumb"]: listitem.setThumbnailImage(value) listitem.setArt({key.lower(): value}) elif key.lower() in ["icon"]: listitem.setIconImage(value) listitem.setArt({key.lower(): value}) elif key.lower() in ["path"]: listitem.setPath(path=value) # listitem.setProperty('%s' % (key), value) # elif key.lower() in ["season", "episode"]: # listitem.setInfo('video', {key.lower(): int(value)}) # listitem.setProperty('%s' % (key), value) elif key.lower() in ["poster", "banner", "fanart", "clearart", "clearlogo", "landscape", "discart", "characterart", "tvshow.fanart", "tvshow.poster", "tvshow.banner", "tvshow.clearart", "tvshow.characterart"]: listitem.setArt({key.lower(): value}) elif key.lower() in INT_INFOLABELS: try: listitem.setInfo('video', {key.lower(): int(value)}) except: pass elif key.lower() in STRING_INFOLABELS: listitem.setInfo('video', {key.lower(): value}) elif key.lower() in FLOAT_INFOLABELS: try: listitem.setInfo('video', {key.lower(): "%1.1f" % float(value)}) except: pass # else: listitem.setProperty('%s' % (key), value) listitem.setProperty("index", str(count)) itemlist.append(listitem) for x in threads: x.join() return itemlist def clean_text(text): if not text: return 
"" text = re.sub('(From Wikipedia, the free encyclopedia)|(Description above from the Wikipedia.*?Wikipedia)', '', text) text = re.sub('<(.|\n|\r)*?>', '', text) text = text.replace('<br \/>', '[CR]') text = text.replace('<em>', '[I]').replace('</em>', '[/I]') text = text.replace('&amp;', '&') text = text.replace('&gt;', '>').replace('&lt;', '<') text = text.replace('&#39;', "'").replace('&quot;', '"') text = re.sub("\n\\.$", "", text) text = text.replace('User-contributed text is available under the Creative Commons By-SA License and may also be available under the GNU FDL.', '') while text: s = text[0] e = text[-1] if s in [u'\u200b', " ", "\n"]: text = text[1:] elif e in [u'\u200b', " ", "\n"]: text = text[:-1] elif s.startswith(".") and not s.startswith(".."): text = text[1:] else: break return text.strip()
chimkentec/KodiMODo_rep
script.extendedinfo/resources/lib/Utils.py
Python
gpl-3.0
33,286
import os
import unittest

import elffiles


class TestFileOperations(unittest.TestCase):
    """Exercises the elffiles path helpers: ensureFinalSlash, ensureDir, removeDirs."""

    def _expect_slash_appended(self, raw):
        # ensureFinalSlash should return the input with exactly one os.sep added.
        self.assertEqual(elffiles.ensureFinalSlash(raw), raw + os.sep)

    def testFinalSlash01(self):
        self._expect_slash_appended("C:\\Test")

    def testFinalSlash02(self):
        # A path already ending in a backslash is returned unchanged.
        unchanged = "C:\\Test\\"
        self.assertEqual(elffiles.ensureFinalSlash(unchanged), unchanged)

    def testFinalSlash03(self):
        # A forward-slash ending is not treated as terminated: os.sep is appended.
        self._expect_slash_appended("C:/Test/")

    def testFinalSlash04(self):
        self._expect_slash_appended("Test/")

    def testFinalSlash05(self):
        self._expect_slash_appended("Test")

    def removeFolder(self, folder):
        # Helper (not a test): delete the folder tree and confirm it is gone.
        elffiles.removeDirs(folder)
        self.assertFalse(os.path.exists(folder))

    def testEnsureDir01(self):
        # assumes the ELVENWARE environment variable points at a writable dir
        target = elffiles.ensureFinalSlash(os.environ['ELVENWARE']) + "Bar"
        elffiles.ensureDir(target)
        self.assertTrue(os.path.exists(target))
        self.removeFolder(target)

    def testEnsureDir02(self):
        # ensureDir must create intermediate directories as well.
        target = elffiles.ensureFinalSlash(os.environ['ELVENWARE']) + "Bar/Foo/Goober"
        elffiles.ensureDir(target)
        self.assertTrue(os.path.exists(target))
        self.removeFolder(elffiles.ensureFinalSlash(os.environ['ELVENWARE']) + "Bar")


if __name__ == '__main__':
    unittest.main()
donlee888/JsObjects
Python/PythonUtils/elfutils/UnitTest01.py
Python
mit
1,445
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Formatting decimal places #0
Kata: https://www.codewars.com/kata/5641a03210e973055a00000d/train/python
"""
import unittest
from decimal import Decimal, ROUND_HALF_UP


class TestCases(unittest.TestCase):

    def setUp(self):
        pass

    def test1(self):
        self.assertEqual(two_decimal_places(4.659725356), 4.66,
                         "didn't work for 4.659725356")

    def test2(self):
        self.assertEqual(two_decimal_places(173735326.3783732637948948),
                         173735326.38,
                         "didn't work for 173735326.3783732637948948")

    def test3(self):
        self.assertEqual(two_decimal_places(4.653725356), 4.65,
                         "didn't work for 4.653725356")

    def test4(self):
        self.assertEqual(two_decimal_places(2.675), 2.68, "worked for 2.675")


def two_decimal_places(n):
    """Round *n* to exactly two decimal places, with half-up tie-breaking.

    BUGFIX: the previous implementation `round(n, 2)` fails test4 above:
    the literal 2.675 is stored as the binary float 2.67499..., so
    round(2.675, 2) yields 2.67.  Converting through str() recovers the
    shortest decimal literal for the float, and ROUND_HALF_UP then rounds
    ties away from zero, giving the expected 2.68.

    :param n: a number (int or float)
    :returns: float rounded to two decimal places
    """
    quantized = Decimal(str(n)).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
    return float(quantized)


if __name__ == '__main__':
    unittest.main()

# Reference solutions:
karchi/codewars_kata
已完成/Formatting decimal places #0.py
Python
mit
844
"""Select and re-export the CMS model implementation for the configured ORM."""
from djpcms import sites

# Dispatch on the configured object-relational mapper; the star import
# re-exports that backend's model classes as this module's public API.
if sites.settings.CMS_ORM == 'django':
    from djpcms.core.cmsmodels._django import *
elif sites.settings.CMS_ORM == 'stdnet':
    from djpcms.core.cmsmodels._stdnet import *
else:
    # BUGFIX: error message typo "Objecr" -> "Object".
    raise NotImplementedError(
        'Object Relational Mapper {0} not available for CMS models'.format(
            sites.settings.CMS_ORM))
strogo/djpcms
djpcms/models.py
Python
bsd-3-clause
354
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Wrapper for Python TPU tests. The py_tpu_test macro will actually use this file as its main, building and executing the user-provided test file as a py_binary instead. This lets us do important work behind the scenes, without complicating the tests themselves. The main responsibilities of this file are: - Define standard set of model flags if test did not. This allows us to safely set flags at the Bazel invocation level using --test_arg. - Pick a random directory on GCS to use for each test case, and set it as the default value of --model_dir. This is similar to how Bazel provides each test with a fresh local directory in $TEST_TMPDIR. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import ast import importlib import os import sys import uuid from tensorflow.python.platform import flags from tensorflow.python.util import tf_inspect FLAGS = flags.FLAGS flags.DEFINE_string( 'wrapped_tpu_test_module_relative', None, 'The Python-style relative path to the user-given test. 
If test is in same ' 'directory as BUILD file as is common, then "test.py" would be ".test".') flags.DEFINE_string('test_dir_base', os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR'), 'GCS path to root directory for temporary test files.') flags.DEFINE_string( 'bazel_repo_root', 'tensorflow/python', 'Substring of a bazel filepath beginning the python absolute import path.') # List of flags which all TPU tests should accept. REQUIRED_FLAGS = ['tpu', 'zone', 'project', 'model_dir'] def maybe_define_flags(): """Defines any required flags that are missing.""" for f in REQUIRED_FLAGS: try: flags.DEFINE_string(f, None, 'flag defined by test lib') except flags.DuplicateFlagError: pass def set_random_test_dir(): """Pick a random GCS directory under --test_dir_base, set as --model_dir.""" path = os.path.join(FLAGS.test_dir_base, uuid.uuid4().hex) FLAGS.set_default('model_dir', path) def calculate_parent_python_path(test_filepath): """Returns the absolute import path for the containing directory. Args: test_filepath: The filepath which Bazel invoked (ex: /filesystem/path/tensorflow/tensorflow/python/tpu/tpu_test) Returns: Absolute import path of parent (ex: tensorflow.python.tpu). Raises: ValueError: if bazel_repo_root does not appear within test_filepath. """ # We find the last occurrence of bazel_repo_root, and drop everything before. split_path = test_filepath.rsplit(FLAGS.bazel_repo_root, 1) if len(split_path) < 2: raise ValueError('Filepath "%s" does not contain repo root "%s"' % (test_filepath, FLAGS.bazel_repo_root)) path = FLAGS.bazel_repo_root + split_path[1] # We drop the last portion of the path, which is the name of the test wrapper. path = path.rsplit('/', 1)[0] # We convert the directory separators into dots. return path.replace('/', '.') def import_user_module(): """Imports the flag-specified user test code. This runs all top-level statements in the user module, specifically flag definitions. Returns: The user test module. 
""" return importlib.import_module(FLAGS.wrapped_tpu_test_module_relative, calculate_parent_python_path(sys.argv[0])) def _is_test_class(obj): """Check if arbitrary object is a test class (not a test object!). Args: obj: An arbitrary object from within a module. Returns: True iff obj is a test class inheriting at some point from a module named "TestCase". This is because we write tests using different underlying test libraries. """ return (tf_inspect.isclass(obj) and 'TestCase' in (p.__name__ for p in tf_inspect.getmro(obj))) module_variables = vars() def move_test_classes_into_scope(wrapped_test_module): """Add all test classes defined in wrapped module to our module. The test runner works by inspecting the main module for TestCase classes, so by adding a module-level reference to the TestCase we cause it to execute the wrapped TestCase. Args: wrapped_test_module: The user-provided test code to run. """ for name, obj in wrapped_test_module.__dict__.items(): if _is_test_class(obj): module_variables['tpu_test_imported_%s' % name] = obj def run_user_main(wrapped_test_module): """Runs the "if __name__ == '__main__'" at the bottom of a module. TensorFlow practice is to have a main if at the bottom of the module which might call an API compat function before calling test.main(). Since this is a statement, not a function, we can't cleanly reference it, but we can inspect it from the user module and run it in the context of that module so all imports and variables are available to it. Args: wrapped_test_module: The user-provided test code to run. Raises: NotImplementedError: If main block was not found in module. This should not be caught, as it is likely an error on the user's part -- absltest is all too happy to report a successful status (and zero tests executed) if a user forgets to end a class with "test.main()". """ tree = ast.parse(tf_inspect.getsource(wrapped_test_module)) # Get string representation of just the condition `__name == "__main__"`. 
target = ast.dump(ast.parse('if __name__ == "__main__": pass').body[0].test) # `tree.body` is a list of top-level statements in the module, like imports # and class definitions. We search for our main block, starting from the end. for expr in reversed(tree.body): if isinstance(expr, ast.If) and ast.dump(expr.test) == target: break else: raise NotImplementedError( 'Could not find `if __name__ == "main":` block in %s.' % wrapped_test_module.__name__) # expr is defined because we would have raised an error otherwise. new_ast = ast.Module(body=expr.body, type_ignores=[]) # pylint:disable=undefined-loop-variable exec( # pylint:disable=exec-used compile(new_ast, '<ast>', 'exec'), globals(), wrapped_test_module.__dict__, ) if __name__ == '__main__': # Partially parse flags, since module to import is specified by flag. unparsed = FLAGS(sys.argv, known_only=True) user_module = import_user_module() maybe_define_flags() # Parse remaining flags. FLAGS(unparsed) set_random_test_dir() move_test_classes_into_scope(user_module) run_user_main(user_module)
gunan/tensorflow
tensorflow/python/tpu/tpu_test_wrapper.py
Python
apache-2.0
7,198
#!/usr/bin/env python
"""kTBS stress tool.

Posts batches of obsels to a kTBS trace (remote, local file-backed, or
in-memory) and reports per-iteration and average throughput.
"""

from argparse import ArgumentParser
from getpass import getpass
from os import fork
from sys import stderr
from timeit import timeit
from uuid import uuid4

# BUGFIX: URIRef was not imported although task() uses it whenever --uuid is
# combined with --nbobs > 1, which raised NameError at runtime.
from rdflib import Graph, BNode, Literal, URIRef

from rdfrest.cores.http_client import set_http_option, add_http_credentials

from ktbs.client import get_ktbs
from ktbs.engine.service import make_ktbs
from ktbs.namespace import KTBS
from ktbs.config import get_ktbs_configuration

# Module-level state shared by setUp/task/tearDown.
ARGS = None
BASE = None


def parse_args():
    """Parse command-line options into the module-level ARGS namespace."""
    global ARGS
    parser = ArgumentParser("kTBS stress tool")
    # BUGFIX: "giveb" -> "given" in the help text.
    parser.add_argument("-k", "--ktbs",
                        help="the URI of the kTBS to stress (a local "
                             "in-memory kTBS will be used if none is given)")
    parser.add_argument("-u", "--username",
                        help="the username to use "
                             "(will also prompt for a password)")
    parser.add_argument("-f", "--forks", type=int, default=1,
                        help="the number of processes to fork")
    parser.add_argument("-i", "--iterations", type=int, default=10,
                        help="the number of iterations to run")
    parser.add_argument("-p", "--nbpost", type=int, default=100,
                        help="the number of post to perform at each iteration")
    parser.add_argument("-o", "--nbobs", type=int, default=1,
                        help="the number of obsels to send per post")
    parser.add_argument("-U", "--uuid", action="store_true",
                        help="generate UUID for obsels")
    parser.add_argument("-c", "--cold-start", type=int, default=0,
                        help="the number of iterations to ignore in the average")
    parser.add_argument("--no-clean", action="store_true",
                        help="if set, do not clean kTBS after stressing")
    ARGS = parser.parse_args()


def setUp():
    """Create the base, model and stored trace used by task().

    Depending on --ktbs, targets an in-memory engine (no URI), a local
    file-backed repository ("file://..." URI) or a remote kTBS (HTTP URI).
    """
    global ARGS, BASE
    if ARGS.ktbs is None:
        my_ktbs = make_ktbs()
        ARGS.ktbs = my_ktbs.uri
    elif ARGS.ktbs.startswith("file://"):
        ktbs_config = get_ktbs_configuration()
        # Strip the "file://" prefix to get the repository path.
        ktbs_config.set('rdf_database', 'repository', ARGS.ktbs[7:])
        my_ktbs = make_ktbs(ktbs_config)
        ARGS.ktbs = my_ktbs.uri
    else:
        my_ktbs = get_ktbs(ARGS.ktbs)
    BASE = my_ktbs.create_base(label="stress-ktbs-base")
    model = BASE.create_model("m")
    model.unit = KTBS.millisecond
    BASE.create_stored_trace("t/", "m", "2012-09-06T00:00:00Z",
                             default_subject="Alice")


def task():
    """Run the stress loop, printing per-iteration and average throughput."""
    trace = BASE.get("t/")
    print("Stressing %s %s times with %sx%s obsels" % (
        ARGS.ktbs, ARGS.iterations, ARGS.nbpost, ARGS.nbobs))
    # Total number of obsels created per iteration.
    p = ARGS.nbpost * ARGS.nbobs
    results = []
    for i in range(ARGS.iterations):

        def create_P_obsels():
            # Perform --nbpost posts, each carrying --nbobs obsels.
            for j in range(ARGS.nbpost):
                if ARGS.nbobs == 1:
                    if ARGS.uuid:
                        obs_id = str(uuid4())
                    else:
                        obs_id = None
                    trace.create_obsel(obs_id, "#obsel", subject="Alice",
                                       no_return=True)
                else:
                    # Batch mode: build one graph with all obsels of the post.
                    g = Graph()
                    for k in range(ARGS.nbobs):
                        if ARGS.uuid:
                            obs = URIRef(str(uuid4()), trace.uri)
                        else:
                            obs = BNode()
                        g.add((obs, KTBS.hasTrace, trace.uri))
                        g.add((obs, KTBS.hasBegin,
                               Literal(i * ARGS.nbpost + j * ARGS.nbobs + k)))
                        g.add((obs, KTBS.hasSubject, Literal("Alice")))
                    trace.post_graph(g)

        res = timeit(create_P_obsels, number=1)
        print("%.3fs \t%.2f obs/s" % (res, p / res))
        results.append(res)
    # Drop the first --cold-start iterations from the average.
    results = results[ARGS.cold_start:]
    print("average: %.6fs \t%.4f obs/sec" % (
        sum(results) / len(results),
        (p * len(results) / sum(results)),
    ))


def tearDown():
    """Delete the objects created in setUp() unless --no-clean was given."""
    if not ARGS.no_clean:
        BASE.get("t/").delete()
        BASE.get("m").delete()
        BASE.delete()


def main(argv):
    """Entry point: parse args, optionally fork workers, run the stress task."""
    parse_args()
    if ARGS.username is not None:
        pw = getpass('Enter password for %s> ' % ARGS.username)
        add_http_credentials(ARGS.username, pw)
        del pw
    forks = ARGS.forks
    while forks > 1:
        forks -= 1
        rf = fork()
        if rf == 0:
            break  # children must not spawn children of their own
        elif rf < 0:
            print("Fork failed...", file=stderr)
            break
    set_http_option("disable_ssl_certificate_validation", True)
    setUp()
    try:
        task()
    except KeyboardInterrupt:
        print("Interrupted")
    finally:
        tearDown()


if __name__ == "__main__":
    from sys import argv
    main(argv)
ktbs/ktbs
examples/stress/stress-ktbs.py
Python
lgpl-3.0
4,822
"""parses configuration and returns useful things""" #pylint: disable=redefined-variable-type #pylint: disable=relative-import from etl_framework.utilities.SqlClause import SqlClause from etl_framework.method_wrappers.check_config_attr import check_config_attr_default_none from .InsertStatementMixin import MySqlInsertStatementMixin, PostgreSqlInsertStatementMixin class MySqlUpsertStatementMixin(MySqlInsertStatementMixin): """requires LoaderMixin""" COALESCE_MAP = { None: 'VALUES(`{0}`)', 'new_values': 'COALESE(VALUES(`{0}`), `{0}`)', 'old_values': 'COALESCE(`{0}`, VALUES(`{0}`))' } @classmethod def _create_on_duplicate_key_update_statement(cls, fields, coalesce=None): """returns update statement for upsert""" coalesce_string = cls.COALESCE_MAP[coalesce] sql_obj = SqlClause(header='ON DUPLICATE KEY UPDATE', phrases=['`{}` = '.format(field) + coalesce_string.format(field) for field in fields]) return sql_obj def create_upsert_statement(self, table, fields, statement_string=False): """returns update statement""" insert_fields, insert_statement = self.create_insert_statement(table, fields) update_statement = self._create_on_duplicate_key_update_statement(fields) sql = SqlClause(phrases=[insert_statement, update_statement], phrase_indents=0, phrase_separator='\n') if statement_string: sql = sql.get_sql_clause() return insert_fields, sql def get_upsert_statement(self): """returns statement to upsert data""" return self.create_upsert_statement(table=self.get_loader_table(), fields=self.get_loader_fields(), statement_string=True) class PostgreSqlUpsertStatementMixin(PostgreSqlInsertStatementMixin): """requires LoaderMixin, BaseConfig""" COALESCE_MAP = { None: 'EXCLUDED.{0}', 'new_values': 'COALESE(EXCLUDED.{0}, {table}.{0})', 'old_values': 'COALESCE({table}.{0}, EXCLUDED.{0})' } UPSERT_CONSTRAINT_FIELDS = 'upsert_constraint_fields' @classmethod def _create_on_duplicate_key_update_statement(cls, fields, constraint_fields, coalesce=None, table=None): """returns update 
statement for upsert""" coalesce_string = cls.COALESCE_MAP[coalesce] header = ','.join(constraint_fields) header = 'ON CONFLICT (' + header + ') DO UPDATE SET ' sql_obj = SqlClause(header=header, phrases=['{} = '.format(field) + coalesce_string.format(field, table=table) for field in fields]) return sql_obj def create_upsert_statement(self, table, fields, constraint_fields, statement_string=False, coalesce=None): """returns update statement""" insert_fields, insert_statement = self.create_insert_statement(table, fields) update_statement = self._create_on_duplicate_key_update_statement( fields, constraint_fields, table=table, coalesce=coalesce, ) sql = SqlClause(phrases=[insert_statement, update_statement], phrase_indents=0, phrase_separator='\n') if statement_string: sql = sql.get_sql_clause() return insert_fields, sql def get_upsert_statement(self, coalesce=None): """returns statement to upsert data""" return self.create_upsert_statement(self.get_loader_table(), self.get_loader_fields(), self.get_upsert_constraint_fields(), statement_string=True, coalesce=coalesce) @check_config_attr_default_none def get_upsert_constraint_fields(self): """ :summary: getter for upsert_constraint_fields :returns: upsert constraint fields """ return self.config[self.UPSERT_CONSTRAINT_FIELDS]
pantheon-systems/etl-framework
etl_framework/config_mixins/UpsertStatementMixin.py
Python
mit
4,259
#!/app/local/bin/virtual-python
#$#HEADER-START
# vim:set expandtab ts=4 sw=4 ai ft=python:
#
# Reflex Configuration Event Engine
#
# Copyright (C) 2016 Brandon Gillespie
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#$#HEADER-END

"""Populate a Reflex engine with Star-Trek-themed demo fixture data.

Creates API keys, groups, policies/policyscopes, pipelines, configs and
services through the Reflex client API.  All credentials below are fake
demo values.
"""

import rfx.client
import dictlib

from .setup import create, do


def setup():
    """Create all demo objects via a fresh Reflex client session.

    Uses create()/do() helpers from .setup to wrap the Session calls;
    `data` collects the created apikeys/policies for later reference.
    """
    rcs = rfx.client.Session()
    rcs.cfg_load()
    data = dictlib.Obj()

    ########################################
    # master config for reflex tools
    create(rcs.create, "config", {
        "name":"reflex",
        "type":"parameter",
        "config": {
            "regions": {
                "local": {
                    "lanes": [ "tst", "qa", "prd", "sbx" ]
                }
            },
            "lanes": {
                "tst": {
                    "short": "t",
                    "full": "Test"
                },
                "qa": {
                    "short": "q",
                    "full": "QA"
                },
                "prd": {
                    "short": "p",
                    "full": "Production"
                },
                "sbx": {
                    "short": "x",
                    "full": "Sandbox"
                }
            }
        }
    })

    ########################################
    # demo API keys: two "bridge" users, one "support" user, two service keys
    create(rcs.create, "apikey", {
        "name":"kirk"
    })
    create(rcs.create, "apikey", {
        "name":"spock"
    })
    create(rcs.create, "apikey", {
        "name":"redshirt"
    })
    create(rcs.create, "apikey", {
        "name":"svc-bct",
    })
    create(rcs.create, "apikey", {
        "name":"svc-eng",
    })

    data['keys'] = dictlib.Obj({
        'kirk': do(rcs.get, "apikey", "kirk"),
        'spock': do(rcs.get, "apikey", "spock"),
        'redshirt': do(rcs.get, "apikey", "redshirt"),
        'svc-bct': do(rcs.get, "apikey", "svc-bct"),
        'svc-eng': do(rcs.get, "apikey", "svc-eng"),
    })

    ########################################
    # groups and access policies referencing the keys above
    create(rcs.create, "group", {
        "name":"bridge",
        "group":["kirk", "spock"],
        "type":"Apikey"
    })
    create(rcs.create, "group", {
        "name":"support",
        "group":["redshirt"],
        "type":"Apikey"
    })
    create(rcs.create, "policy", {
        "name": "bridge-all-access",
        "policy": "token_name in groups.bridge"
    })
    create(rcs.create, "policy", {
        "name": "support-not-sensitive",
        "policy": "token_name in groups.support and sensitive == False"
    })
    create(rcs.create, "policy", {
        "name": "support-sensitive",
        "policy": "token_name in groups.support and sensitive == True"
    })

    data['policies'] = dictlib.Obj({
        'bridge-all-access': do(rcs.get, "policy", "bridge-all-access"),
        'support-not-sensitive': do(rcs.get, "policy", "support-not-sensitive"),
        'support-sensitive': do(rcs.get, "policy", "support-sensitive"),
    })

    ########################################
    # policyscopes binding policies to object types/actions
    create(rcs.create, "policyscope", {
        "name": "bridge-all",
        "policy": "support-not-sensitive",
        "actions": 'read',
        "type": 'global',
        "matches": 'True'
    })
    create(rcs.create, "policyscope", {
        "name": "support-read-configs-sensitive",
        "policy": "support-sensitive",
        "actions": 'read',
        "type": 'global',
        "matches": 'obj_type == "Config" and re.match(r"^bct-", obj["name"])'
    })
    create(rcs.create, "policyscope", {
        "name": "support-read-configs",
        "policy": "support-not-sensitive",
        "actions": 'read',
        "type": 'global',
        "matches": 'obj_type == "Config" and not re.match(r"^bct-", obj["name"])'
    })

    ########################################
    # "bridge-navigation" pipeline: one base config + three lane-specific
    # configs (train/battle/main), each paired with a service
    create(rcs.create, "pipeline", {
        "name": "bridge-navigation",
        "title": "Navigation Console",
    })
    create(rcs.create, "config", {
        "name": "bridge-navigation",
        "type": "parameter",
        "setenv": {
            "PORT": 8080
        }
    })
    create(rcs.create, "config", {
        "name": "bridge-navigation-train",
        "type": "parameter",
        "extends": ["bridge-navigation"],
        "sensitive": {
            "parameters": {
                "mongodb-pass": "H77OsiFfuH",
                "mongodb-user": "nav_user",
                "mongodb-host": "mongo-x42020",
                "mongodb-name": "navigation_train"
            }
        }
    })
    create(rcs.create, "config", {
        "name": "bridge-navigation-battle",
        "type": "parameter",
        "extends": ["bridge-navigation"],
        "sensitive": {
            "parameters": {
                "mongodb-pass": "qhsm1LjO/52K",
                "mongodb-user": "nav_user",
                "mongodb-host": "mongo-x42020",
                "mongodb-name": "navigation_battle"
            }
        }
    })
    create(rcs.create, "config", {
        "name": "bridge-navigation-main",
        "type": "parameter",
        "extends": ["bridge-navigation"],
        "sensitive": {
            "parameters": {
                "mongodb-pass": "SKAtGgGl4x+IF",
                "mongodb-user": "nav_user",
                "mongodb-host": "mongo-x42020",
                "mongodb-name": "navigation_main"
            }
        }
    })
    create(rcs.create, "service", {
        "name": "bridge-navigation-train",
        "title": "Navigation Bridge - Training",
        "config": "bridge-navigation-train",
        "pipeline": "bridge-navigation",
    })
    create(rcs.create, "service", {
        "name": "bridge-navigation-battle",
        "config": "bridge-navigation-battle",
        "title": "Navigation Bridge - Battle",
        "pipeline": "bridge-navigation",
    })
    create(rcs.create, "service", {
        "name": "bridge-navigation-main",
        "config": "bridge-navigation-main",
        "title": "Navigation Bridge - Main",
        "pipeline": "bridge-navigation",
    })

    ########################################
    # "bct" pipeline: a shared base config with templated Mongo settings,
    # lane configs supplying the sensitive values, file-type configs, and
    # one service per lane
    create(rcs.create, "pipeline", {
        "name": "bct",
        "title": "Bat'leth Combat Training"
    })
    create(rcs.create, "config", {
        "name": "bct",
        "type": "parameter",
        "extends": ["common"],
        "exports": ["bct-config1", "bct-config2"],
        "sensitive": {
            "parameters": {
                "MONGO-URI":"mongodb://%{MONGO-HOSTS}/%{MONGO-DBID}"
            },
            "config": {
                "db": {
                    "server": "%{MONGO-HOSTS}",
                    "db": "%{MONGO-DBID}",
                    "user": "%{MONGO-USER}",
                    "pass": "%{MONGO-PASS}",
                    "replset": {
                        "rs_name": "myReplicaSetName"
                    }
                }
            }
        },
        "setenv": {
            "MONGO-URI": "%{MONGO-URI}"
        }
    })
    create(rcs.create, "config", {
        "name": "bct-tst",
        "type": "parameter",
        "extends": ["bct"],
        "exports": ["bct-keystore"],
        "procvars": ["sensitive.config.db"],
        "sensitive": {
            "parameters": {
                "MONGO-USER":"test_user",
                "MONGO-PASS":"not a good password",
                "MONGO-HOSTS":"test-db",
                "MONGO-DBID":"test_db"
            }
        }
    })
    create(rcs.create, "config", {
        "name": "bct-qa",
        "type": "parameter",
        "extends": ["bct"],
        "exports": ["bct-keystore"],
        "procvars": ["sensitive.config.db"],
        "sensitive": {
            "parameters": {
                "MONGO-USER":"qa_user",
                "MONGO-PASS":"a better passwd",
                "MONGO-HOSTS":"qa-db",
                "MONGO-DBID":"qa_db"
            }
        }
    })
    # NOTE(review): unlike bct-tst/bct-qa, bct-prd has no "extends" and
    # reuses the test credentials -- presumably intentional for demo data;
    # confirm if this fixture is ever used as a template for real setups.
    create(rcs.create, "config", {
        "name": "bct-prd",
        "type": "parameter",
        "exports": ["bct-keystore"],
        "procvars": ["sensitive.config.db"],
        "sensitive": {
            "parameters": {
                "MONGO-USER":"test_user",
                "MONGO-PASS":"not a good password",
                "MONGO-HOSTS":"test-db",
                "MONGO-DBID":"test_db"
            }
        }
    })
    create(rcs.create, "config", {
        "name": "common",
        "type": "parameter",
        "sensitive": {
            "parameters": {
                "SHARED-SECRET":"moar"
            }
        }
    })
    create(rcs.create, "config", {
        "name": "bct-config1",
        "type": "file",
        "content": {
            "source": "local.xml.in",
            "dest": "local.xml",
            "varsub": True
        }
    })
    create(rcs.create, "config", {
        "name": "bct-config2",
        "type": "file",
        "content": {
            "dest":"local-production.json",
            "ref":"sensitive.config",
            "type":"application/json"
        }
    })
    create(rcs.create, "config", {
        "name": "bct-keystore",
        "type": "file",
        "content": {
            "dest":"local.keystore",
            "ref":"sensitive.data",
            "encoding": "base64"
        },
        "sensitive": {
            "data": "bm90IHJlYWxseSBhIGtleXN0b3JlIG9iamVjdAo="
        }
    })
    create(rcs.create, "service", {
        "name": "bct-tst",
        "config": "bct-tst",
        "title":"Bat'leth Combat Training - TST",
        "pipeline": "bct",
    })
    create(rcs.create, "service", {
        "name": "bct-qa",
        "config": "bct-qa",
        "title":"Bat'leth Combat Training - QA",
        "pipeline": "bct",
    })
    create(rcs.create, "service", {
        "name": "bct-prd",
        "config": "bct-prd",
        "title":"Bat'leth Combat Training - PRD",
        "pipeline": "bct",
    })
reflexsc/reflex
src/rfxcmd/setup_demo.py
Python
agpl-3.0
10,499
import _plotly_utils.basevalidators


class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `uid` property of `histogram2d` traces."""

    def __init__(self, plotly_name="uid", parent_name="histogram2d", **kwargs):
        # Default the edit type to "plot" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "plot")
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
plotly/plotly.py
packages/python/plotly/plotly/validators/histogram2d/_uid.py
Python
mit
388
#!/usr/bin/env python # encoding: utf-8 import sys import os import re import time import json import functools import requests from bs4 import BeautifulSoup import html2text reload(sys) sys.setdefaultencoding('utf8') # global var _cookies_name = 'cookies.json' _session = None _headers = {'Host': 'www.zhihu.com', 'Referer': 'http://www.zhihu.com/', 'User-Agent': 'Mozilla/5.0', 'X-Requested-With': 'XMLHttpRequest'} # Zhihu login URL _zhihu_url = 'http://www.zhihu.com' _zhihu_login_url = _zhihu_url + '/login' _captcha_url_prefix = _zhihu_url + '/captcha.gif?r=' # zhihu column URL _column_prefix = 'http://zhuanlan.zhihu.com/' _column_GET_user = _column_prefix + 'api/columns/{0}' _column_GET_posts = _column_GET_user + '/posts/{1}' _column_GET_posts_limit = _column_GET_posts[:-4] + '?limit=10&offset={1}' # regex _re_question_url = re.compile(r'http://www\.zhihu\.com/question/\d+/?$') _re_author_url = re.compile(r'http://www\.zhihu\.com/people/[^/]+/?$') _re_column_url = re.compile(r'http://zhuanlan\.zhihu\.com/([^/]+)/?$') _re_column_article_url = re.compile( r'http://zhuanlan\.zhihu\.com/([^/]+)/(\d+)/?$') _re_collection_url = re.compile(r'http://www\.zhihu\.com/collection/\d+/?$') def _init(): global _session if _session is None: _session = requests.Session() _session.headers.update(_headers) if os.path.isfile(_cookies_name): with open(_cookies_name, 'r') as f: cookies_dict = json.load(f) _session.cookies.update(cookies_dict) else: print 'Please run "zhihu.create_cookies()" for further operation.' else: raise Exception('Please don\'t call function _init() manually.') def create_cookies(): if not os.path.isfile(_cookies_name): email = raw_input('email:') password = raw_input('password:') captcha_url = get_captcha_url() save_captcha(captcha_url) print 'Please check "captcha.gif" for captcha' captcha = raw_input('captcha:') os.remove('captcha.gif') r, msg = login(email, password, captcha) if r == 0: print 'cookies file created!' elif r == 1: print 'Failed to login. 
Error message is:' + msg print 'Please check the error message and try again.' else: print '%s has been created! Please delete it first.' % _cookies_name def get_captcha_url(): return _captcha_url_prefix + str(int(time.time() * 1000)) def save_captcha(url): global _session r = _session.get(url) with open('captcha.gif', 'w') as f: f.write(r.content) def login(email=None, password=None, captcha=None): global _session data = {'email': email, 'password': password, 'captcha': captcha, 'rememberme': 'y'} r = _session.post(_zhihu_login_url, data) j = r.json() status = int(j['r']) msg = j['msg'] if status == 0: with open(_cookies_name, 'wb') as f: data = json.dumps(_session.cookies.get_dict()) f.write(data) cookies_dict = json.loads(data) _session.cookies.update(cookies_dict) return status, msg def set_class_variable(attr): def decorator(func): @functools.wraps(func) def wrapper(self): value = getattr(self, attr) if hasattr(self, attr) else None if value is None: self.make_soup() value = func(self) setattr(self, attr, value) return value else: return value return wrapper return decorator def valid_name(text): invalid_char = ['\\', '/', ':', '*', '?', '<', '>', '|', '"'] valid = '' for char in text: if char not in invalid_char: valid += char return valid def create_file(path, filename, mode, defaultpath, defaultname): if path is None or '.': path = os.path.join(os.getcwd(), valid_name(defaultpath)) if os.path.isdir(path) is False: os.makedirs(path) if filename is None: filename = valid_name(defaultname) temp = filename i = 0 while os.path.isfile(os.path.join(path, filename) + '.' + mode): i += 1 temp = filename + str(i) return os.path.join(path, temp) + '.' + mode class Question: def __init__(self, url): if not _re_question_url.match(str(url)): raise Exception('Hmmm.. Unvalid URL! 
Please change.') else: self.url = str(url) r = _session.get(self.url) self.soup = BeautifulSoup(r.content) # 获取问题标签 @property def get_tags(self): items = self.soup.find_all('a', class_='zm-item-tag') tags = [unicode(item.string.strip()) for item in items] return tags # 获取问题标题 @property def get_question(self): raw_question = self.soup.find( 'h2', class_='zm-item-title zm-editable-content') return raw_question.string.strip() # 获取问题描述 @property def ques_description(self): data = self.soup.find('div', class_='zm-editable-content') return html2text.html2text(str(data)) # 获取问题关注者人数 @property def ques_followers(self): num = self.soup.find('div', class_='zg-gray-normal').a.strong.string return '关注者人数为' + num # 获取问题回答数 @property def answer_num(self): raw_html = self.soup.find('h3', id='zh-question-answer-num') num = raw_html.get('data-num') self.num = num return '回答人数为:' + num # 获取排在最前面的回答 @property def top_answer(self): self.answer_num top = self.soup.find('div', class_=' zm-editable-content clearfix') answer = html2text.html2text(str(top)) return answer # 获取排名前几位的回答 def top_i_answers(self, num): self.answer_num if not isinstance(num, int) or abs(num) != num: print 'Ohh! Please enter positive integer:' elif num > int(self.num): print 'Sorry, The number of answers for' \ 'this question is %s. Please enter again.' % self.num elif num == 1: return self.top_answer elif num > 1: find = self.soup.find_all(class_=' zm-editable-content clearfix', limit=num) for index, answer in enumerate(find): print '第%d个答案:\n' % (index+1) print html2text.html2text(str(answer)) # 获取所有回答 @property def all_answers(self): self.answer_num find = self.soup.find_all(class_=' zm-editable-content clearfix', limit=self.num) for index, answer in enumerate(find): print '第%d个答案:\n' % (index+1) print html2text.html2text(str(answer)) class Author: def __init__(self, url): if not _re_author_url.match(str(url)): raise Exception('Hmmm.. Unvalid URL! 
Please change.') else: if not url.endswith('/'): url += '/' self.url = str(url) r = _session.get(self.url) self.soup = BeautifulSoup(r.content) #with open('author.html', 'wb') as f: #f.write(r.content) # 获取用户名字 @property def get_people_name(self): name = self.soup.find('div', class_='zm-profile-header').span.string #self_intro = self.soup.find('span', class_='bio').get('title') #return name + ':' + self_intro return name # 获取用户所在地点 @property def get_people_location(self): locate = self.soup.find('span', class_='location item').get('title') return locate # 获取用户的职业介绍 @property def get_people_career(self): profession = self.soup.find('span', class_='business item') \ .get('title') employment = self.soup.find('span', class_='employment item') \ .get('title') position = self.soup.find('span', class_='position item').get('title') return '行业: ' + profession \ + '\n' + '公司: ' + employment + '\n' + '职位: ' + position # 获取用户的教育情况 @property def get_people_educate(self): educate = self.soup.find('span', class_='education item').get('title') find = self.soup.find('span', class_='education-extra item') educate_extra = find.get('title') return educate + '|' + educate_extra # 获取用户的自我介绍 @property def get_self_description(self): content = self.soup.find('div', attrs={'data-name': 'description'}) return content.span.span.span.string.strip().replace('\n', '') # 获取得到的赞同数 @property def get_agree_num(self): agree = self.soup.find('span', class_='zm-profile-header-user-agree') \ .strong.string return '赞同: ' + agree # 获取得到的感谢数 @property def get_thanks_num(self): thanks = self.soup.find( 'span', class_='zm-profile-header-user-thanks').strong.string return '感谢: ' + thanks # 获取people擅长的话题 @property def get_topics(self): result = self.soup.find_all('h3', class_='zg-gray-darker') t = [unicode(item.string).encode('utf-8') for item in result] vote = [] topic = [] for item in result: vote.append(item.find_next('p', class_='meta').span.contents[1]) combine = zip(t, vote) for index, value in 
enumerate(combine): topic.append('擅长话题 [' + value[0] + '] 的点赞数为' + value[1]) return topic #value = p.get_topics #for val in value: #print val # 获取回答问题的数量 @property def answers_num(self): start = self.url.find('m') num = self.soup.find('a', class_='item', href=self.url[start+1:]+'answers').span.string return int(num) # 获得该作者的所有答案 @property def all_answers(self): url = self.url + 'answers' payload = {'order_by': 'created'} page = 0 num = self.answers_num while num > 0: num -= 20 page += 1 payload['page'] = str(page) r = _session.get(url, params=payload) soup = BeautifulSoup(r.content) raw_html = soup.find_all('a', class_='question_link') for item in raw_html: text = item.parent.parent.div.textarea.next_element title = item.string file_name = create_file('.', None, 'md', self.get_people_name, title+'-'+self.get_people_name+'的回答') with open(file_name, 'w+') as f: f.write(html2text.html2text(str(text).strip())) # 获取得到排名最高的前几个答案 def top_vote_answers(self, num): url = self.url + 'answers' if not isinstance(num, int) or abs(num) != num: print 'Ohh! Please enter positive integer:' if self.answers_num < num: print 'Sorry. the num of questions the author answered' \ 'is smaller than your expected' else: temp = num payload = {'order_by': 'vote_num'} for i in range((num-1)//20 + 1): if temp > 20: item = 20 temp -= 20 else: item = temp payload['page'] = str(i+1) r = _session.get(url, params=payload) soup = BeautifulSoup(r.content) raw_html = soup.find_all('a', class_='question_link', limit=item) for item in raw_html: text = item.parent.parent.div.textarea.next_element title = item.string file_name = create_file( '.', None, 'md', self.get_people_name+'排名前'+str(num)+'的回答', title+'-'+self.get_people_name+'的回答') with open(file_name, 'w+') as f: f.write(html2text.html2text(str(text).strip())) # 获取最新回答的前几个答案 def newly_creates_answers(self, num): url = self.url + 'answers' if not isinstance(num, int) or abs(num) != num: print 'Ohh! 
Please enter positive integer:' if self.answers_num < num: print 'Sorry. the num of questions the author answered' \ 'is smaller than your expected' else: temp = num payload = {'order_by': 'created'} for i in range((num-1)//20 + 1): if temp > 20: item = 20 temp -= 20 else: item = temp payload['page'] = str(i+1) r = _session.get(url, params=payload) soup = BeautifulSoup(r.content) raw_html = soup.find_all('a', class_='question_link', limit=item) for item in raw_html: text = item.parent.parent.div.textarea.next_element title = item.string file_name = create_file( '.', None, 'md', self.get_people_name+'最新创建的'+str(num)+'个回答', title+'-'+self.get_people_name+'的回答' ) with open(file_name, 'w+') as f: f.write(html2text.html2text(str(text).strip())) # 备份某专栏所有文章 # 记住是不要翻页的 class Column: def __init__(self, url, title=None, article_num=None, column_owner=None, followers=None, description=None): match = _re_column_url.match(str(url)) if not match: raise Exception('Hmmm. Unvalid URL! Please change.') else: self._in_name = match.group(1) self._url = str(url) self._title = title self._column_owner = column_owner self._followers = followers self._description = description self._article_num = article_num self.soup = None def make_soup(self): global _session if self.soup is None: assert isinstance(_session, requests.Session) origin_host = _session.headers.get('Host') _session.headers.update(Host='zhuanlan.zhihu.com') response = _session.get(_column_GET_user.format(self._in_name)) _session.headers.update(Host=origin_host) self.soup = response.json() # 专栏名称 @property @set_class_variable('_title') def title(self): return self.soup['name'] # 获取专栏创建者 @property @set_class_variable('_column_owner') def owner(self): return self.soup['creator']['name'] # 获取专栏关注人数 @property @set_class_variable('_followers') def followers(self): return int(self.soup['followersCount']) # 获取专栏描述 @property @set_class_variable('_description') def description(self): return self.soup['description'] # 获取专栏文章数 @property 
@set_class_variable('_article_num') def num(self): return int(self.soup['postsCount']) # 获取专栏所有文章 @property def posts(self): global _session origin_host = _session.headers.get('Host') for offset in range(0, (self.num-1)//10 + 1): _session.headers.update(Host='zhuanlan.zhihu.com') response = _session.get( _column_GET_posts_limit.format(self._in_name, offset*10)) soup = response.json() _session.headers.update(Host=origin_host) for article in soup: url = _column_prefix + article['url'][1:] author = Author(article['author']['profileUrl']) title = article['title'] agree_num = article['likesCount'] comment_num = article['commentsCount'] yield ColumnArticles(url, self, author, title, agree_num, comment_num) class ColumnArticles: def __init__(self, url, column=None, author=None, title=None, argee_num=None, comment_num=None): match = _re_column_article_url.match(str(url)) if not match: raise Exception('Hmmm. Unvalid URL! Please change.') else: self._url = str(url) self._column = column self._author = author self._title = title self._argee_num = argee_num self._comment_num = comment_num self.soup = None self._slug = match.group(1) self._article_code = match.group(2) def make_soup(self): if self.soup is None: global _session assert isinstance(_session, requests.Session) origin_host = _session.headers.get('Host') _session.headers.update(Host='zhuanlan.zhihu.com') self.soup = _session.get( _column_GET_posts.format(self._slug, self._article_code)) \ .json() _session.headers.update(Host=origin_host) # 文章所在专栏 @property @set_class_variable('_column') def column(self): column_name = self.soup['column']['name'] column_url = _column_prefix + self.soup['column']['slug'] return Column(column_url, title=column_name) # 文章作者 @property @set_class_variable('_author') def author(self): return self.soup['author']['name'] # 文章的标题 @property @set_class_variable('_title') def title(self): return self.soup['title'] # 文章的赞同数 @property @set_class_variable('_argee_num') def agree_num(self): return 
self.soup['likesCount'] # 文章的评论数 @property @set_class_variable('_comment_num') def comment_num(self): return self.soup['commentsCount'] # 保存文章内容 def save(self, filepath=None, filename=None): self.make_soup() if isinstance(self.author, Author): author = self.author.get_people_name else: author = self.author file_name = create_file(filepath, filename, 'md', self.soup['column']['name'], self.title + '-' + author) with open(file_name, 'wb') as f: f.write(html2text.html2text(self.soup['content'])) _init()
shirleyChou/zhihu_crawler
zhihu.py
Python
mit
19,263
''' Author: Jinhui Zhang Course: Elements of AI Username: jinhzhan Email: jinhzhan@indiana.edu ''' import time class State: ''' A class used to denote states of the game ''' def __init__(self, tile_order): ''' Initialization with tile_order which denotes the 2-dimension game board by linear list. For example, if we have a state: 2 5 3 1 6 4 7 8 then we write it as state = State([2, 5, 3, 'blank', 1, 6, 4, 7, 8]) Class State has two variables, tile_order ---- the linear shape of a state parent ---- a state that can generate the current state by moving blank tile one step ''' self.tile_order = tile_order self.parent = "root" def __eq__(self, state): ''' Overide the __eq__ method for comparison between current state and the goal state ''' for i in range(9): if self.tile_order[i] != state.tile_order[i]: return False return True def __str__(self): return str(self.tile_order) def set_parent(self, state): ''' Set the parent state of the state ''' self.parent = state def get_parent(self): ''' Get the parent state of the state ''' return self.parent def get_tile_order(self): ''' Get the tile order of the state to initialize a new state which has the same state of the current state. Used in mathod action(). ''' return list(self.tile_order) def heuristic_one(self, path_cost): ''' h(N) = the number of misplaced tiles ''' misplace_count = 0 for i in range(9): if self.tile_order[i] != goal_state.tile_order[i]: misplace_count += 1 cost = path_cost + misplace_count return cost def heuristic_two(self, path_cost): ''' h(N) = the sum of substractions of the tiles in current state and the tiles in goal state for example, the goal state is 1 2 3 4 5 6 7 8 and the current state is 2 3 1 5 6 4 7 8 The result should be abs(1 - 2) + abs(2 - 0) + abs(3 - 3) + abs(4 - 1) + abs(5 - 5) + abs(6 - 6) + abs(7 - 4) + abs(8 - 7) + abs(0 - 8) = 18. 
''' substraction = 0 for i in range(9): if self.tile_order[i] == "blank" and goal_state.tile_order[i] == "blank": substraction += 0 elif goal_state.tile_order[i] == "blank" and isinstance(self.tile_order[i], int): substraction += self.tile_order[i] elif self.tile_order[i] == "blank" and isinstance(goal_state.tile_order[i], int): substraction += goal_state.tile_order[i] else: substraction += abs(self.tile_order[i] - goal_state.tile_order[i]) cost = path_cost + substraction return cost def action(self): ''' Get the possible states that can be reached from the current state ''' position = self.tile_order.index("blank") # find where the blank tile is # calculate the x, y coordinate through the position in list pos_x = position / 3 pos_y = position - 3 * pos_x child_states = [] # there are four directions, so do loop four times. And before exchanging the blank tile and its neighbor, # check if the blank tile is cross the border. If not, do the move. for i in range(4): lst = self.get_tile_order() # the tile list used to generate new state with the same tile order if pos_x - 1 >= 0 and i == 0: # blank tile goes left state = State(lst) state.tile_order[3 * (pos_x - 1) + pos_y], state.tile_order[3 * pos_x + pos_y] = state.tile_order[3 * pos_x + pos_y], state.tile_order[3 * (pos_x - 1) + pos_y] # swap the blank tile and its left neighbor state.set_parent(self) # set the current state as the new state's parent child_states.append(state)# add the new state to a list that is going to be returned if pos_x + 1 <= 2 and i == 1: # blank tile goes right state = State(lst) state.tile_order[3 * (pos_x + 1) + pos_y], state.tile_order[3 * pos_x + pos_y] = state.tile_order[3 * pos_x + pos_y], state.tile_order[3 * (pos_x + 1) + pos_y] state.set_parent(self) child_states.append(state) if pos_y - 1 >= 0 and i == 2: # blank tile goes up state = State(lst) state.tile_order[3 * pos_x + (pos_y - 1)], state.tile_order[3 * pos_x + pos_y] = state.tile_order[3 * pos_x + pos_y], state.tile_order[3 * 
pos_x + (pos_y - 1)] state.set_parent(self) child_states.append(state) if pos_y + 1 <= 2 and i == 3: # blank tile goes down state = State(lst) state.tile_order[3 * pos_x + (pos_y + 1)], state.tile_order[3 * pos_x + pos_y] = state.tile_order[3 * pos_x + pos_y], state.tile_order[3 * pos_x + (pos_y + 1)] state.set_parent(self) child_states.append(state) return child_states def is_in_set(self, stateset): ''' Check if the state is in some stateset like "frontier" or "explored" ''' for item in stateset: if item == self: return True return False def Astar(state, limit): ''' Astar algorithm. If you want to change the heuristic function, just change the "heuristic_one" and "heuristic_two" in line 157 and line 178 ''' path_cost = 0 if goal_test(state): # if the initial state is the goal state, return the solution return solution(state) # or initial the frontier and explored variables, start searching frontier = [state] cost_list = [state.heuristic_two(path_cost)] # the corresponding costs of the states in frontier explored = [] while True: if len(frontier) == 0: # when the frontier is empty and no solution is found, this is an unsolvable problem return "No solution for ", str(state) mincost_pos = cost_list.index(min(cost_list)) # get the position of the state with lowest cost now_state = frontier.pop(mincost_pos) cost_list.pop(mincost_pos) explored.append(now_state) child_state = now_state.action() # expand the lowest cost state path_cost += 1 limit -= 1 for s in child_state: if not s.is_in_set(explored) and not s.is_in_set(frontier): if goal_test(s): return solution(s) elif limit == 0: return "Limit reached" else: frontier.append(s) cost_list.append(s.heuristic_two(path_cost)) def solution(state): ''' According to the parent state stored in the solution state, get the path iteratively ''' result = [state] parent = state.get_parent() while parent != "root": # add the parent state to the result list until getting to root result.append(parent) parent = parent.get_parent() 
result.reverse() return result def goal_test(state): ''' Checking if state is exactly the goal state. If so, return true. ''' if state == goal_state: return True else: return False def makeState(nw, n, ne, w, c, e, sw, s, se): ''' nw | NorthWest element of the puzzle n | North element of the puzzle ne | NorthEast element of the puzzle w | West element of the puzzle c | Center element of the puzzle e | East element of the puzzle sw | SouthWest element of the puzzle s | South element of the puzzle se | SouthEast element of the puzzle ''' l = list([nw, n, ne, w, c, e, sw, s, se]) state = State(l) return state def testInformedSearch(init, goal, limit): global goal_state goal_state = goal # configure the goal state result = Astar(init, limit) # get the solution or failure message if isinstance(result, str): # if it's a message, just print it and return print result return # otherwise, print the solution path for r in result: for i in range(3): for j in range(3): if r.tile_order[3 * i + j] == "blank": print " ", else: print r.tile_order[3 * i + j], print print "-----------------\n" print "Depth is", len(result) # return len(result) goal_state = makeState(1, 2, 3, 4, 5, 6, 7, 8, "blank") # global variable "goal_state" goalState = makeState(1, 2, 3, 4, 5, 6, 7, 8, "blank") # First group of test cases - should have solutions with depth <= 5 initialState1 = makeState(2, "blank", 3, 1, 5, 6, 4, 7, 8) initialState2 = makeState(1, 2, 3, "blank", 4, 6, 7, 5, 8) initialState3 = makeState(1, 2, 3, 4, 5, 6, 7, "blank", 8) initialState4 = makeState(1, "blank", 3, 5, 2, 6, 4, 7, 8) initialState5 = makeState(1, 2, 3, 4, 8, 5, 7, "blank", 6) # Second group of test cases - should have solutions with depth <= 10 initialState6 = makeState(2, 8, 3, 1, "blank", 5, 4, 7, 6) initialState7 = makeState(1, 2, 3, 4, 5, 6, "blank", 7, 8) initialState8 = makeState("blank", 2, 3, 1, 5, 6, 4, 7, 8) initialState9 = makeState(1, 3, "blank", 4, 2, 6, 7, 5, 8) initialState10 = makeState(1, 3, "blank", 4, 
2, 5, 7, 8, 6) # Third group of test cases - should have solutions with depth <= 20 initialState11 = makeState("blank", 5, 3, 2, 1, 6, 4, 7, 8) initialState12 = makeState(5, 1, 3, 2, "blank", 6, 4, 7, 8) initialState13 = makeState(2, 3, 8, 1, 6, 5, 4, 7, "blank") initialState14 = makeState(1, 2, 3, 5, "blank", 6, 4, 7, 8) initialState15 = makeState("blank", 3, 6, 2, 1, 5, 4, 7, 8) # Fourth group of test cases - should have solutions with depth <= 50 initialState16 = makeState(2, 6, 5, 4, "blank", 3, 7, 1, 8) initialState17 = makeState(3, 6, "blank", 5, 7, 8, 2, 1, 4) initialState18 = makeState(1, 5, "blank", 2, 3, 8, 4, 6, 7) initialState19 = makeState(2, 5, 3, 4, "blank", 8, 6, 1, 7) initialState20 = makeState(3, 8, 5, 1, 6, 7, 4, 2, "blank") initialState = [initialState1, initialState2, initialState3, initialState4, initialState5, initialState6, initialState7, initialState8, initialState9, initialState10, initialState11, initialState12, initialState13, initialState14, initialState15, initialState16, initialState18, initialState19, initialState20] testInformedSearch(initialState1, goalState, 2000) ''' t = [] depth = [] for item in initialState: start = time.time() depth.append(testInformedSearch(item, goalState, 200000)) end = time.time() # print "Time elapsed:", end - start t.append(end - start) print len(t) print "time:", t print "depth:", depth '''
OldPanda/B551
hw3/jinhzhan-informed.py
Python
gpl-2.0
9,833
# Copyright (c) 2011-2013 Jason Dobies # # This file is part of Okaara. # # Okaara is free software: you can redistribute it and/or modify it under the terms of the # GNU General Public License as published by the Free Software Foundation, either version 3 # of the License, or (at your option) any later version. # # Okaara is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with Okaara. # If not, see <http://www.gnu.org/licenses/>. from builtins import object import gettext import logging import os import sys from okaara.prompt import Prompt t = gettext.translation('okaara', fallback=True) if sys.version_info[0] < 3: _ = t.ugettext else: _ = t.gettext LOG = logging.getLogger(__name__) class Exit(Exception): """ May be raised by any menu item function to stop the shell loop. """ pass class Shell(object): """ Represents a single shell interface. A shell constists of one or more screens that drive the different sections of the shell. At any given time, only one screen is active. Only the active screen's menu will be used when interacting with the user's input. Based on the user's decisions, the state of the shell may be transitioned between different screens. This class contains methods screens and actions may use for transitioning between screens and interacting with user input. """ def __init__(self, prompt=None, auto_render_menu=False, include_long_triggers=True): """ Creates an empty shell. At least one screen must be added to the shell before it is used. 
:param prompt: specifies a prompt object to use for reading/writing to the console; if not specified will default to L{Prompt} :type prompt: L{Prompt} :param auto_render_menu: if True, the menu will automatically be rendered after the execution of each menu item; defaults to False :type auto_render_menu: bool :param include_long_triggers: if True, the long versions of default triggers will be added, if False only single-character triggers will be added; defaults to True :type include_long_triggers: bool """ self.home_screen = None self.current_screen = None self.previous_screen = None self.screens = {} self.auto_render_menu = auto_render_menu # Set the default prompt prefix to substitute in the current screen ID self.prompt_prefix = '($s) => ' # Create a default prompt if one was not explicitly specified self.prompt = prompt or Prompt() # Create the shell-level menu; these can be changed before the shell is run previous_triggers = ['<'] home_triggers = ['^'] quit_triggers = ['q'] help_triggers = ['?'] clear_triggers = ['/'] if include_long_triggers: home_triggers.append('home') quit_triggers.append('quit') quit_triggers.append('exit') help_triggers.append('help') clear_triggers.append('clear') self.shell_menu_items = {} self.ordered_menu_items = [] # kinda ghetto, need to replace with ordered dict for menu_items self.add_menu_item(MenuItem(home_triggers, _('move to the home screen'), self.home)) self.add_menu_item(MenuItem(previous_triggers, _('move to the previous screen'), self.previous)) self.add_menu_item(MenuItem(help_triggers, _('display help'), self.render_menu)) self.add_menu_item(MenuItem(clear_triggers, _('clears the screen'), self.clear_screen)) self.add_menu_item(MenuItem(quit_triggers, _('exit'), self.stop)) def add_screen(self, screen, is_home=False): """ Adds a new screen for the shell. If a screen was previously added with the same screen ID, the newly added screen will replace it. 
:param screen: describes a screen in the shell; may not be None :type screen: L{Screen} """ if screen is None: raise ValueError('screen may not be None') # Overwrite a previously added screen if one exists self.screens[screen.id] = screen # If there is no current screen set, use the newly added one if self.current_screen is None: self.current_screen = screen # If the screen is indicated as the home screen or one has not been set, # set the home screen if is_home or self.home_screen is None: self.home_screen = screen def add_menu_item(self, menu_item): """ Adds a new menu item that will be available anywhere in the shell. Each menu item added to this screen must have a unique trigger. If a menu item with the same trigger already exists, it will be removed from the menu and replaced by the newly added item. :param menu_item: new item to add to the shell; may not be None :type menu_item: L{MenuItem} """ if menu_item is None: raise ValueError('menu_item may not be None') # Overwrite the existing menu item with the same trigger if one exists for trigger in menu_item.triggers: self.shell_menu_items[trigger] = menu_item if menu_item not in self.ordered_menu_items: self.ordered_menu_items.append(menu_item) # -- user input handling ----------------------------------------------------------------------- def start(self, show_menu=True, clear=True): """ Starts the loop to listen for user input and handle according to the current screen. 
""" if clear: self.clear_screen() if show_menu: self.render_menu() running = True while running: # Read the next input from the user try: input = self.prompt.prompt(self._prompt_prefix()) except (EOFError, KeyboardInterrupt): self.prompt.write('\n') return # Make sure the user entered something if input is None or input.strip() == '': continue trigger = input.split()[0] command_args = input.split()[1:] # Search the shell for the menu item first item = None if trigger in self.shell_menu_items: item = self.shell_menu_items[trigger] # If a menu item wasn't found in the shell items, try the screen if item is None: item = self.current_screen.item(trigger) if item is None: self.prompt.write(_('Invalid menu item; type "?" for a list of available commands')) continue # Call the function for the menu item if one was found try: args = item.args + tuple(command_args) self.execute(item.func, *args, **item.kwargs) except Exit: break # If the menu is set to auto-render, render it before moving on if self.auto_render_menu: self.render_menu() def safe_start(self, show_menu=True, clear=True): """ Launches the shell in an exception block to catch all unexpected exceptions to prevent the entire thing from crashing. If an exception is caught, the start loop will be restarted. """ try: self.start(show_menu=show_menu, clear=clear) except SystemExit: sys.exit(0) except: LOG.exception('Unexpected error caught at the shell level') self.prompt.write('') self.prompt.write(_('An unexpected error has occurred during the last operation.')) self.prompt.write(_('More information can be found in the log.')) self.prompt.write('') self.safe_start(show_menu=False, clear=False) def stop(self): """ Causes the shell to stop listening for user commands. """ raise Exit() def execute(self, func, *args, **kwargs): """ Executes a function selected by the user from a menu item. This call may raise Exit in order to quit the shell. 
Subclasses should override this method to inject pre-run and post-run functionality. """ func(*args, **kwargs) # -- screen transition calls ------------------------------------------------------------------- def transition(self, to_screen_id, show_menu=False, clear=False): """ Transitions the state of the shell to the identified screen. If no screen exists with the given ID, the shell will be transitioned to the home screen. :param to_screen_id: identifies the screen to change the shell to; may not be None :type to_screen_id: string :param show_menu: if True, the menu for the newly transitioned to screen will be displayed :type show_menu: bool :param clear: if True, the screen will be cleared before the transition is made :type clear: bool """ if to_screen_id is None or to_screen_id not in self.screens: LOG.error('Attempt to transition to non-existent screen [%s]' % to_screen_id) to_screen_id = self.home_screen.id if clear: self.clear_screen() self.previous_screen = self.current_screen self.current_screen = self.screens[to_screen_id] if show_menu: self.render_menu(display_shell_menu=False) def previous(self): """ Transitions the state of the shell to the previous screen. If there is no previous screen, the shell will be transitioned to the home screen. """ if self.previous_screen is None: self.home() else: self.transition(self.previous_screen.id) def home(self): """ Transitions the state of the shell to the home screen. """ self.transition(self.home_screen.id) # -- display related calls --------------------------------------------------------------------- def clear_screen(self): """ Calls to the command line to clear the console. """ os.system('clear') def render_menu(self, display_shell_menu=True): """ Renders the menu for the current screen to the screen. 
""" # Screen menu items self.prompt.write('') for item in self.current_screen.items(): self._render_menu_item(', '.join(item.triggers), item.description) # Shell triggers if display_shell_menu: self.prompt.write('') # Shell menu items if len(self.shell_menu_items) > 0: self.prompt.write('') for item in self.ordered_menu_items: self._render_menu_item(', '.join(item.triggers), item.description) self.prompt.write('') def _render_menu_item(self, trigger, description): """ Writes a single menu item to the screen, wrapping appropriately for long triggers """ if len(trigger) < 4: self.prompt.write(' %-4s%s' % (trigger, description)) else: self.prompt.write(' %s' % trigger) self.prompt.write(' %s' % description) def _prompt_prefix(self): """ Returns the prompt prefix, substituting in any variables that have been defined. """ p = self.prompt_prefix.replace('$s', self.current_screen.id) return p class Screen(object): """ A screen is an individual "section" of a shell. The granularity of its use will vary based on the application but can most easily be related to different screens in a graphical UI. """ def __init__(self, id): """ :param id: uniquely identifies this screen instance in a shell; may not be None :type id: string """ if id is None: raise ValueError('id may not be None') self.id = id self.menu_items = {} self.ordered_menu_items = [] # kinda ghetto, need to replace with ordered dict for menu_items def __str__(self): return 'ID [%s]' % self.id def add_menu_item(self, menu_item): """ Adds a new menu item that will be available on this screen. Each menu item added to this screen must have a unique trigger. If a menu item with the same trigger already exists, it will be removed from the menu and replaced by the newly added item. 
:param menu_item: new item to add to this screen; may not be None :type menu_item: L{MenuItem} """ if menu_item is None: raise ValueError('menu_item may not be None') # Overwrite the existing menu item with the same trigger if one exists for trigger in menu_item.triggers: self.menu_items[trigger] = menu_item if menu_item not in self.ordered_menu_items: self.ordered_menu_items.append(menu_item) def item(self, trigger): """ Returns the menu item for the given trigger if one exists; None otherwise. :param trigger: identifies the menu item being searched for :type trigger: string :return: menu item for the given trigger if one is found; None otherwise :rtype: L{MenuItem} or None """ return self.menu_items.get(trigger) def items(self): """ Returns a list of menu items in this screen. :return: list of menu items; empty list if none have been added :rtype: list of L{MenuItem} """ return tuple(self.ordered_menu_items) def noop(): """ Stub method used as the default in menu items to facilitate prototyping of the menu without needing to have all the method implementations in place. """ pass class MenuItem(object): """ An individual menu item the user can interact with. The shell instance will take care of determining which menu item the user has selected and invoking its associated function. Any extra arguments input by the user when calling the menu item will be passed to the function on invocation. The shell reserves certain triggers for general use. Be sure that a menu item trigger does not overlap with one of the shell-level triggers defined in the shell instance. 
""" def __init__(self, triggers, description, func=noop, *args, **kwargs): """ :param triggers: character or string (or list of them) the user will input to cause the associated function to be invoked; may not be None :type triggers: str or list :param description: short (1-2 line) description of what the menu item does; displayed :type description: string :param func: function to invoke when this menu item is selected; extra arguments specified after the trigger will be passed to this function; may not be None :type func: function :param args: arguments that will be passed to the function when it is executed :param kwargs: key word arguments to be passed to the function when it is executed """ if triggers is None: raise ValueError('trigger may not be None') if func is None: raise ValueError('func may not be None') # Make sure it's in list form if not isinstance(triggers, list) and not isinstance(triggers, tuple): triggers = [triggers] self.triggers = triggers self.description = description self.func = func self.args = args self.kwargs = kwargs def __str__(self): return 'Triggers: [%s], Description [%s], Function: [%s]' % (', '.join(self.triggers), self.description, self.func.__name__) def __eq__(self, other): return self.triggers == other.triggers
jdob/okaara
okaara/shell.py
Python
gpl-2.0
16,631
# GromacsWrapper plugin: cysaccessibility.py # Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com> # Released under the GNU Public License 3 (or higher, your choice) # See the file COPYING for details. """ CysAccessibility plugin ======================= Cysteine accessibility is analyzed by histogramming the distance of shortest approach of water molecules to the sulfhydryl group of Cys. See class docs for more details. .. note:: This plugin is the canonical example for how to structure plugins that conform to the plugin API (see docs :mod:`gromacs.analysis.core` for details). Plugin class ------------ .. autoclass:: CysAccessibility :members: worker_class :undoc-members: Worker class ------------ The worker class performs the analysis. .. autoclass:: _CysAccessibility :members: """ from __future__ import with_statement __docformat__ = "restructuredtext en" import sys import os.path import warnings import subprocess import gromacs from gromacs.utilities import AttributeDict from gromacs.analysis.core import Worker, Plugin import dist # Worker classes that are registered via Plugins (see below) # ---------------------------------------------------------- # These must be defined before the plugins. class _CysAccessibility(Worker): """Analysis of Cysteine accessibility.""" def __init__(self,**kwargs): """Set up customized Cysteine accessibility analysis. :Arguments: cysteines : list list of *all* resids (eg from the sequence) that are used as labels or in the form 'Cys<resid>'. (**required**) cys_cutoff : number cutoff in nm for the minimum S-OW distance [1.0] Note that *all* Cys residues in the protein are analyzed. Therefore, the list of cysteine labels *must* contain as many entries as there are cysteines in the protein. There are no sanity checks. 
""" # specific arguments: take them before calling the super class that # does not know what to do with them cysteines = kwargs.pop('cysteines',None) # sequence resids as labels (NOT necessarily Gromacs itp) cys_cutoff = kwargs.pop('cys_cutoff', 1.0) # nm # super class init: do this before doing anything else # (also sets up self.parameters and self.results) super(_CysAccessibility,self).__init__(**kwargs) # process specific parameters now try: self.parameters.cysteines = map(int, cysteines) # sequence resids except (TypeError,ValueError): raise ValueError("Keyword argument cysteines MUST be set to sequence of resids.") self.parameters.cysteines.sort() # sorted because make_ndx returns sorted order self.parameters.cutoff = cys_cutoff if not self.simulation is None: self._register_hook() def _register_hook(self, **kwargs): """Run when registering; requires simulation.""" super(_CysAccessibility, self)._register_hook(**kwargs) assert not self.simulation is None # filename of the index file that we generate for the cysteines self.parameters.ndx = self.plugindir('cys.ndx') # output filenames for g_dist, indexed by Cys resid self.parameters.filenames = {resid: self.plugindir('Cys%d_OW_dist.txt.bz2' % resid) for resid in self.parameters.cysteines} # default filename for the combined plot self.parameters.figname = self.figdir('mindist_S_OW') # override 'API' methods of base class def run(self, cutoff=None, force=False, **gmxargs): """Run ``g_dist -dist cutoff`` for each cysteine and save output for further analysis. By default existing data files are *not* overwritten. If this behaviour is desired then set the *force* = ``True`` keyword argument. All other parameters are passed on to :func:`gromacs.g_dist`. """ if cutoff is None: cutoff = self.parameters.cutoff else: self.parameters.cutoff = cutoff # record cutoff used ndx = self.parameters.ndx # could move this into _register_hook? 
if not os.path.isfile(ndx): warnings.warn("Cysteine index file %r missing: running 'make_index_cys'." % ndx) self.make_index_cys() for resid in self.parameters.cysteines: groupname = 'Cys%(resid)d' % vars() commands = [groupname, 'OW'] filename = self.parameters.filenames[resid] if not force and self.check_file_exists(filename, resolve='warning'): continue print "run_g_dist: %(groupname)s --> %(filename)r" % vars() sys.stdout.flush() with open(filename, 'w') as datafile: p = gromacs.g_dist.Popen( s=self.simulation.tpr, f=self.simulation.xtc, n=ndx, dist=cutoff, input=commands, stderr=None, stdout=subprocess.PIPE, **gmxargs) compressor = subprocess.Popen(['bzip2', '-c'], stdin=p.stdout, stdout=datafile) p.communicate() def analyze(self,**kwargs): """Mindist analysis for all cysteines. Returns results for interactive analysis.""" results = AttributeDict() for resid in self.parameters.cysteines: groupname = 'Cys%(resid)d' % vars() # identifier should be a valid python variable name results[groupname] = self._mindist(resid) self.results = results return results def plot(self, **kwargs): """Plot all results in one graph, labelled by the result keys. :Keywords: figure - ``True``: save figures in the given formats - "name.ext": save figure under this filename (``ext`` -> format) - ``False``: only show on screen formats : sequence sequence of all formats that should be saved [('png', 'pdf')] plotargs keyword arguments for pylab.plot() """ import pylab figure = kwargs.pop('figure', False) extensions = kwargs.pop('formats', ('pdf','png')) for name,result in self.results.items(): kwargs['label'] = name result.plot(**kwargs) pylab.legend(loc='best') if figure is True: for ext in extensions: self.savefig(ext=ext) elif figure: self.savefig(filename=figure) # specific methods def make_index_cys(self): """Make index file for all cysteines and water oxygens. **NO SANITY CHECKS**: The SH atoms are simply labelled consecutively with the resids from the cysteines parameter. 
""" commands_1 = ['keep 0', 'del 0', 'r CYSH & t S', 'splitres 0', 'del 0'] # CYS-S sorted by resid commands_2 = ['t OW', 'q'] # water oxygens commands = commands_1[:] for groupid, resid in enumerate(self.parameters.cysteines): commands.append('name %(groupid)d Cys%(resid)d' % vars()) # name CYS-S groups canonically commands.extend(commands_2) return gromacs.make_ndx(f=self.simulation.tpr, o=self.parameters.ndx, input=commands, stdout=None) def _mindist(self,resid): """Analyze minimum distance for resid.""" filename = self.parameters.filenames[resid] return dist.Mindist(filename,cutoff=self.parameters.cutoff) # Public classes that register the worker classes #------------------------------------------------ class CysAccessibility(Plugin): """*CysAccessibility* plugin. For each frame of a trajectory, the shortest distance of all water oxygens to all cysteine sulphur atoms is computed. For computational efficiency, only distances smaller than a cutoff are taken into account. A histogram of the distances shows how close water molecules can get to cysteines. The closest approach distance should be indicative of the reactivity of the SH group with crosslinking agents. .. class:: CysAccessibility(cysteines, [cys_cutoff[, name[, simulation]]]) :Arguments: name : string plugin name (used to access it) simulation : instance The :class:`gromacs.analysis.Simulation` instance that owns the plugin. cysteines : list list of *all* resids (eg from the sequence) that are used as labels or in the form 'Cys<resid>'. (**required**) cys_cutoff : number cutoff in nm for the minimum S-OW distance [1.0] Note that *all* Cys residues in the protein are analyzed. Therefore, the list of cysteine labels *must* contain as many entries as there are cysteines in the protein. There are no sanity checks. """ worker_class = _CysAccessibility
pslacerda/GromacsWrapper
gromacs/analysis/plugins/cysaccessibility.py
Python
gpl-3.0
9,034
''' Python mapping for the Collaboration framework. This module does not contain docstrings for the wrapped code, check Apple's documentation for details on how to use these functions and classes. ''' import sys import objc import Foundation from Collaboration import _metadata sys.modules['Collaboration'] = objc.ObjCLazyModule( "Collaboration", "com.apple.Collaboration", objc.pathForFramework("/System/Library/Frameworks/Collaboration.framework"), _metadata.__dict__, None, { '__doc__': __doc__, '__path__': __path__, 'objc': objc, }, (Foundation,))
rishabhmalhotra/FireSync
PyObjC/Collaboration/__init__.py
Python
gpl-2.0
597
from registration.backends.default.urls import * from django.contrib import admin # Add next two lines to display static and media files # then add +static statement at the end of the urlpatterns from django.conf import * from django.conf import settings from django.conf.urls import url, include from django.conf.urls.static import static from django.contrib.auth import views as auth_views from django.core.urlresolvers import reverse_lazy from django.views.generic import TemplateView from django.views.static import serve as Static_Serve from accounts.views.sms import sms_login from accounts.views import logout from accounts.forms.other import (RegistrationFormUserTOSAndEmail, RegistrationFormDeveloperTOSAndEmail) from apps.api.views import * from apps.home.views import WhatIsNewListView, versionView from apps.v1api.views.metadata import fhir_metadata from apps.home import views from django.contrib import admin admin.autodiscover() urlpatterns = [ # url(r'^$', 'bbonfhiruser.views.home', name='home'), url(r'^$', views.home_index , name='home'), url(r'^metadata$', fhir_metadata, name="conformance"), # url(r'^about/$', views.about, # name='about'), url(r'^base/', include('apps.home.urls', namespace='base')), url(r'^version/$', views.versionView, name="versionview"), # url(r'^whatsnew/$', WhatIsNewListView.as_view(), # name='whatsnew'), # Accounts url(r'^accounts/', include('accounts.urls', namespace='accounts')), # AppMgmt url(r'^appmanagement/', include('appmgmt.urls', namespace='appmgmt')), # API Entry point # v1api is referenced inside apps.api.urls as v1 # api url(r'^api/', include('apps.api.urls', namespace='api')), # registration # Registration Options: # 1. Developer # 2. Beneficiary - Not Authenticated # 3. 
Beneficiary - Authenticated but not activated url(r'^registration/register/$', RegistrationView.as_view(form_class=RegistrationFormUserTOSAndEmail), name='register'), url(r'^registration/registerdeveloper/$', RegistrationView.as_view(form_class=RegistrationFormDeveloperTOSAndEmail), name='register_developer'), url(r'^registration/', include( 'registration.backends.default.urls', )), url(r'^auto_registration/', include('registration.backends.simple.urls')), # password url(r'^password/reset/$', auth_views.password_reset, {'post_reset_redirect': reverse_lazy( 'password_reset_done')}, name='password_reset'), url(r'^password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', auth_views.password_reset_confirm, name='password_reset_confirm'), url(r'^password/reset/complete/$', auth_views.password_reset_complete, {'post_reset_redirect': reverse_lazy( 'password_reset_complete')}, name='password_reset_complete'), url(r'^password/reset/done/$', auth_views.password_reset_done, name='password_reset_done'), # fhir url(r'^fhir/', include('fhir.urls', namespace='fhir')), # OAuth2_provider url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')), # OAuth2 Provider Library url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')), # login / logout url(r'^login$', sms_login, name='login'), url(r'^logout$', logout, name='logout'), # Templates url(r'^support/$', TemplateView.as_view(template_name='support.html')), # Setup url(r'^setup/', include('apps.setup.urls', namespace='setup')), # Admin url(r'^admin/', include(admin.site.urls)), # Uncomment the admin/doc line below to enable admin # documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), ]# + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # Django 1.8 method for serving static media files if settings.DEBUG: urlpatterns.append(url(r'^media/(?P<path>.*)$', Static_Serve, {'document_root': settings.MEDIA_ROOT}))
ekivemark/BlueButtonFHIR_API
bbapi/urls.py
Python
apache-2.0
5,752
import numpy as np from numpy.linalg import * import matplotlib.pyplot as plt import pylab as P from sklearn.preprocessing import normalize ### Load data ### def loadData(fname): data = [] with open(fname, 'r') as f: for l in f: data.append(l.split(",")) f.close return np.array(data, float) data = loadData('ML2015TrafficSignsTrain.csv') classes = data[:, 1568] data = data[:, 0:1568] ### Normalize data ### meanVec = np.mean(data, axis=0) varVec = np.var(data, axis=0) for i in range(len(data)): data[i] = data[i] - meanVec data[i] = data[i] / np.sqrt(varVec) ### Histogram ### def makeFreqHistogram(size, classes): P.figure() n, bins, patches = P.hist(classes, size, normed=1, histtype='bar', color=['red'], label=['Traffic sign frequency']) P.legend() P.show() return makeFreqHistogram(43, classes) ### PCA ### def pca(data, classes): data = data[:, 0:1568] meanV = np.mean(data, axis=0) covM = np.cov(data, rowvar=0) eigVal, eigVec = np.linalg.eig(covM) eigVal = np.real(eigVal) eigVec = np.real(eigVec) eigP = [] for i in range(len(eigVal)): eigP.append((np.abs(eigVal[i]), eigVec[:,i])) matW = np.hstack((eigP[0][1].reshape(1568, 1), eigP[1][1].reshape(1568,1))) newData = np.dot(np.transpose(matW), np.transpose(data)) return (eigVal, newData) ### Eigenspectrum plot ### (eigVal, newData) = pca(data, classes) eigVal.sort() plt.yscale('log') plt.plot(np.arange(1,1569), eigVal[::-1], 'k') plt.title('Eigenspectrum') plt.show() ### Number of principal components to explain 90% of variance ### var90 = np.sum(eigVal)*0.9 b = 0.0 i = 0 for a in eigVal[::-1]: i += 1 b += a if (b > var90): print "Number of principal components to explain 90% of the variance:" print i break ### Scatter plot ### for i in range(1275): c = classes[i] if (c == 0 or c == 1 or c == 2 or c == 3 or c == 4 or c == 5 or c == 6 or c == 7 or c == 8 or c == 9 or c == 10 or c == 15 or c == 16 or c == 17 or c == 32 or c == 33 or c == 34 or c == 35 or c == 36 or c == 37 or c == 38 or c == 39 or c == 40 or c == 41 or c 
== 42): plt.plot(newData[0, i], newData[1, i], 'ro') elif (c == 11 or c == 18 or c == 19 or c == 20 or c == 21 or c == 22 or c == 23 or c == 24 or c == 25 or c == 26 or c == 27 or c == 28 or c == 29 or c == 30 or c == 31): plt.plot(newData[0, i], newData[1, i], 'b^') elif (c == 12): plt.plot(newData[0, i], newData[1, i], 'gD') elif (c == 13): plt.plot(newData[0, i], newData[1, i], 'kv') else: plt.plot(newData[0, i], newData[1, i], 'y8') plt.xlabel('x-values') plt.ylabel('y-values') plt.title('Result from PCA') plt.show() ### K-means Clustering ### np.random.seed(1) def kmeans(k, data, classes): entries = data.shape[0] centroids = [None] * k for i in range(k): rand = np.random.random_integers(0, entries-1) centroids[i] = data[rand] centroids = np.array(centroids, float) dists = np.zeros((entries, k)) labels = np.zeros((entries, 1)) oldCentroids = np.zeros(centroids.shape) while (np.linalg.norm(centroids - oldCentroids) > 0.0001): oldCentroids = np.copy(centroids) for i in range(k): for j in range(entries): dists[j, i] = np.linalg.norm(centroids[i] - data[j]) for i in range(entries): entryDists = dists[i ,:] labels[i] = np.argmin(entryDists) for i in range(k): centroids[i] = np.mean(data[labels[:, 0] == i], axis=0) return centroids cents = kmeans(4, data, classes) modData = np.r_[data, cents] modClasses = np.append(classes, [99, 99, 99, 99]) ### Scatter plot with centroids ### (eigVal, newData) = pca(modData, modClasses) for i in range(1279): c = modClasses[i] if (c == 0 or c == 1 or c == 2 or c == 3 or c == 4 or c == 5 or c == 6 or c == 7 or c == 8 or c == 9 or c == 10 or c == 15 or c == 16 or c == 17 or c == 32 or c == 33 or c == 34 or c == 35 or c == 36 or c == 37 or c == 38 or c == 39 or c == 40 or c == 41 or c == 42): plt.plot(newData[0, i], newData[1, i], 'ro') elif (c == 11 or c == 18 or c == 19 or c == 20 or c == 21 or c == 22 or c == 23 or c == 24 or c == 25 or c == 26 or c == 27 or c == 28 or c == 29 or c == 30 or c == 31): plt.plot(newData[0, i], 
newData[1, i], 'b^') elif (c == 12): plt.plot(newData[0, i], newData[1, i], 'gD') elif (c == 13): plt.plot(newData[0, i], newData[1, i], 'kv') elif (c == 99): plt.plot(newData[0, i], newData[1, i], 'm*', ms=20) else: plt.plot(newData[0, i], newData[1, i], 'y8') plt.xlabel('x-values') plt.ylabel('y-values') plt.title('Result from 4-means clustering') plt.show()
Rathcke/uni
ml/exam/src/q1.py
Python
gpl-3.0
4,966
from __future__ import print_function import argparse, re import numpy as np from pyspark import SparkSession from pyspark.sql import Row from pyspark.sql.functions import col, datediff, lit, sum as gsum from pyspark.ml.feature import CountVectorizer, VectorAssembler def pairFeatures(sseries, dseries, sday, dday): lag = abs(dday - sday) # throw away the sign to allow learning lagBin = str(int(np.log(lag))) if lag > 0 else '-inf' return ["src:" + sseries, "dst:" + dseries, "pair:" + sseries + ":" + dseries, "lag:" + lagBin] def normalizeText(s): return re.sub("[^\w\s]", "", re.sub("\s+", " ", s.strip().lower())) def clusterFeatures(c, gap): ## Sorting by string date needs to be consistent with numerical date wits = sorted(c[1], key=lambda w: w.date + ' ' + w.series) n = len(wits) res = [] curday = wits[0].day - gap prevday = curday for d in range(n): dst = wits[d] dstClean = normalizeText(dst.text) if dst.day > curday: prevday = curday curday = dst.day allowRoot = 1 if ( curday - prevday >= gap ) else 0 res.append(Row(cluster=long(c[0]), src=0, dst=d+1, label=allowRoot, longer=0.0, shorter=0.0, raw=["root:" + dst.series])) for s in range(n): src = wits[s] if (s != d) and (abs(dst.day - src.day) < gap): srcClean = normalizeText(src.text) growth = (len(dstClean) - len(srcClean))/float(len(srcClean)) res.append(Row(cluster=long(c[0]), src=s+1, dst=d+1, label=(1 if dst.day > src.day else 0), longer=growth if growth > 0 else 0.0, shorter=abs(growth) if growth < 0 else 0.0, raw=pairFeatures(src.series, dst.series, src.day, dst.day))) return res # Pad upper left row/col with zeros. 
def padUpLeft(m): size = m.shape[0] return np.concatenate((np.zeros((size+1, 1)), np.concatenate((np.zeros((1, size)), m), axis=0)), axis=1) def laplaceGradient(L): tinv = padUpLeft(np.transpose(np.linalg.inv(L[1:, 1:]))) return tinv - tinv.diagonal() ## Should figure out how to reuse this in clusterGradients def clusterPosteriors(c, w): n = max(map(lambda r: r.dst, c[1])) + 1 L = np.zeros((n, n)) for r in c[1]: score = -np.exp(w[np.array(r.features.indices)].dot(r.features.values)) L[r.src, r.dst] = score if r.label == 1 else 0 L += np.diag(-L.sum(axis=0)) Lgrad = laplaceGradient(L) posts = [] for r in c[1]: mom = r.src kid = r.dst post = L[mom, kid] * Lgrad[mom, kid] posts.append(Row(post=float(post), **(r.asDict()))) return posts def clusterGradients(c, w): n = max(map(lambda r: r.dst, c[1])) + 1 numL = np.zeros((n, n)) denL = np.zeros((n, n)) for r in c[1]: score = -np.exp(w[np.array(r.features.indices)].dot(r.features.values)) numL[r.src, r.dst] = score if r.label == 1 else 0 denL[r.src, r.dst] = score numL += np.diag(-numL.sum(axis=0)) denL += np.diag(-denL.sum(axis=0)) numLgrad = laplaceGradient(numL) denLgrad = laplaceGradient(denL) fgrad = [] for r in c[1]: mom = r.src kid = r.dst grad = -numL[mom, kid] * numLgrad[mom, kid] + denL[mom, kid] * denLgrad[mom, kid] fgrad += [(long(f), float(grad * v)) for f, v in zip(r.features.indices, r.features.values)] return fgrad def featurizeData(raw, gap, vocabFile, featFile): feats = raw.dropDuplicates(['cluster', 'series', 'date'])\ .withColumn('day', datediff(col('date'), lit('1970-01-01')))\ .na.drop(subset=['day'])\ .rdd.groupBy(lambda r: r.cluster)\ .flatMap(lambda c: clusterFeatures(c, gap))\ .toDF() feats.cache() cv = CountVectorizer(inputCol='raw', outputCol='features', minDF=4.0) interner = cv.fit(feats) # alternate possibility: grab features only from label==1 edges full = interner.transform(feats) # combiner = VectorAssembler(inputCols=realCols + ['categorial'], outputCol='features') # # I don't think a 
Pipeline will work here since we need to get the interner.vocabulary # full = combiner.transform(interner.transform(feats)).drop('categorial') full.write.parquet(featFile) np.savetxt(vocabFile, np.array(interner.vocabulary), fmt='%s') feats.unpersist() if __name__ == "__main__": argparser = argparse.ArgumentParser(description='Cascade features') argparser.add_argument('-f', '--input', help='Input data') argparser.add_argument('-g', '--gap', type=int, default=730) argparser.add_argument('-i', '--iterations', type=int, default=20) argparser.add_argument('-r', '--rate', type=float, default=1.0) argparser.add_argument('-v', '--variance', type=float, default=1.0) argparser.add_argument('-p', '--posteriors', metavar='params') argparser.add_argument('outdir', help='Output directory') args = argparser.parse_args() spark = SparkSession.builder.appName('Cascade Features').getOrCreate() vocabFile = args.outdir + "/vocab.gz" featFile = args.outdir + "/feats.parquet" try: full = spark.read.load(featFile) vocab = np.loadtxt(vocabFile, 'string') except: featurizeData(spark.read.load(args.input), args.gap, vocabFile, featFile) full = spark.read.load(featFile) vocab = np.loadtxt(vocabFile, 'string') if args.posteriors: w = np.loadtxt(args.posteriors) full.rdd.groupBy(lambda r: r.cluster).flatMap(lambda c: clusterPosteriors(c, w)).toDF()\ .write.save(args.posteriors + ".parquet") exit(0) fcount = len(vocab) w = np.zeros(fcount) fdata = full.select('cluster', 'src', 'dst', 'label', 'features')\ .rdd.groupBy(lambda r: r.cluster) fdata.cache() rate = args.rate / fdata.count() # scale with training size for i in range(args.iterations): grad = fdata.flatMap(lambda c: clusterGradients(c, w)).toDF(['feat', 'grad'])\ .groupBy('feat').agg(gsum('grad').alias('grad'))\ .collect() update = np.zeros(fcount) for g in grad: update[g.feat] = g.grad if args.variance > 0: update += w / args.variance w -= rate * update np.savetxt("%s/iter%03d.gz" % (args.outdir, i), w) spark.stop()
ViralTexts/vt-passim
scripts/place-cascade.py
Python
apache-2.0
6,658
# Copyright 2017 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generic benchmark running Apache Beam Integration Tests as benchmarks. This benchmark provides the piping necessary to run Apache Beam Integration Tests as benchmarks. It provides the minimum additional configuration necessary to get the benchmark going. """ import copy import datetime import tempfile from absl import flags from perfkitbenchmarker import beam_benchmark_helper from perfkitbenchmarker import beam_pipeline_options from perfkitbenchmarker import configs from perfkitbenchmarker import dpb_service from perfkitbenchmarker import errors from perfkitbenchmarker import kubernetes_helper from perfkitbenchmarker import sample from perfkitbenchmarker.dpb_service import BaseDpbService BENCHMARK_NAME = 'beam_integration_benchmark' BENCHMARK_CONFIG = """ beam_integration_benchmark: description: Run word count on dataflow and dataproc dpb_service: service_type: dataflow worker_group: vm_spec: GCP: machine_type: n1-standard-1 boot_disk_size: 500 AWS: machine_type: m3.medium disk_spec: GCP: disk_type: nodisk AWS: disk_size: 500 disk_type: gp2 worker_count: 2 """ DEFAULT_JAVA_IT_CLASS = 'org.apache.beam.examples.WordCountIT' DEFAULT_PYTHON_IT_MODULE = ('apache_beam.examples.wordcount_it_test:' 'WordCountIT.test_wordcount_it') flags.DEFINE_string('beam_it_class', None, 'Path to IT class') flags.DEFINE_string('beam_it_args', None, 'Args to provide to the IT.' 
' Deprecated & replaced by beam_it_options') flags.DEFINE_string('beam_it_options', None, 'Pipeline Options sent to the' ' integration test.') flags.DEFINE_string('beam_kubernetes_scripts', None, 'A local path to the' ' Kubernetes scripts to run which will instantiate a' ' datastore.') flags.DEFINE_string('beam_options_config_file', None, 'A local path to the' ' yaml file defining static and dynamic pipeline options to' ' use for this benchmark run.') FLAGS = flags.FLAGS def GetConfig(user_config): return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) def CheckPrerequisites(benchmark_config_spec): """Verifies that the required resources are present. Raises: perfkitbenchmarker.errors.Config.InvalidValue: If no Beam args are provided. NotImplementedError: If an invalid runner is specified. """ if FLAGS.beam_it_options is None and FLAGS.beam_it_args is None: raise errors.Config.InvalidValue( 'No options provided. To run with default class (WordCountIT), must' ' provide --beam_it_options=--tempRoot=<temp dir,' ' e.g. gs://my-dir/temp>.') if FLAGS.beam_sdk is None: raise errors.Config.InvalidValue( 'No sdk provided. To run Beam integration benchmark, the test must' 'specify which sdk is used in the pipeline. 
For example, java/python.') if benchmark_config_spec.dpb_service.service_type != dpb_service.DATAFLOW: raise NotImplementedError('Currently only works against Dataflow.') if (FLAGS.beam_it_options and (not FLAGS.beam_it_options.endswith(']') or not FLAGS.beam_it_options.startswith('['))): raise Exception("beam_it_options must be of form" " [\"--option=value\",\"--option2=val2\"]") def Prepare(benchmark_spec): beam_benchmark_helper.InitializeBeamRepo(benchmark_spec) benchmark_spec.always_call_cleanup = True kubernetes_helper.CreateAllFiles(getKubernetesScripts()) pass def getKubernetesScripts(): if FLAGS.beam_kubernetes_scripts: scripts = FLAGS.beam_kubernetes_scripts.split(',') return scripts else: return [] def Run(benchmark_spec): # Get handle to the dpb service dpb_service_instance = benchmark_spec.dpb_service # Create a file handle to contain the response from running the job on # the dpb service stdout_file = tempfile.NamedTemporaryFile(suffix='.stdout', prefix='beam_integration_benchmark', delete=False) stdout_file.close() if FLAGS.beam_it_class is None: if FLAGS.beam_sdk == beam_benchmark_helper.BEAM_JAVA_SDK: classname = DEFAULT_JAVA_IT_CLASS elif FLAGS.beam_sdk == beam_benchmark_helper.BEAM_PYTHON_SDK: classname = DEFAULT_PYTHON_IT_MODULE else: raise NotImplementedError('Unsupported Beam SDK: %s.' 
% FLAGS.beam_sdk) else: classname = FLAGS.beam_it_class static_pipeline_options, dynamic_pipeline_options = \ beam_pipeline_options.ReadPipelineOptionConfigFile() job_arguments = beam_pipeline_options.GenerateAllPipelineOptions( FLAGS.beam_it_args, FLAGS.beam_it_options, static_pipeline_options, dynamic_pipeline_options) job_type = BaseDpbService.BEAM_JOB_TYPE results = [] metadata = copy.copy(dpb_service_instance.GetMetadata()) start = datetime.datetime.now() dpb_service_instance.SubmitJob( classname=classname, job_arguments=job_arguments, job_stdout_file=stdout_file, job_type=job_type) end_time = datetime.datetime.now() run_time = (end_time - start).total_seconds() results.append(sample.Sample('run_time', run_time, 'seconds', metadata)) return results def Cleanup(benchmark_spec): kubernetes_helper.DeleteAllFiles(getKubernetesScripts()) pass
GoogleCloudPlatform/PerfKitBenchmarker
perfkitbenchmarker/linux_benchmarks/beam_integration_benchmark.py
Python
apache-2.0
6,060
"""Interface implementation for cloud client.""" from __future__ import annotations import asyncio import logging from pathlib import Path from typing import Any import aiohttp from hass_nabucasa.client import CloudClient as Interface from homeassistant.components.alexa import ( errors as alexa_errors, smart_home as alexa_sh, ) from homeassistant.components.google_assistant import const as gc, smart_home as ga from homeassistant.const import HTTP_OK from homeassistant.core import Context, HomeAssistant, callback from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.event import async_call_later from homeassistant.util.aiohttp import MockRequest from . import alexa_config, google_config, utils from .const import DISPATCHER_REMOTE_UPDATE, DOMAIN from .prefs import CloudPreferences class CloudClient(Interface): """Interface class for Home Assistant Cloud.""" def __init__( self, hass: HomeAssistant, prefs: CloudPreferences, websession: aiohttp.ClientSession, alexa_user_config: dict[str, Any], google_user_config: dict[str, Any], ) -> None: """Initialize client interface to Cloud.""" self._hass = hass self._prefs = prefs self._websession = websession self.google_user_config = google_user_config self.alexa_user_config = alexa_user_config self._alexa_config = None self._google_config = None @property def base_path(self) -> Path: """Return path to base dir.""" return Path(self._hass.config.config_dir) @property def prefs(self) -> CloudPreferences: """Return Cloud preferences.""" return self._prefs @property def loop(self) -> asyncio.BaseEventLoop: """Return client loop.""" return self._hass.loop @property def websession(self) -> aiohttp.ClientSession: """Return client session for aiohttp.""" return self._websession @property def aiohttp_runner(self) -> aiohttp.web.AppRunner: """Return client webinterface aiohttp application.""" return self._hass.http.runner @property def cloudhooks(self) -> dict[str, dict[str, str]]: """Return list of 
cloudhooks.""" return self._prefs.cloudhooks @property def remote_autostart(self) -> bool: """Return true if we want start a remote connection.""" return self._prefs.remote_enabled async def get_alexa_config(self) -> alexa_config.AlexaConfig: """Return Alexa config.""" if self._alexa_config is None: assert self.cloud is not None cloud_user = await self._prefs.get_cloud_user() self._alexa_config = alexa_config.AlexaConfig( self._hass, self.alexa_user_config, cloud_user, self._prefs, self.cloud ) await self._alexa_config.async_initialize() return self._alexa_config async def get_google_config(self) -> google_config.CloudGoogleConfig: """Return Google config.""" if not self._google_config: assert self.cloud is not None cloud_user = await self._prefs.get_cloud_user() self._google_config = google_config.CloudGoogleConfig( self._hass, self.google_user_config, cloud_user, self._prefs, self.cloud ) await self._google_config.async_initialize() return self._google_config async def cloud_started(self) -> None: """When cloud is started.""" is_new_user = await self.prefs.async_set_username(self.cloud.username) async def enable_alexa(_): """Enable Alexa.""" aconf = await self.get_alexa_config() try: await aconf.async_enable_proactive_mode() except aiohttp.ClientError as err: # If no internet available yet if self._hass.is_running: logging.getLogger(__package__).warning( "Unable to activate Alexa Report State: %s. 
Retrying in 30 seconds", err, ) async_call_later(self._hass, 30, enable_alexa) except alexa_errors.NoTokenAvailable: pass async def enable_google(_): """Enable Google.""" gconf = await self.get_google_config() gconf.async_enable_local_sdk() if gconf.should_report_state: gconf.async_enable_report_state() if is_new_user: await gconf.async_sync_entities(gconf.agent_user_id) tasks = [] if self._prefs.alexa_enabled and self._prefs.alexa_report_state: tasks.append(enable_alexa) if self._prefs.google_enabled: tasks.append(enable_google) if tasks: await asyncio.gather(*(task(None) for task in tasks)) async def cloud_stopped(self) -> None: """When the cloud is stopped.""" async def logout_cleanups(self) -> None: """Cleanup some stuff after logout.""" await self.prefs.async_set_username(None) self._google_config = None @callback def user_message(self, identifier: str, title: str, message: str) -> None: """Create a message for user to UI.""" self._hass.components.persistent_notification.async_create( message, title, identifier ) @callback def dispatcher_message(self, identifier: str, data: Any = None) -> None: """Match cloud notification to dispatcher.""" if identifier.startswith("remote_"): async_dispatcher_send(self._hass, DISPATCHER_REMOTE_UPDATE, data) async def async_cloud_connect_update(self, connect: bool) -> None: """Process cloud remote message to client.""" await self._prefs.async_update(remote_enabled=connect) async def async_alexa_message(self, payload: dict[Any, Any]) -> dict[Any, Any]: """Process cloud alexa message to client.""" cloud_user = await self._prefs.get_cloud_user() aconfig = await self.get_alexa_config() return await alexa_sh.async_handle_message( self._hass, aconfig, payload, context=Context(user_id=cloud_user), enabled=self._prefs.alexa_enabled, ) async def async_google_message(self, payload: dict[Any, Any]) -> dict[Any, Any]: """Process cloud google message to client.""" if not self._prefs.google_enabled: return ga.turned_off_response(payload) 
gconf = await self.get_google_config() return await ga.async_handle_message( self._hass, gconf, gconf.cloud_user, payload, gc.SOURCE_CLOUD ) async def async_webhook_message(self, payload: dict[Any, Any]) -> dict[Any, Any]: """Process cloud webhook message to client.""" cloudhook_id = payload["cloudhook_id"] found = None for cloudhook in self._prefs.cloudhooks.values(): if cloudhook["cloudhook_id"] == cloudhook_id: found = cloudhook break if found is None: return {"status": HTTP_OK} request = MockRequest( content=payload["body"].encode("utf-8"), headers=payload["headers"], method=payload["method"], query_string=payload["query"], mock_source=DOMAIN, ) response = await self._hass.components.webhook.async_handle_webhook( found["webhook_id"], request ) response_dict = utils.aiohttp_serialize_response(response) body = response_dict.get("body") return { "body": body, "status": response_dict["status"], "headers": {"Content-Type": response.content_type}, } async def async_cloudhooks_update(self, data: dict[str, dict[str, str]]) -> None: """Update local list of cloudhooks.""" await self._prefs.async_update(cloudhooks=data)
lukas-hetzenecker/home-assistant
homeassistant/components/cloud/client.py
Python
apache-2.0
8,121
"""Integration tests for the aioconsul health endpoint.

These tests rely on pytest fixtures supplied by the suite's conftest:
``client`` is a connected aioconsul client; the asserts below imply the
test cluster has a node named "server1" and a service named "redis"
(TODO confirm against the fixture setup).
"""
import pytest
from collections.abc import Sequence


@pytest.mark.asyncio
async def test_endpoint(client):
    # The endpoint repr should embed the address the client talks to.
    assert repr(client.health) == "<HealthEndpoint(%r)>" % str(client.address)


@pytest.mark.parametrize("node", [
    "server1",
    {"Node": "server1"},
    {"ID": "server1"}
])
@pytest.mark.asyncio
async def test_node(client, node):
    # A node may be passed as a bare name or as a mapping carrying its
    # name/id; all spellings must resolve to the same node.
    data, meta = await client.health.node(node)
    assert isinstance(data, Sequence)
    assert data[0]["Node"] == "server1"
    assert_meta(meta)


@pytest.mark.parametrize("service", [
    "redis",
    {"ServiceID": "redis"},
    {"ID": "redis"}
])
@pytest.mark.asyncio
async def test_checks(client, service):
    # Health checks for a service, addressed by name or by mapping.
    data, meta = await client.health.checks(service)
    assert isinstance(data, Sequence)
    assert_meta(meta)


@pytest.mark.parametrize("service", [
    "redis",
    {"ServiceID": "redis"},
    {"ID": "redis"}
])
@pytest.mark.asyncio
async def test_service(client, service):
    # Nodes providing a service, addressed by name or by mapping.
    data, meta = await client.health.service(service)
    assert isinstance(data, Sequence)
    assert_meta(meta)


@pytest.mark.parametrize("state", [
    "any",
    "passing",
    "warning",
    "critical"
])
@pytest.mark.asyncio
async def test_state(client, state):
    # Checks filtered by state; "any" is the catch-all Consul accepts.
    data, meta = await client.health.state(state)
    assert isinstance(data, Sequence)
    assert_meta(meta)


def assert_meta(data):
    # Blocking-query metadata expected alongside every Consul response.
    assert "Index" in data
    assert "KnownLeader" in data
    assert "LastContact" in data
johnnoone/aioconsul
tests/test_health.py
Python
bsd-3-clause
1,397
from django import forms
from django.contrib.auth.models import User

from .CaptchaSecuredForm import CaptchaSecuredForm


class RegistrationForm(CaptchaSecuredForm):
    """User registration form.

    Adds (via CaptchaSecuredForm) a captcha check, plus duplicate-email
    and password-confirmation validation on top of the plain fields.
    """

    email = forms.EmailField(label='E-mail address')
    firstName = forms.CharField(label='First name')
    lastName = forms.CharField(label='Last name')
    # render_value=False: never echo a typed password back into the widget.
    password = forms.CharField(label=u'Password',
                               widget=forms.PasswordInput(render_value=False))
    retyped = forms.CharField(label=u'Confirm Password',
                              widget=forms.PasswordInput(render_value=False))

    def clean(self):
        """Cross-field validation: the two password entries must match."""
        cleaned_data = super(RegistrationForm, self).clean()
        password = cleaned_data.get('password')
        retyped = cleaned_data.get('retyped')
        if password and retyped:
            if password != retyped:
                # Attach the mismatch error to the confirmation field.
                self.errors['retyped'] = self.error_class(['The passwords do not match.'])

        # NOTE(review): both password entries are removed from cleaned_data
        # unconditionally, not only when they are invalid -- presumably the
        # view reads the password from the raw POST data instead; confirm
        # before relying on cleaned_data['password'] anywhere.
        if password:
            del cleaned_data['password']
        if retyped:
            del cleaned_data['retyped']
        return cleaned_data

    def clean_email(self):
        """Reject e-mail addresses already attached to an existing user."""
        user = None
        data = self.cleaned_data['email']
        try:
            user = User.objects.get(email=data)
        except User.DoesNotExist:
            # No existing account -- the address is free to register.
            pass
        if user is not None:
            raise forms.ValidationError('This email address is already registered.')
        return data
danrg/RGT-tool
src/RGT/authentication/forms/RegistrationForm.py
Python
mit
1,488
import re
import os
import sys

from astroid.builder import AstroidBuilder
from astroid import MANAGER, Name, Assign, Keyword, List, Tuple, Const

from requirements_detector.__compat__ import Call, AssignName
from requirements_detector.requirement import DetectedRequirement

__all__ = [
    "find_requirements",
    "from_requirements_txt",
    "from_requirements_dir",
    "from_setup_py",
    "RequirementsNotFound",
    "CouldNotParseRequirements",
]

# PEP263, see http://legacy.python.org/dev/peps/pep-0263/
_ENCODING_REGEXP = re.compile(r"coding[:=]\s*([-\w.]+)")

_PY3K = sys.version_info >= (3, 0)

# pip options that may legally start a line in a requirements file; such
# lines are not requirements themselves and must be skipped.
_PIP_OPTIONS = (
    "-i",
    "--index-url",
    "--extra-index-url",
    "--no-index",
    "-f",
    "--find-links",
    "-r",
)


class RequirementsNotFound(Exception):
    """Raised when no requirements could be located for a project."""


class CouldNotParseRequirements(Exception):
    """Raised when a requirements source exists but cannot be parsed."""


def _load_file_contents(filepath):
    """Return the (unicode) text of *filepath*, honouring PEP-263.

    This function is a bit of a tedious workaround (AKA 'hack').
    Astroid calls 'compile' under the hood, which refuses to accept a
    Unicode object which contains a PEP-263 encoding definition. However
    if we give Astroid raw bytes, it'll assume ASCII. Therefore we detect
    the encoding here, convert the file contents to a Unicode object,
    *and also strip the encoding declaration* to avoid the compile step
    breaking. Only needed on Python 2; on 3.x we just read the file.
    """
    with open(filepath) as f:
        if _PY3K:
            return f.read()
        contents = f.readlines()

    result = []
    # PEP-263 allows the declaration only on the first or second line.
    encoding_lines = contents[0:2]
    encoding = "utf-8"
    for line in encoding_lines:
        match = _ENCODING_REGEXP.search(line)
        if match is None:
            result.append(line.strip())
        else:
            # Drop the declaration line itself, keep only the encoding.
            encoding = match.group(1)
    result += [line.rstrip() for line in contents[2:]]
    result = "\n".join(result)
    return result.decode(encoding)


def find_requirements(path):
    """Determine the requirements of the project rooted at *path*.

    It will attempt, in order:

    1) to parse setup.py in the root for an install_requires value
    2) to read a requirements.txt file or a requirements.pip in the root
    3) to read all .txt files in a folder called 'requirements' in the root
    4) to read files matching "*requirements*.txt" and "*reqs*.txt" in the
       root, excluding any starting or ending with 'test'

    If one of these succeeds, then a sorted list of
    DetectedRequirement objects will be returned. If none can be found,
    a RequirementsNotFound is raised.
    """
    requirements = []

    setup_py = os.path.join(path, "setup.py")
    # os.path.isfile() already implies existence.
    if os.path.isfile(setup_py):
        try:
            # A parseable setup.py is authoritative: return immediately.
            requirements = from_setup_py(setup_py)
            requirements.sort()
            return requirements
        except CouldNotParseRequirements:
            pass

    for reqfile_name in ("requirements.txt", "requirements.pip"):
        reqfile_path = os.path.join(path, reqfile_name)
        if os.path.isfile(reqfile_path):
            try:
                requirements += from_requirements_txt(reqfile_path)
            except CouldNotParseRequirements:
                # A broken file should not abort the remaining strategies.
                pass

    requirements_dir = os.path.join(path, "requirements")
    if os.path.isdir(requirements_dir):
        # Always returns a list (possibly empty), so no None-check needed.
        requirements += from_requirements_dir(requirements_dir)

    requirements += from_requirements_blob(path)

    # De-duplicate while keeping deterministic output order.
    requirements = list(set(requirements))
    if len(requirements) > 0:
        requirements.sort()
        return requirements

    raise RequirementsNotFound


class SetupWalker(object):
    """Walk a setup.py AST collecting the setup() call and top-level assigns."""

    def __init__(self, ast):
        self._ast = ast
        self._setup_call = None
        self._top_level_assigns = {}
        self.walk()

    def walk(self, node=None):
        top = node is None
        node = node or self._ast

        # test to see if this is a call to setup()
        if isinstance(node, Call):
            for child_node in node.get_children():
                if isinstance(child_node, Name) and child_node.name == "setup":
                    # TODO: what if this isn't actually the distutils setup?
                    self._setup_call = node

        for child_node in node.get_children():
            if top and isinstance(child_node, Assign):
                # Remember top-level "name = value" so that
                # install_requires=SOME_NAME can be resolved later.
                for target in child_node.targets:
                    if isinstance(target, AssignName):
                        self._top_level_assigns[target.name] = child_node.value
            self.walk(child_node)

    def _get_list_value(self, list_node):
        """Return the constant values of a List/Tuple node, or raise."""
        values = []
        for child_node in list_node.get_children():
            if not isinstance(child_node, Const):
                # we can't handle anything fancy, only constant values
                raise CouldNotParseRequirements
            values.append(child_node.value)
        return values

    def get_requires(self):
        """Return the requirement strings passed to setup(), or raise."""
        # first, if we have a call to setup, then we can see what its
        # "install_requires" argument is
        if not self._setup_call:
            raise CouldNotParseRequirements

        found_requirements = []

        for child_node in self._setup_call.get_children():
            if not isinstance(child_node, Keyword):
                # do we want to try to handle positional arguments?
                continue

            if child_node.arg not in ("install_requires", "requires"):
                continue

            if isinstance(child_node.value, (List, Tuple)):
                # joy! this is a simple list or tuple of requirements
                # this is a Keyword -> List or Keyword -> Tuple
                found_requirements += self._get_list_value(child_node.value)
                continue

            if isinstance(child_node.value, Name):
                # otherwise, it's referencing a value defined elsewhere
                # this will be a Keyword -> Name
                try:
                    reqs = self._top_level_assigns[child_node.value.name]
                except KeyError:
                    raise CouldNotParseRequirements
                else:
                    if isinstance(reqs, (List, Tuple)):
                        found_requirements += self._get_list_value(reqs)
                        continue

            # otherwise it's something funky and we can't handle it
            raise CouldNotParseRequirements

        # if we've fallen off the bottom with nothing in our list of
        # requirements, we simply didn't find anything useful
        if len(found_requirements) > 0:
            return found_requirements
        raise CouldNotParseRequirements


def from_setup_py(setup_file):
    """Parse *setup_file* with astroid and extract its requirements."""
    try:
        from astroid import AstroidBuildingException
    except ImportError:
        # Older/newer astroid may not expose this exception class.
        syntax_exceptions = (SyntaxError,)
    else:
        syntax_exceptions = (SyntaxError, AstroidBuildingException)

    try:
        contents = _load_file_contents(setup_file)
        ast = AstroidBuilder(MANAGER).string_build(contents)
    except syntax_exceptions:
        # if the setup file is broken, we can't do much about that...
        raise CouldNotParseRequirements

    walker = SetupWalker(ast)

    requirements = []
    for req in walker.get_requires():
        requirements.append(DetectedRequirement.parse(req, setup_file))

    return [requirement for requirement in requirements if requirement is not None]


def from_requirements_txt(requirements_file):
    """Parse a pip requirements file into DetectedRequirement objects.

    See http://www.pip-installer.org/en/latest/logic.html
    """
    requirements = []
    with open(requirements_file) as f:
        for req in f.readlines():
            stripped = req.strip()
            if stripped == "":
                # empty line
                continue
            if stripped.startswith("#"):
                # this is a comment
                continue
            if stripped.split()[0] in _PIP_OPTIONS:
                # this is a pip option
                continue
            detected = DetectedRequirement.parse(req, requirements_file)
            if detected is None:
                continue
            requirements.append(detected)

    return requirements


def from_requirements_dir(path):
    """Collect requirements from every .txt/.pip file directly in *path*."""
    requirements = []
    for entry in os.listdir(path):
        filepath = os.path.join(path, entry)
        if not os.path.isfile(filepath):
            continue
        if entry.endswith((".txt", ".pip")):
            # TODO: deal with duplicates
            requirements += from_requirements_txt(filepath)

    return requirements


def from_requirements_blob(path):
    """Collect requirements from files matching *req(uirement)?s*.txt in *path*.

    Files whose prefix starts with 'test' or whose suffix ends with 'test'
    are skipped, as they are assumed to be test-only requirements.
    """
    requirements = []

    for entry in os.listdir(path):
        filepath = os.path.join(path, entry)
        if not os.path.isfile(filepath):
            continue
        m = re.match(r"^(\w*)req(uirement)?s(\w*)\.txt$", entry)
        if m is None:
            continue
        if m.group(1).startswith("test") or m.group(3).endswith("test"):
            continue
        requirements += from_requirements_txt(filepath)

    return requirements
landscapeio/requirements-detector
requirements_detector/detect.py
Python
mit
9,181
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Emit shell commands that fetch AccuWeather monthly-table pages.

For every station in ``stations.city`` that has an AccuWeather station id,
print a retry-until-success ``wget`` command for the current and the next
month. The output is intended to be piped into a shell.
"""
import datetime
import os
import random

import stations

now = datetime.date.today()
curMonth = datetime.date(now.year, now.month, 1)
# Roll the year forward when the current month is December.
nextMonth = datetime.date(now.year + curMonth.month // 12, (now.month % 12) + 1, 1)

for city, info in stations.city.items():
    if info.accuweatherStationId is not None:
        # Make sure the per-city output directory exists.
        try:
            os.mkdir('{city}/accuweather'.format(**locals()))
        except FileExistsError:
            pass
        # Some cities use a different name on AccuWeather than locally.
        cityName = city
        if info.accuweatherCityName is not None:
            cityName = info.accuweatherCityName
        for fn, m in (('thisMonth', curMonth), ('nextMonth', nextMonth)):
            monthName = m.strftime('%B').lower()
            monthStr = m.strftime('%m/1/%Y').lower()
            # Randomize the timeout a little so repeated runs desynchronize.
            timeout = random.randint(8, 12)
            print('while true; do wget -nv -T {timeout} -O {city}/accuweather/{fn}.html "http://www.accuweather.com/en/ca/{cityName}/k1s/{monthName}-weather/{info.accuweatherStationId}?monyr={monthStr}&view=table" && break; done'.format(**locals()))
endlisnis/weather-records
updateAccuWeather.py
Python
gpl-3.0
1,064
import random
import numpy as np
from collections import namedtuple


class GA:
    """
    Genetic Algorithm for a simple optimization problem: find a chromosome
    (vector of integers) whose element sum is as close to `target` as
    possible.

    Parameters
    ----------
    generation : int
        number of iteration to train the algorithm

    pop_size : int
        number of chromosomes in the population

    chromo_size : int
        number of possible values (genes) per chromosome

    low, high : int
        lower_bound and upper_bound possible value of the randomly
        generated chromosome (both ends inclusive)

    retain_rate : float 0 ~ 1
        the fraction of the best chromosome to retain. used to mate
        the children for the next generation

    mutate_rate : float 0 ~ 1
        the probability that each chromosome will mutate
    """

    def __init__(
        self, generation, pop_size, chromo_size,
        low, high, retain_rate, mutate_rate
    ):
        self.low = low
        self.high = high
        self.pop_size = pop_size
        self.chromo_size = chromo_size
        self.generation = generation
        # number of elite chromosomes kept as parents each generation
        self.retain_len = int(pop_size * retain_rate)
        self.mutate_rate = mutate_rate
        # lightweight record pairing a chromosome with its fitness cost;
        # cost comes first so sorted() orders by fitness
        self.info = namedtuple('info', ['cost', 'chromo'])

    def fit(self, target):
        """
        Run the evolutionary loop.

        target : int
            the targeted solution (desired chromosome sum)
        """
        # randomly generate the initial population, and evaluate its cost
        array_size = self.pop_size, self.chromo_size
        # np.random.randint's high bound is exclusive, hence the + 1
        pop = np.random.randint(self.low, self.high + 1, array_size)
        graded_pop = self._compute_cost(pop, target)

        # store the best chromosome and its cost for each generation,
        # so we can get an idea of when the algorithm converged
        self.generation_history = []
        for _ in range(self.generation):
            graded_pop, generation_best = self._evolve(graded_pop, target)
            self.generation_history.append(generation_best)

        # overall best is simply the last (elitist selection is monotone)
        self.best = self.generation_history[self.generation - 1]
        self.is_fitted = True
        return self

    def _compute_cost(self, pop, target):
        """
        combine the cost and chromosome into one list and sort them
        in ascending order (lowest cost, i.e. best, first)
        """
        # cost = absolute distance of the chromosome sum from the target
        cost = np.abs(np.sum(pop, axis=1) - target)
        graded = [self.info(c, list(p)) for p, c in zip(pop, cost)]
        graded = sorted(graded)
        return graded

    def _evolve(self, graded_pop, target):
        """
        core method that does the crossover,
        mutation to generate the possibly best children for the next
        generation
        """
        # retain the best chromos (number determined by the retain_len)
        graded_pop = graded_pop[:self.retain_len]
        parent = [p.chromo for p in graded_pop]

        # generate the children for the next generation
        children = []
        while len(children) < self.pop_size:
            child = self._crossover(parent)
            child = self._mutate(child)
            children.append(child)

        # evaluate the children chromosome and retain the overall best,
        # overall simply means the best from the parent and the children, where
        # the size retained is determined by the population size
        graded_children = self._compute_cost(children, target)
        graded_pop.extend(graded_children)
        graded_pop = sorted(graded_pop)
        graded_pop = graded_pop[:self.pop_size]

        # also return the current generation's best chromosome and its cost
        generation_best = graded_pop[0]
        return graded_pop, generation_best

    def _crossover(self, parent):
        """
        mate the children by randomly choosing two parents and mix
        the first half element of one parent with the later half
        element of the other
        """
        index1, index2 = random.sample(range(self.retain_len), k=2)
        male, female = parent[index1], parent[index2]
        pivot = len(male) // 2
        child = male[:pivot] + female[pivot:]
        return child

    def _mutate(self, child):
        """
        randomly change one element of the chromosome if it exceeds
        the user-specified threshold (mutate_rate)
        """
        if self.mutate_rate > random.random():
            idx_to_mutate = random.randrange(self.chromo_size)
            # randint is inclusive on both ends, matching the initial pop
            child[idx_to_mutate] = random.randint(self.low, self.high)
        return child
ethen8181/machine-learning
ga/ga.py
Python
mit
3,802
import datetime as dt
from time import mktime

import numpy as np

from bokeh.core.properties import value
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import (ColumnDataSource, DatetimeAxis, DatetimeTickFormatter,
                          FixedTicker, Legend, LegendItem, Line, Patch, Plot,
                          Text,)
from bokeh.resources import INLINE
from bokeh.sampledata import daylight
from bokeh.util.browser import view

# Sunrise/sunset data for Warsaw over the whole of 2013; the Summer
# column flags the daylight-saving (CEST) period.
df = daylight.daylight_warsaw_2013

source = ColumnDataSource(dict(
    dates=df.Date,
    sunrises=df.Sunrise,
    sunsets=df.Sunset,
))

# Closed polygon between the sunrise and sunset curves: dates go forward
# along the sunrise edge and backwards along the sunset edge.
patch1_source = ColumnDataSource(dict(
    dates=np.concatenate((df.Date, df.Date[::-1])),
    times=np.concatenate((df.Sunrise, df.Sunset[::-1]))
))

# Same construction restricted to the daylight-saving period.
summer = df[df.Summer == 1]

patch2_source = ColumnDataSource(dict(
    dates=np.concatenate((summer.Date, summer.Date[::-1])),
    times=np.concatenate((summer.Sunrise, summer.Sunset[::-1]))
))

# Index of the first summer row and of the first non-summer row after it.
summer_start = df.Summer.tolist().index(1)
summer_end = df.Summer.tolist().index(0, summer_start)

calendar_start = df.Date.iloc[0]
summer_start = df.Date.iloc[summer_start]
summer_end = df.Date.iloc[summer_end]
calendar_end = df.Date.iloc[-1]

# Midpoints of the three timezone regimes, used to place the labels.
d1 = calendar_start + (summer_start - calendar_start) / 2
d2 = summer_start + (summer_end - summer_start) / 2
d3 = summer_end + (calendar_end - summer_end) / 2

text_source = ColumnDataSource(dict(
    dates=[d1, d2, d3],
    times=[dt.time(11, 30)] * 3,
    texts=["CST (UTC+1)", "CEST (UTC+2)", "CST (UTC+1)"],
))

plot = Plot(plot_width=800, plot_height=400)
plot.title.text = "Daylight Hours 2013 - Warsaw, Poland"
plot.toolbar_location = None
plot.x_range.range_padding = 0

# Glyph order matters: the dark full-year patch first, the light
# summer patch over it, then the lines and labels on top.
patch1 = Patch(x="dates", y="times", fill_color="#282e54")
plot.add_glyph(patch1_source, patch1)

patch2 = Patch(x="dates", y="times", fill_color="#ffdd91")
plot.add_glyph(patch2_source, patch2)

sunrise_line = Line(x="dates", y="sunrises", line_color="orange",
                    line_width=4)
sunrise_line_renderer = plot.add_glyph(source, sunrise_line)

sunset_line = Line(x="dates", y="sunsets", line_color="crimson",
                   line_width=4)
sunset_line_renderer = plot.add_glyph(source, sunset_line)

text = Text(x="dates", y="times", text="texts",
            text_align="center", text_color="grey")
plot.add_glyph(text_source, text)

xformatter = DatetimeTickFormatter(months="%b %d %Y")
min_time = dt.datetime.min.time()
# Ticks only at the DST boundaries, in milliseconds since the epoch
# (bokeh's datetime axis unit).
xticker = FixedTicker(ticks=[
    mktime(dt.datetime.combine(summer_start, min_time).timetuple()) * 1000,
    mktime(dt.datetime.combine(summer_end, min_time).timetuple()) * 1000
])
xaxis = DatetimeAxis(formatter=xformatter, ticker=xticker)
plot.add_layout(xaxis, 'below')

yaxis = DatetimeAxis()
yaxis.formatter.hours = ['%H:%M']
plot.add_layout(yaxis, 'left')

legend = Legend(items=[
    LegendItem(label=value('sunset'), renderers=[sunset_line_renderer]),
    LegendItem(label=value('sunrise'), renderers=[sunrise_line_renderer]),
])
plot.add_layout(legend)

doc = Document()
doc.add_root(plot)

if __name__ == "__main__":
    doc.validate()
    filename = "daylight.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Daylight Plot"))
    print("Wrote %s" % filename)
    view(filename)
ericmjl/bokeh
examples/models/file/daylight.py
Python
bsd-3-clause
3,185
# encoding: utf-8 # -*- test-case-name: IPython.kernel.test.test_enginepb -*- """ Expose the IPython EngineService using the Foolscap network protocol. Foolscap is a high-performance and secure network protocol. """ __docformat__ = "restructuredtext en" #------------------------------------------------------------------------------- # Copyright (C) 2008 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Imports #------------------------------------------------------------------------------- import os, time import cPickle as pickle from twisted.python import components, log, failure from twisted.python.failure import Failure from twisted.internet import defer, reactor, threads from twisted.internet.interfaces import IProtocolFactory from zope.interface import Interface, implements, Attribute from twisted.internet.base import DelayedCall DelayedCall.debug = True from foolscap import Referenceable, DeadReferenceError from foolscap.referenceable import RemoteReference from IPython.kernel.pbutil import packageFailure, unpackageFailure from IPython.kernel.util import printer from IPython.kernel.twistedutil import gatherBoth from IPython.kernel import newserialized from IPython.kernel.error import ProtocolError from IPython.kernel import controllerservice from IPython.kernel.controllerservice import IControllerBase from IPython.kernel.engineservice import \ IEngineBase, \ IEngineQueued, \ EngineService, \ StrictDict from IPython.kernel.pickleutil import \ can, \ canDict, \ canSequence, \ uncan, \ uncanDict, \ uncanSequence #------------------------------------------------------------------------------- # The client (Engine) side of things 
#-------------------------------------------------------------------------------

# Expose a FC interface to the EngineService


class IFCEngine(Interface):
    """An interface that exposes an EngineService over Foolscap.

    The methods in this interface are similar to those from IEngine,
    but their arguments and return values slightly different to reflect
    that FC cannot send arbitrary objects. We handle this by pickling/
    unpickling that the two endpoints.

    If a remote or local exception is raised, the appropriate Failure
    will be returned instead.
    """
    pass


class FCEngineReferenceFromService(Referenceable, object):
    """Adapt an `IEngineBase` to an `IFCEngine` implementer.

    This exposes an `IEngineBase` to foolscap by adapting it to a
    `foolscap.Referenceable`.

    See the documentation of the `IEngineBase` methods for more details.
    """

    implements(IFCEngine)

    def __init__(self, service):
        assert IEngineBase.providedBy(service), \
            "IEngineBase is not provided by" + repr(service)
        self.service = service
        self.collectors = {}

    def remote_get_id(self):
        # Plain attribute read; no deferred needed.
        return self.service.id

    def remote_set_id(self, id):
        self.service.id = id

    def _checkProperties(self, result):
        # If the service's properties changed since the last call, ship a
        # pickled copy back alongside the result so the controller's view
        # stays in sync; otherwise send a falsy marker.
        dosync = self.service.properties.modified
        self.service.properties.modified = False
        return (dosync and pickle.dumps(self.service.properties, 2)), result

    def remote_execute(self, lines):
        d = self.service.execute(lines)
        d.addErrback(packageFailure)
        d.addCallback(self._checkProperties)
        d.addErrback(packageFailure)
        #d.addCallback(lambda r: log.msg("Got result: " + str(r)))
        return d

    #---------------------------------------------------------------------------
    # Old version of push
    #---------------------------------------------------------------------------

    def remote_push(self, pNamespace):
        # Unpickle eagerly so a bad payload is reported as a packaged
        # Failure rather than crashing inside the service call.
        try:
            namespace = pickle.loads(pNamespace)
        except:
            return defer.fail(failure.Failure()).addErrback(packageFailure)
        else:
            return self.service.push(namespace).addErrback(packageFailure)

    #---------------------------------------------------------------------------
    # pull
    #---------------------------------------------------------------------------

    def remote_pull(self, keys):
        d = self.service.pull(keys)
        # Pickle (protocol 2) before sending over the wire.
        d.addCallback(pickle.dumps, 2)
        d.addErrback(packageFailure)
        return d

    #---------------------------------------------------------------------------
    # push/pullFuction
    #---------------------------------------------------------------------------

    def remote_push_function(self, pNamespace):
        try:
            namespace = pickle.loads(pNamespace)
        except:
            return defer.fail(failure.Failure()).addErrback(packageFailure)
        else:
            # The usage of globals() here is an attempt to bind any pickled functions
            # to the globals of this module.  What we really want is to have it bound
            # to the globals of the callers module.  This will require walking the
            # stack.  BG 10/3/07.
            namespace = uncanDict(namespace, globals())
            return self.service.push_function(namespace).addErrback(packageFailure)

    def remote_pull_function(self, keys):
        d = self.service.pull_function(keys)
        # A single key yields a bare function; several yield a sequence,
        # hence the two "can" variants before pickling.
        if len(keys) > 1:
            d.addCallback(canSequence)
        elif len(keys) == 1:
            d.addCallback(can)
        d.addCallback(pickle.dumps, 2)
        d.addErrback(packageFailure)
        return d

    #---------------------------------------------------------------------------
    # Other methods
    #---------------------------------------------------------------------------

    def remote_get_result(self, i=None):
        return self.service.get_result(i).addErrback(packageFailure)

    def remote_reset(self):
        return self.service.reset().addErrback(packageFailure)

    def remote_kill(self):
        return self.service.kill().addErrback(packageFailure)

    def remote_keys(self):
        return self.service.keys().addErrback(packageFailure)

    #---------------------------------------------------------------------------
    # push/pull_serialized
    #---------------------------------------------------------------------------

    def remote_push_serialized(self, pNamespace):
        try:
            namespace = pickle.loads(pNamespace)
        except:
            return defer.fail(failure.Failure()).addErrback(packageFailure)
        else:
            d = self.service.push_serialized(namespace)
            return d.addErrback(packageFailure)

    def remote_pull_serialized(self, keys):
        d = self.service.pull_serialized(keys)
        d.addCallback(pickle.dumps, 2)
        d.addErrback(packageFailure)
        return d

    #---------------------------------------------------------------------------
    # Properties interface
    #---------------------------------------------------------------------------

    def remote_set_properties(self, pNamespace):
        try:
            namespace = pickle.loads(pNamespace)
        except:
            return defer.fail(failure.Failure()).addErrback(packageFailure)
        else:
            return self.service.set_properties(namespace).addErrback(packageFailure)

    def remote_get_properties(self, keys=None):
        d = self.service.get_properties(keys)
        d.addCallback(pickle.dumps, 2)
        d.addErrback(packageFailure)
        return d

    def remote_has_properties(self, keys):
        d = self.service.has_properties(keys)
        d.addCallback(pickle.dumps, 2)
        d.addErrback(packageFailure)
        return d

    def remote_del_properties(self, keys):
        d = self.service.del_properties(keys)
        d.addErrback(packageFailure)
        return d

    def remote_clear_properties(self):
        d = self.service.clear_properties()
        d.addErrback(packageFailure)
        return d


components.registerAdapter(FCEngineReferenceFromService,
                           IEngineBase, IFCEngine)


#-------------------------------------------------------------------------------
# Now the server (Controller) side of things
#-------------------------------------------------------------------------------

class EngineFromReference(object):
    """Adapt a `RemoteReference` to an `IEngineBase` implementing object.

    When an engine connects to a controller, it calls the `register_engine`
    method of the controller and passes the controller a `RemoteReference`
    to itself.  This class is used to adapt this `RemoteReference` to an
    object that implements the full `IEngineBase` interface.

    See the documentation of `IEngineBase` for details on the methods.
    """

    implements(IEngineBase)

    def __init__(self, reference):
        self.reference = reference
        self._id = None
        self._properties = StrictDict()
        self.currentCommand = None

    def callRemote(self, *args, **kwargs):
        # A dead reference means the engine went away: fire the disconnect
        # notifier once, stop listening, and fail the caller's deferred.
        try:
            return self.reference.callRemote(*args, **kwargs)
        except DeadReferenceError:
            self.notifier()
            self.stopNotifying(self.notifier)
            return defer.fail()

    def get_id(self):
        """Return the Engines id."""
        return self._id

    def set_id(self, id):
        """Set the Engines id."""
        self._id = id
        return self.callRemote('set_id', id)

    id = property(get_id, set_id)

    def syncProperties(self, r):
        # Results from the engine may be a (pickled_properties, result)
        # pair (see _checkProperties on the other side); anything that
        # doesn't unpack that way is passed through untouched.
        try:
            psync, result = r
        except (ValueError, TypeError):
            return r
        else:
            if psync:
                log.msg("sync properties")
                pick = self.checkReturnForFailure(psync)
                if isinstance(pick, failure.Failure):
                    self.properties = pick
                    return pick
                else:
                    self.properties = pickle.loads(pick)
            return result

    def _set_properties(self, dikt):
        self._properties.clear()
        self._properties.update(dikt)

    def _get_properties(self):
        # A Failure stored here is re-raised on access so property errors
        # surface at the point of use.
        if isinstance(self._properties, failure.Failure):
            self._properties.raiseException()
        return self._properties

    properties = property(_get_properties, _set_properties)

    #---------------------------------------------------------------------------
    # Methods from IEngine
    #---------------------------------------------------------------------------

    #---------------------------------------------------------------------------
    # execute
    #---------------------------------------------------------------------------

    def execute(self, lines):
        # self._needProperties = True
        d = self.callRemote('execute', lines)
        d.addCallback(self.syncProperties)
        return d.addCallback(self.checkReturnForFailure)

    #---------------------------------------------------------------------------
    # push
    #---------------------------------------------------------------------------

    def push(self, namespace):
        try:
            package = pickle.dumps(namespace, 2)
        except:
            return defer.fail(failure.Failure())
        else:
            if isinstance(package, failure.Failure):
                return defer.fail(package)
            else:
                d = self.callRemote('push', package)
                return d.addCallback(self.checkReturnForFailure)

    #---------------------------------------------------------------------------
    # pull
    #---------------------------------------------------------------------------

    def pull(self, keys):
        d = self.callRemote('pull', keys)
        d.addCallback(self.checkReturnForFailure)
        d.addCallback(pickle.loads)
        return d

    #---------------------------------------------------------------------------
    # push/pull_function
    #---------------------------------------------------------------------------

    def push_function(self, namespace):
        try:
            # "can" the functions so they survive pickling.
            package = pickle.dumps(canDict(namespace), 2)
        except:
            return defer.fail(failure.Failure())
        else:
            if isinstance(package, failure.Failure):
                return defer.fail(package)
            else:
                d = self.callRemote('push_function', package)
                return d.addCallback(self.checkReturnForFailure)

    def pull_function(self, keys):
        d = self.callRemote('pull_function', keys)
        d.addCallback(self.checkReturnForFailure)
        d.addCallback(pickle.loads)
        # The usage of globals() here is an attempt to bind any pickled functions
        # to the globals of this module.  What we really want is to have it bound
        # to the globals of the callers module.  This will require walking the
        # stack.  BG 10/3/07.
        if len(keys) == 1:
            d.addCallback(uncan, globals())
        elif len(keys) > 1:
            d.addCallback(uncanSequence, globals())
        return d

    #---------------------------------------------------------------------------
    # Other methods
    #---------------------------------------------------------------------------

    def get_result(self, i=None):
        return self.callRemote('get_result', i).addCallback(self.checkReturnForFailure)

    def reset(self):
        self._refreshProperties = True
        d = self.callRemote('reset')
        d.addCallback(self.syncProperties)
        return d.addCallback(self.checkReturnForFailure)

    def kill(self):
        #this will raise pb.PBConnectionLost on success
        d = self.callRemote('kill')
        d.addCallback(self.syncProperties)
        d.addCallback(self.checkReturnForFailure)
        d.addErrback(self.killBack)
        return d

    def killBack(self, f):
        # NOTE(review): "filling engine" looks like a typo for
        # "killing engine" in this log message -- confirm before changing,
        # since log text may be grepped elsewhere.
        log.msg('filling engine: %s' % f)
        return None

    def keys(self):
        return self.callRemote('keys').addCallback(self.checkReturnForFailure)

    #---------------------------------------------------------------------------
    # Properties methods
    #---------------------------------------------------------------------------

    def set_properties(self, properties):
        try:
            package = pickle.dumps(properties, 2)
        except:
            return defer.fail(failure.Failure())
        else:
            if isinstance(package, failure.Failure):
                return defer.fail(package)
            else:
                d = self.callRemote('set_properties', package)
                return d.addCallback(self.checkReturnForFailure)
                # NOTE(review): the following return is unreachable
                # (dead code left behind by an earlier refactor?).
                return d

    def get_properties(self, keys=None):
        d = self.callRemote('get_properties', keys)
        d.addCallback(self.checkReturnForFailure)
        d.addCallback(pickle.loads)
        return d

    def has_properties(self, keys):
        d = self.callRemote('has_properties', keys)
        d.addCallback(self.checkReturnForFailure)
        d.addCallback(pickle.loads)
        return d

    def del_properties(self, keys):
        d = self.callRemote('del_properties', keys)
        d.addCallback(self.checkReturnForFailure)
        # d.addCallback(pickle.loads)
        return d

    def clear_properties(self):
        d = self.callRemote('clear_properties')
        d.addCallback(self.checkReturnForFailure)
        return d

    #---------------------------------------------------------------------------
    # push/pull_serialized
    #---------------------------------------------------------------------------

    def push_serialized(self, namespace):
        """Older version of pushSerialize."""
        try:
            package = pickle.dumps(namespace, 2)
        except:
            return defer.fail(failure.Failure())
        else:
            if isinstance(package, failure.Failure):
                return defer.fail(package)
            else:
                d = self.callRemote('push_serialized', package)
                return d.addCallback(self.checkReturnForFailure)

    def pull_serialized(self, keys):
        d = self.callRemote('pull_serialized', keys)
        d.addCallback(self.checkReturnForFailure)
        d.addCallback(pickle.loads)
        return d

    #---------------------------------------------------------------------------
    # Misc
    #---------------------------------------------------------------------------

    def checkReturnForFailure(self, r):
        """See if a returned value is a pickled Failure object.

        To distinguish between general pickled objects and pickled Failures,
        the other side should prepend the string FAILURE: to any pickled
        Failure.
        """
        return unpackageFailure(r)


components.registerAdapter(EngineFromReference,
                           RemoteReference, IEngineBase)


#-------------------------------------------------------------------------------
# Now adapt an IControllerBase to incoming FC connections
#-------------------------------------------------------------------------------

class IFCControllerBase(Interface):
    """
    Interface that tells how an Engine sees a Controller.

    In our architecture, the Controller listens for Engines to connect
    and register.  This interface defines that registration method as it is
    exposed over the Foolscap network protocol
    """

    def remote_register_engine(self, engineReference, id=None, pid=None, pproperties=None):
        """
        Register new engine on the controller.

        Engines must call this upon connecting to the controller if they
        want to do work for the controller.

        See the documentation of `IControllerCore` for more details.
        """


class FCRemoteEngineRefFromService(Referenceable):
    """
    Adapt an `IControllerBase` to an `IFCControllerBase`.
    """

    implements(IFCControllerBase)

    def __init__(self, service):
        assert IControllerBase.providedBy(service), \
            "IControllerBase is not provided by " + repr(service)
        self.service = service

    def remote_register_engine(self, engine_reference, id=None,
                               pid=None, pproperties=None):
        # First adapt the engine_reference to a basic non-queued engine
        engine = IEngineBase(engine_reference)
        if pproperties:
            engine.properties = pickle.loads(pproperties)
        # Make it an IQueuedEngine before registration
        remote_engine = IEngineQueued(engine)
        # Get the ip/port of the remote side
        peer_address = engine_reference.tracker.broker.transport.getPeer()
        ip = peer_address.host
        port = peer_address.port
        reg_dict = self.service.register_engine(remote_engine, id, ip, port, pid)
        # Now setup callback for disconnect and unregistering the engine
        def notify(*args):
            return self.service.unregister_engine(reg_dict['id'])
        engine_reference.tracker.broker.notifyOnDisconnect(notify)
        engine.notifier = notify
        engine.stopNotifying = engine_reference.tracker.broker.dontNotifyOnDisconnect
        return reg_dict


components.registerAdapter(FCRemoteEngineRefFromService,
                           IControllerBase, IFCControllerBase)
mastizada/kuma
vendor/packages/ipython/IPython/kernel/enginefc.py
Python
mpl-2.0
19,756
import png import sys import os args = list(sys.argv)[1:] def convert(arg): r = png.Reader(arg) bitmap = r.read()[2] stdColLen = 0 switcher = { "1": 20, "2": 28, "3": 42, } switcherWidth = { "1": 10, "2": 14, "3": 20, } stdColLen = switcher[os.path.basename(arg)[0]] stdWidth = switcherWidth[os.path.basename(arg)[0]] with open(arg + '.conv', 'wb') as f: numRow = 0 for row in bitmap: extendedRow = list(row) extendedRow = extendedRow[1::2] if len(extendedRow) < stdWidth: extendedRow += [0]*(stdWidth - len(extendedRow)) f.write(bytearray(extendedRow)) numRow += 1 while numRow < stdColLen: f.write(bytearray([0]*stdWidth)) numRow += 1 for arg in args: convert(arg)
TakefiveInteractive/TedkOS
gui/font_to_png_tedkos.py
Python
gpl-2.0
871
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/thumbor/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 globo.com thumbor@googlegroups.com from thumbor.filters import BaseFilter, filter_method from thumbor.utils import logger ALLOWED_FORMATS = ["png", "jpeg", "jpg", "gif", "webp"] class Filter(BaseFilter): @filter_method(BaseFilter.String) async def format(self, file_format): if file_format.lower() not in ALLOWED_FORMATS: logger.debug("Format not allowed: %s", file_format.lower()) self.context.request.format = None else: logger.debug("Format specified: %s", file_format.lower()) self.context.request.format = file_format.lower()
kkopachev/thumbor
thumbor/filters/format.py
Python
mit
823
#!/usr/bin/python # -*- coding: utf-8 -*- """ Implements the RFN algorithm as easily understandable code. Copyright © 2015 Thomas Unterthiner Licensed under GPL, version 2 or a later (see LICENSE.txt) Contains a very basic CPU and a GPU implementation that is easy to understand. This code is meant as an instructional ressource, and not suited for production runs. The GPU implementation assumes that scikits.cuda.linalg works properly (which in turn requires CULA). Also, this requires the current development version of scikits.cuda (as of 2014-08-11). """ import time import numpy as np #from scikits.cuda import linalg as la #import pycuda.curandom as curand #import pycuda.gpuarray as gpu #import pycuda.elementwise as el #import pycuda.driver as drv #from pycuda.compiler import SourceModule #from pycuda.tools import DeviceMemoryPool #from scikits.cuda.cublas import cublasSgemv #from pycuda.elementwise import ElementwiseKernel #from pycuda import cumath _dropout_kernel = None _saltpepper_kernel = None _rng_state = None _rng_blocks = 128 _rng_threads = 128 #_mempool = DeviceMemoryPool() def init_rng(seed): global _dropout_kernel, _saltpepper_kernel, _rng_state, _rng_threads, _rng_blocks from pycuda.characterize import sizeof ds = sizeof("curandState", "#include <curand_kernel.h>") _rng_state = drv.mem_alloc(_rng_threads * _rng_blocks * ds) src = SourceModule( ''' #include <curand_kernel.h> extern "C" { __global__ void setup_rng(curandState* rng_state, const unsigned seed) { const unsigned tid = blockIdx.x*blockDim.x+threadIdx.x; curand_init(seed, tid, 0, &rng_state[tid]); } __global__ void dropout_eltw(float* x, const unsigned size, float dropout_rate, curandState* rng_state) { const unsigned tid = blockIdx.x*blockDim.x+threadIdx.x; const unsigned num_threads = gridDim.x*blockDim.x; curandState localState = rng_state[tid]; for (unsigned i = tid; i < size; i += num_threads) x[i] = (curand_uniform(&localState) < dropout_rate) ? 
0.0 : x[i]; rng_state[tid] = localState; } __global__ void saltpepper_eltw(float* x, const unsigned size, float dropout_rate, curandState* rng_state) { const unsigned tid = blockIdx.x*blockDim.x+threadIdx.x; const unsigned num_threads = gridDim.x*blockDim.x; curandState localState = rng_state[tid]; for (unsigned i = tid; i < size; i += num_threads) x[i] = (curand_uniform(&localState) < dropout_rate) ? 0.0 : x[i]; x[i] = (curand_uniform(&localState) < dropout_rate) ? 1.0 : x[i]; rng_state[tid] = localState; } } ''', no_extern_c=True) setup_rng = src.get_function("setup_rng") setup_rng.prepare("Pi") setup_rng.prepared_call((_rng_threads, 1, 1), (_rng_blocks, 1, 1), _rng_state, np.uint32(seed)) _dropout_kernel = src.get_function("dropout_eltw") _dropout_kernel.prepare("PifP") _saltpepper_kernel = src.get_function("saltpepper_eltw") _saltpepper_kernel.prepare("PifP") def dropout(X, dropout_rate): _dropout_kernel.prepared_call((_rng_threads, 1, 1), (_rng_blocks, 1, 1), X.gpudata, np.prod(X.shape), np.float32(dropout_rate), _rng_state) return X def saltpepper_noise(X, dropout_rate): _saltpepper_kernel.prepared_call((_rng_threads, 1, 1), (_rng_blocks, 1, 1), X.gpudata, np.prod(X.shape), np.float32(dropout_rate), _rng_state) return X def to_unit_variance(H): ''' Scales H so that column has a variance of 1. 
''' from scikits.cuda.misc import _global_cublas_handle as cublas_handle ones = gpu.empty((H.shape[0], 1), np.float32, allocator=_mempool.allocate) ones.fill(1.0) Hsq = gpu.empty(H.shape, np.float32, allocator=_mempool.allocate) mean = gpu.empty((1, H.shape[1]), np.float32, allocator=_mempool.allocate) cublasSgemv(cublas_handle, "n", H.shape[1], H.shape[0], 1.0/H.shape[0], H.gpudata, H.shape[1], ones.gpudata, 1, 0.0, mean.gpudata, 1) _unitvariance_step1_kernel(H, mean, Hsq, H.shape[1]) cublasSgemv(cublas_handle, "n", Hsq.shape[1], H.shape[0], 1.0/H.shape[0], Hsq.gpudata, H.shape[1], ones.gpudata, 1, 0.0, mean.gpudata, 1) _unitvariance_step2_kernel(mean, H.shape[1]) _unitvariance_step3_kernel(H, mean, H.shape[1]) return H def calculate_H_gpu(X, W, P): WPW = la.add_diag(P, la.dot(W, W, "t", "n")) tmp = la.dot(W, la.inv(WPW, overwrite=True)) H = la.dot(X, tmp, "n", "t") H = gpu.maximum(H, 0) H = to_unit_variance(H) return H, tmp def train_rfn_gpu(X, n_hidden, n_iter, learnrateW, learnratePsi, dropout_rate, input_droput_rate, minPsi=0.1, seed=32): k = n_hidden n, m = X.shape W = np.random.normal(scale=0.01, size=(k, m)).astype(np.float32) P = np.array([0.1] * m, dtype=np.float32) XXdiag = np.diag(np.dot(X.T, X) / n).copy() # explicit copy to avoid numpy 1.8 warning W = gpu.to_gpu(W, allocator=_mempool.allocate) P = gpu.to_gpu(P, allocator=_mempool.allocate) X = gpu.to_gpu(X, allocator=_mempool.allocate) XXdiag = gpu.to_gpu(XXdiag, allocator=_mempool.allocate) I = la.eye(k, dtype=np.float32) init_rng(seed) t0 = time.time() for cur_iter in range(n_iter): H, tmp = calculate_H_gpu(X, W, P) if dropout_rate > 0: dropout(H, dropout_rate) Xtmp = X if input_dropout_rate > 0: Xtmp = X.copy() saltpepper_noise(Xtmp, input_dropout_rate) U = la.dot(Xtmp, H, "t", "n") / n S = la.dot(H, H, "t", "n") / n S += I S -= la.dot(tmp, W, "n", "t") Cii = la.dot(la.dot(W, S, "t") - 2*U, W) Sinv = la.inv(S, overwrite=True) dW = la.dot(Sinv, U, "n", "t") - W dP = XXdiag + la.diag(Cii) - P W += 
learnrateW * dW P += learnratePsi * dP P = gpu.maximum(P, minPsi) if cur_iter % 25 == 0: print "iter %3d (elapsed time: %5.2fs)" % (cur_iter, time.time() - t0) return W.get(), P.get() def train_rfn_cpu(X, n_hidden, n_iter, learnrateW, learnratePsi, dropout_rate): n, m = X.shape k = n_hidden W = np.absolute(np.random.normal(scale=0.01, size=(k, m)).astype(np.float32)) P = np.array([0.1] * m) H = np.zeros((k, n), dtype=np.float32) C = np.dot(X.T, X) / n t0 = time.time() for cur_iter in range(n_iter): I = np.eye(k, dtype=np.float32) tmp = I + np.dot(W * 1.0/P, W.T) tmp = np.linalg.inv(tmp) Wout = np.dot(tmp, W) * (1.0/P) H = np.dot(Wout, X.T) H = np.maximum(0, H) H /= (H.std(1) + 1e-9)[:, None] if dropout_rate > 0: H *= np.random.binomial(1, 1-dropout_rate, size=H.shape).astype(np.float32) #E-step2 U = np.dot(X.T, H.T) / n S = (np.dot(H, H.T) + tmp) / n #M-step Cii = C - np.dot(-2*U + np.dot(W.T, S), W) #error dP = np.diag(Cii) - P #print np.amin(H) #print '\n' #if cur_iter != n_iter - 1: # W = np.maximum(W, 0.0) #W = W*0.99 dW = np.dot(np.linalg.inv(S), U.T) - W W += learnrateW * dW W = np.maximum(0, W) #W /= (W.std(1) + 1e-9)[:, None] P += learnratePsi * dP P = np.maximum(P, 0.1) if cur_iter % 1000 == 0: print "iter %3d (elllapsed time: %5.2fs)" % (cur_iter, time.time() - t0) #print H return W, H, P
Tamme/mutationalsignaturesNCSUT
RFN/cpu_final_basic_python_implementation.py
Python
gpl-3.0
7,707
# coding=utf-8 """File object write workaround procedure.""" import codecs from .chars import * # When 'fp' is a FILE-like object, the fp.write() method will # not handle unicode objects with chars beyond ASCII; it # will throw UnicodeEncodeError -- there is a wrapper # codecs.open() that returns a FILE-like object that # handles encoding, but a wrapper for fdopen() is absent # (or undocumented) -- so use this fp_write() if py_v_is_3: def fp_write(f_object, s): try: return f_object.write(s) except: # TODO: specify exceptions! if _ucode_type == None: _cod = 'ascii' else: _cod = _ucode_type return f_object.write(bytes(s, _cod)) else: def fp_write(f_object, s): if isinstance(s, unicode): if _ucode_type == None: _cod = 'ascii' else: _cod = _ucode_type return f_object.write(codecs.encode(_Tnec(s), _cod)) return f_object.write(s)
ehy/cprec
wxdvdbackuppkg/fp_write.py
Python
gpl-2.0
1,040
#!/usr/bin/env python """settings.py Udacity conference server-side Python App Engine app user settings $Id$ created/forked from conference.py by wesc on 2014 may 24 """ # Replace the following lines with client IDs obtained from the APIs # Console or Cloud Console. WEB_CLIENT_ID = '417931993762-2pcig2vqpqjqob0moripd5fu2aifuupg.apps.googleusercontent.com'
biratrai/Udacity-ConferenceCentral-P4
settings.py
Python
apache-2.0
365
from collections import defaultdict from datetime import datetime import json import logging import pytz from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.db import connection from django.http import HttpResponse from django.utils.timezone import UTC import pystache_custom as pystache from opaque_keys.edx.locations import i4xEncoder from opaque_keys.edx.keys import CourseKey from xmodule.modulestore.django import modulestore from django_comment_common.models import Role, FORUM_ROLE_STUDENT from django_comment_client.permissions import check_permissions_by_view, has_permission from django_comment_client.settings import MAX_COMMENT_DEPTH from edxmako import lookup_template from courseware.access import has_access from openedx.core.djangoapps.course_groups.cohorts import ( get_course_cohort_settings, get_cohort_by_id, get_cohort_id, is_commentable_cohorted, is_course_cohorted ) from openedx.core.djangoapps.course_groups.models import CourseUserGroup log = logging.getLogger(__name__) def extract(dic, keys): return {k: dic.get(k) for k in keys} def strip_none(dic): return dict([(k, v) for k, v in dic.iteritems() if v is not None]) def strip_blank(dic): def _is_blank(v): return isinstance(v, str) and len(v.strip()) == 0 return dict([(k, v) for k, v in dic.iteritems() if not _is_blank(v)]) # TODO should we be checking if d1 and d2 have the same keys with different values? 
def merge_dict(dic1, dic2): return dict(dic1.items() + dic2.items()) def get_role_ids(course_id): roles = Role.objects.filter(course_id=course_id).exclude(name=FORUM_ROLE_STUDENT) return dict([(role.name, list(role.users.values_list('id', flat=True))) for role in roles]) def has_forum_access(uname, course_id, rolename): try: role = Role.objects.get(name=rolename, course_id=course_id) except Role.DoesNotExist: return False return role.users.filter(username=uname).exists() def get_accessible_discussion_modules(course, user, include_all=False): # pylint: disable=invalid-name """ Return a list of all valid discussion modules in this course that are accessible to the given user. """ all_modules = modulestore().get_items(course.id, qualifiers={'category': 'discussion'}) def has_required_keys(module): for key in ('discussion_id', 'discussion_category', 'discussion_target'): if getattr(module, key, None) is None: log.warning("Required key '%s' not in discussion %s, leaving out of category map" % (key, module.location)) return False return True return [ module for module in all_modules if has_required_keys(module) and (include_all or has_access(user, 'load', module, course.id)) ] def get_discussion_id_map(course, user): """ Transform the list of this course's discussion modules (visible to a given user) into a dictionary of metadata keyed by discussion_id. 
""" def get_entry(module): # pylint: disable=missing-docstring discussion_id = module.discussion_id title = module.discussion_target last_category = module.discussion_category.split("/")[-1].strip() return (discussion_id, {"location": module.location, "title": last_category + " / " + title}) return dict(map(get_entry, get_accessible_discussion_modules(course, user))) def _filter_unstarted_categories(category_map): now = datetime.now(UTC()) result_map = {} unfiltered_queue = [category_map] filtered_queue = [result_map] while unfiltered_queue: unfiltered_map = unfiltered_queue.pop() filtered_map = filtered_queue.pop() filtered_map["children"] = [] filtered_map["entries"] = {} filtered_map["subcategories"] = {} for child in unfiltered_map["children"]: if child in unfiltered_map["entries"]: if unfiltered_map["entries"][child]["start_date"] <= now: filtered_map["children"].append(child) filtered_map["entries"][child] = {} for key in unfiltered_map["entries"][child]: if key != "start_date": filtered_map["entries"][child][key] = unfiltered_map["entries"][child][key] else: log.debug(u"Filtering out:%s with start_date: %s", child, unfiltered_map["entries"][child]["start_date"]) else: if unfiltered_map["subcategories"][child]["start_date"] < now: filtered_map["children"].append(child) filtered_map["subcategories"][child] = {} unfiltered_queue.append(unfiltered_map["subcategories"][child]) filtered_queue.append(filtered_map["subcategories"][child]) return result_map def _sort_map_entries(category_map, sort_alpha): things = [] for title, entry in category_map["entries"].items(): if entry["sort_key"] is None and sort_alpha: entry["sort_key"] = title things.append((title, entry)) for title, category in category_map["subcategories"].items(): things.append((title, category)) _sort_map_entries(category_map["subcategories"][title], sort_alpha) category_map["children"] = [x[0] for x in sorted(things, key=lambda x: x[1]["sort_key"])] def get_discussion_category_map(course, user, 
cohorted_if_in_list=False, exclude_unstarted=True): """ Transform the list of this course's discussion modules into a recursive dictionary structure. This is used to render the discussion category map in the discussion tab sidebar for a given user. Args: course: Course for which to get the ids. user: User to check for access. cohorted_if_in_list (bool): If True, inline topics are marked is_cohorted only if they are in course_cohort_settings.discussion_topics. Example: >>> example = { >>> "entries": { >>> "General": { >>> "sort_key": "General", >>> "is_cohorted": True, >>> "id": "i4x-edx-eiorguegnru-course-foobarbaz" >>> } >>> }, >>> "children": ["General", "Getting Started"], >>> "subcategories": { >>> "Getting Started": { >>> "subcategories": {}, >>> "children": [ >>> "Working with Videos", >>> "Videos on edX" >>> ], >>> "entries": { >>> "Working with Videos": { >>> "sort_key": None, >>> "is_cohorted": False, >>> "id": "d9f970a42067413cbb633f81cfb12604" >>> }, >>> "Videos on edX": { >>> "sort_key": None, >>> "is_cohorted": False, >>> "id": "98d8feb5971041a085512ae22b398613" >>> } >>> } >>> } >>> } >>> } """ unexpanded_category_map = defaultdict(list) modules = get_accessible_discussion_modules(course, user) course_cohort_settings = get_course_cohort_settings(course.id) for module in modules: id = module.discussion_id title = module.discussion_target sort_key = module.sort_key category = " / ".join([x.strip() for x in module.discussion_category.split("/")]) # Handle case where module.start is None entry_start_date = module.start if module.start else datetime.max.replace(tzinfo=pytz.UTC) unexpanded_category_map[category].append({"title": title, "id": id, "sort_key": sort_key, "start_date": entry_start_date}) category_map = {"entries": defaultdict(dict), "subcategories": defaultdict(dict)} for category_path, entries in unexpanded_category_map.items(): node = category_map["subcategories"] path = [x.strip() for x in category_path.split("/")] # Find the earliest start 
date for the entries in this category category_start_date = None for entry in entries: if category_start_date is None or entry["start_date"] < category_start_date: category_start_date = entry["start_date"] for level in path[:-1]: if level not in node: node[level] = {"subcategories": defaultdict(dict), "entries": defaultdict(dict), "sort_key": level, "start_date": category_start_date} else: if node[level]["start_date"] > category_start_date: node[level]["start_date"] = category_start_date node = node[level]["subcategories"] level = path[-1] if level not in node: node[level] = {"subcategories": defaultdict(dict), "entries": defaultdict(dict), "sort_key": level, "start_date": category_start_date} else: if node[level]["start_date"] > category_start_date: node[level]["start_date"] = category_start_date always_cohort_inline_discussions = ( # pylint: disable=invalid-name not cohorted_if_in_list and course_cohort_settings.always_cohort_inline_discussions ) dupe_counters = defaultdict(lambda: 0) # counts the number of times we see each title for entry in entries: is_entry_cohorted = ( course_cohort_settings.is_cohorted and ( always_cohort_inline_discussions or entry["id"] in course_cohort_settings.cohorted_discussions ) ) title = entry["title"] if node[level]["entries"][title]: # If we've already seen this title, append an incrementing number to disambiguate # the category from other categores sharing the same title in the course discussion UI. dupe_counters[title] += 1 title = u"{title} ({counter})".format(title=title, counter=dupe_counters[title]) node[level]["entries"][title] = {"id": entry["id"], "sort_key": entry["sort_key"], "start_date": entry["start_date"], "is_cohorted": is_entry_cohorted} # TODO. BUG! : course location is not unique across multiple course runs! # (I think Kevin already noticed this) Need to send course_id with requests, store it # in the backend. 
for topic, entry in course.discussion_topics.items(): category_map['entries'][topic] = { "id": entry["id"], "sort_key": entry.get("sort_key", topic), "start_date": datetime.now(UTC()), "is_cohorted": (course_cohort_settings.is_cohorted and entry["id"] in course_cohort_settings.cohorted_discussions) } _sort_map_entries(category_map, course.discussion_sort_alpha) return _filter_unstarted_categories(category_map) if exclude_unstarted else category_map def get_discussion_categories_ids(course, user, include_all=False): """ Returns a list of available ids of categories for the course that are accessible to the given user. Args: course: Course for which to get the ids. user: User to check for access. include_all (bool): If True, return all ids. Used by configuration views. """ accessible_discussion_ids = [ module.discussion_id for module in get_accessible_discussion_modules(course, user, include_all=include_all) ] return course.top_level_discussion_topic_ids + accessible_discussion_ids class JsonResponse(HttpResponse): def __init__(self, data=None): content = json.dumps(data, cls=i4xEncoder) super(JsonResponse, self).__init__(content, mimetype='application/json; charset=utf-8') class JsonError(HttpResponse): def __init__(self, error_messages=[], status=400): if isinstance(error_messages, basestring): error_messages = [error_messages] content = json.dumps({'errors': error_messages}, indent=2, ensure_ascii=False) super(JsonError, self).__init__(content, mimetype='application/json; charset=utf-8', status=status) class HtmlResponse(HttpResponse): def __init__(self, html=''): super(HtmlResponse, self).__init__(html, content_type='text/plain') class ViewNameMiddleware(object): def process_view(self, request, view_func, view_args, view_kwargs): request.view_name = view_func.__name__ class QueryCountDebugMiddleware(object): """ This middleware will log the number of queries run and the total time taken for each request (with a status code of 200). 
It does not currently support multi-db setups. """ def process_response(self, request, response): if response.status_code == 200: total_time = 0 for query in connection.queries: query_time = query.get('time') if query_time is None: # django-debug-toolbar monkeypatches the connection # cursor wrapper and adds extra information in each # item in connection.queries. The query time is stored # under the key "duration" rather than "time" and is # in milliseconds, not seconds. query_time = query.get('duration', 0) / 1000 total_time += float(query_time) log.info('%s queries run, total %s seconds' % (len(connection.queries), total_time)) return response def get_ability(course_id, content, user): return { 'editable': check_permissions_by_view(user, course_id, content, "update_thread" if content['type'] == 'thread' else "update_comment"), 'can_reply': check_permissions_by_view(user, course_id, content, "create_comment" if content['type'] == 'thread' else "create_sub_comment"), 'can_delete': check_permissions_by_view(user, course_id, content, "delete_thread" if content['type'] == 'thread' else "delete_comment"), 'can_openclose': check_permissions_by_view(user, course_id, content, "openclose_thread") if content['type'] == 'thread' else False, 'can_vote': check_permissions_by_view(user, course_id, content, "vote_for_thread" if content['type'] == 'thread' else "vote_for_comment"), } # TODO: RENAME def get_annotated_content_info(course_id, content, user, user_info): """ Get metadata for an individual content (thread or comment) """ voted = '' if content['id'] in user_info['upvoted_ids']: voted = 'up' elif content['id'] in user_info['downvoted_ids']: voted = 'down' return { 'voted': voted, 'subscribed': content['id'] in user_info['subscribed_thread_ids'], 'ability': get_ability(course_id, content, user), } # TODO: RENAME def get_annotated_content_infos(course_id, thread, user, user_info): """ Get metadata for a thread and its children """ infos = {} def annotate(content): 
infos[str(content['id'])] = get_annotated_content_info(course_id, content, user, user_info) for child in ( content.get('children', []) + content.get('endorsed_responses', []) + content.get('non_endorsed_responses', []) ): annotate(child) annotate(thread) return infos def get_metadata_for_threads(course_id, threads, user, user_info): def infogetter(thread): return get_annotated_content_infos(course_id, thread, user, user_info) metadata = reduce(merge_dict, map(infogetter, threads), {}) return metadata # put this method in utils.py to avoid circular import dependency between helpers and mustache_helpers def render_mustache(template_name, dictionary, *args, **kwargs): template = lookup_template('main', template_name).source return pystache.render(template, dictionary) def permalink(content): if isinstance(content['course_id'], CourseKey): course_id = content['course_id'].to_deprecated_string() else: course_id = content['course_id'] if content['type'] == 'thread': return reverse('django_comment_client.forum.views.single_thread', args=[course_id, content['commentable_id'], content['id']]) else: return reverse('django_comment_client.forum.views.single_thread', args=[course_id, content['commentable_id'], content['thread_id']]) + '#' + content['id'] def extend_content(content): roles = {} if content.get('user_id'): try: user = User.objects.get(pk=content['user_id']) roles = dict(('name', role.name.lower()) for role in user.roles.filter(course_id=content['course_id'])) except User.DoesNotExist: log.error('User ID {0} in comment content {1} but not in our DB.'.format(content.get('user_id'), content.get('id'))) content_info = { 'displayed_title': content.get('highlighted_title') or content.get('title', ''), 'displayed_body': content.get('highlighted_body') or content.get('body', ''), 'permalink': permalink(content), 'roles': roles, 'updated': content['created_at'] != content['updated_at'], } return merge_dict(content, content_info) def add_courseware_context(content_list, 
course, user, id_map=None): """ Decorates `content_list` with courseware metadata. """ if id_map is None: id_map = get_discussion_id_map(course, user) for content in content_list: commentable_id = content['commentable_id'] if commentable_id in id_map: location = id_map[commentable_id]["location"].to_deprecated_string() title = id_map[commentable_id]["title"] url = reverse('jump_to', kwargs={"course_id": course.id.to_deprecated_string(), "location": location}) content.update({"courseware_url": url, "courseware_title": title}) def prepare_content(content, course_key, is_staff=False, course_is_cohorted=None): """ This function is used to pre-process thread and comment models in various ways before adding them to the HTTP response. This includes fixing empty attribute fields, enforcing author anonymity, and enriching metadata around group ownership and response endorsement. @TODO: not all response pre-processing steps are currently integrated into this function. Arguments: content (dict): A thread or comment. course_key (CourseKey): The course key of the course. is_staff (bool): Whether the user is a staff member. course_is_cohorted (bool): Whether the course is cohorted. 
""" fields = [ 'id', 'title', 'body', 'course_id', 'anonymous', 'anonymous_to_peers', 'endorsed', 'parent_id', 'thread_id', 'votes', 'closed', 'created_at', 'updated_at', 'depth', 'type', 'commentable_id', 'comments_count', 'at_position_list', 'children', 'highlighted_title', 'highlighted_body', 'courseware_title', 'courseware_url', 'unread_comments_count', 'read', 'group_id', 'group_name', 'pinned', 'abuse_flaggers', 'stats', 'resp_skip', 'resp_limit', 'resp_total', 'thread_type', 'endorsed_responses', 'non_endorsed_responses', 'non_endorsed_resp_total', 'endorsement', ] if (content.get('anonymous') is False) and ((content.get('anonymous_to_peers') is False) or is_staff): fields += ['username', 'user_id'] content = strip_none(extract(content, fields)) if content.get("endorsement"): endorsement = content["endorsement"] endorser = None if endorsement["user_id"]: try: endorser = User.objects.get(pk=endorsement["user_id"]) except User.DoesNotExist: log.error("User ID {0} in endorsement for comment {1} but not in our DB.".format( content.get('user_id'), content.get('id')) ) # Only reveal endorser if requester can see author or if endorser is staff if ( endorser and ("username" in fields or has_permission(endorser, "endorse_comment", course_key)) ): endorsement["username"] = endorser.username else: del endorsement["user_id"] if course_is_cohorted is None: course_is_cohorted = is_course_cohorted(course_key) for child_content_key in ["children", "endorsed_responses", "non_endorsed_responses"]: if child_content_key in content: children = [ prepare_content(child, course_key, is_staff, course_is_cohorted=course_is_cohorted) for child in content[child_content_key] ] content[child_content_key] = children if course_is_cohorted: # Augment the specified thread info to include the group name if a group id is present. 
if content.get('group_id') is not None: content['group_name'] = get_cohort_by_id(course_key, content.get('group_id')).name else: # Remove any cohort information that might remain if the course had previously been cohorted. content.pop('group_id', None) return content def get_group_id_for_comments_service(request, course_key, commentable_id=None): """ Given a user requesting content within a `commentable_id`, determine the group_id which should be passed to the comments service. Returns: int: the group_id to pass to the comments service or None if nothing should be passed Raises: ValueError if the requested group_id is invalid """ if commentable_id is None or is_commentable_cohorted(course_key, commentable_id): if request.method == "GET": requested_group_id = request.GET.get('group_id') elif request.method == "POST": requested_group_id = request.POST.get('group_id') if has_permission(request.user, "see_all_cohorts", course_key): if not requested_group_id: return None try: group_id = int(requested_group_id) get_cohort_by_id(course_key, group_id) except CourseUserGroup.DoesNotExist: raise ValueError else: # regular users always query with their own id. group_id = get_cohort_id(request.user, course_key) return group_id else: # Never pass a group_id to the comments service for a non-cohorted # commentable return None def is_comment_too_deep(parent): """ Determine whether a comment with the given parent violates MAX_COMMENT_DEPTH parent can be None to determine whether root comments are allowed """ return ( MAX_COMMENT_DEPTH is not None and ( MAX_COMMENT_DEPTH < 0 or (parent and parent["depth"] >= MAX_COMMENT_DEPTH) ) )
sbalde/edx-platform
lms/djangoapps/django_comment_client/utils.py
Python
agpl-3.0
23,946
import unittest

import psycopg2

from ccytsk.core.database.utils import get_connection_string


class TestDatabase(unittest.TestCase):
    """Integration tests against a local PostgreSQL instance.

    NOTE(review): the original file carried ~200 lines of commented-out
    (disabled) tests for RecordCRUDTask and CallDatabaseFunctionTask.
    They were dead code and have been removed; recover them from version
    control if those tasks are resurrected.
    """

    def setUp(self):
        # Connect as the default superuser; get_connection_string builds the
        # DSN from the given config dict.
        self.conn = psycopg2.connect(get_connection_string({
            'user': 'postgres',
        }))
        self.cursor = self.conn.cursor()

    def tearDown(self):
        # Drop the helper functions the (now removed) disabled tests created,
        # commit, and close the connection -- the original leaked it.
        self.cursor.execute('drop function if exists test_function_operation_1(text, int, json)')
        self.cursor.execute('drop function if exists test_function_operation_2(text, int, json)')
        self.conn.commit()
        self.conn.close()


if __name__ == '__main__':
    unittest.main()
Cocuyo-Labs-Team/ccytsk
test/test_core_database.py
Python
bsd-3-clause
8,145
# Test the windows specific win32reg module. # Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey import os, sys, errno import unittest from test import test_support threading = test_support.import_module("threading") from platform import machine # Do this first so test will be skipped if module doesn't exist test_support.import_module('_winreg') # Now import everything from _winreg import * try: REMOTE_NAME = sys.argv[sys.argv.index("--remote")+1] except (IndexError, ValueError): REMOTE_NAME = None # tuple of (major, minor) WIN_VER = sys.getwindowsversion()[:2] # Some tests should only run on 64-bit architectures where WOW64 will be. WIN64_MACHINE = True if machine() == "AMD64" else False # Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses # registry reflection and formerly reflected keys are shared instead. # Windows 7 and Windows Server 2008 R2 are version 6.1. Due to this, some # tests are only valid up until 6.1 HAS_REFLECTION = True if WIN_VER < (6, 1) else False # Use a per-process key to prevent concurrent test runs (buildbot!) from # stomping on each other. 
test_key_base = "Python Test Key [%d] - Delete Me" % (os.getpid(),) test_key_name = "SOFTWARE\\" + test_key_base # On OS'es that support reflection we should test with a reflected key test_reflect_key_name = "SOFTWARE\\Classes\\" + test_key_base test_data = [ ("Int Value", 45, REG_DWORD), ("String Val", "A string value", REG_SZ), ("StringExpand", "The path is %path%", REG_EXPAND_SZ), ("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ), ("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY), ("Big String", "x"*(2**14-1), REG_SZ), ("Big Binary", "x"*(2**14), REG_BINARY), ] if test_support.have_unicode: test_data += [ (unicode("Unicode Val"), unicode("A Unicode value"), REG_SZ,), ("UnicodeExpand", unicode("The path is %path%"), REG_EXPAND_SZ), ("Multi-unicode", [unicode("Lots"), unicode("of"), unicode("unicode"), unicode("values")], REG_MULTI_SZ), ("Multi-mixed", [unicode("Unicode"), unicode("and"), "string", "values"], REG_MULTI_SZ), ] class BaseWinregTests(unittest.TestCase): def setUp(self): # Make sure that the test key is absent when the test # starts. self.delete_tree(HKEY_CURRENT_USER, test_key_name) def delete_tree(self, root, subkey): try: hkey = OpenKey(root, subkey, KEY_ALL_ACCESS) except WindowsError: # subkey does not exist return while True: try: subsubkey = EnumKey(hkey, 0) except WindowsError: # no more subkeys break self.delete_tree(hkey, subsubkey) CloseKey(hkey) DeleteKey(root, subkey) def _write_test_data(self, root_key, CreateKey=CreateKey): # Set the default value for this key. SetValue(root_key, test_key_name, REG_SZ, "Default value") key = CreateKey(root_key, test_key_name) # Create a sub-key sub_key = CreateKey(key, "sub_key") # Give the sub-key some named values for value_name, value_data, value_type in test_data: SetValueEx(sub_key, value_name, 0, value_type, value_data) # Check we wrote as many items as we thought. 
nkeys, nvalues, since_mod = QueryInfoKey(key) self.assertEqual(nkeys, 1, "Not the correct number of sub keys") self.assertEqual(nvalues, 1, "Not the correct number of values") nkeys, nvalues, since_mod = QueryInfoKey(sub_key) self.assertEqual(nkeys, 0, "Not the correct number of sub keys") self.assertEqual(nvalues, len(test_data), "Not the correct number of values") # Close this key this way... # (but before we do, copy the key as an integer - this allows # us to test that the key really gets closed). int_sub_key = int(sub_key) CloseKey(sub_key) try: QueryInfoKey(int_sub_key) self.fail("It appears the CloseKey() function does " "not close the actual key!") except EnvironmentError: pass # ... and close that key that way :-) int_key = int(key) key.Close() try: QueryInfoKey(int_key) self.fail("It appears the key.Close() function " "does not close the actual key!") except EnvironmentError: pass def _read_test_data(self, root_key, OpenKey=OpenKey): # Check we can get default value for this key. val = QueryValue(root_key, test_key_name) self.assertEqual(val, "Default value", "Registry didn't give back the correct value") key = OpenKey(root_key, test_key_name) # Read the sub-keys with OpenKey(key, "sub_key") as sub_key: # Check I can enumerate over the values. index = 0 while 1: try: data = EnumValue(sub_key, index) except EnvironmentError: break self.assertIn(data, test_data, "Didn't read back the correct test data") index = index + 1 self.assertEqual(index, len(test_data), "Didn't read the correct number of items") # Check I can directly access each item for value_name, value_data, value_type in test_data: read_val, read_typ = QueryValueEx(sub_key, value_name) self.assertEqual(read_val, value_data, "Could not directly read the value") self.assertEqual(read_typ, value_type, "Could not directly read the value") sub_key.Close() # Enumerate our main key. 
read_val = EnumKey(key, 0) self.assertEqual(read_val, "sub_key", "Read subkey value wrong") try: EnumKey(key, 1) self.fail("Was able to get a second key when I only have one!") except EnvironmentError: pass key.Close() def _delete_test_data(self, root_key): key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS) sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS) # It is not necessary to delete the values before deleting # the key (although subkeys must not exist). We delete them # manually just to prove we can :-) for value_name, value_data, value_type in test_data: DeleteValue(sub_key, value_name) nkeys, nvalues, since_mod = QueryInfoKey(sub_key) self.assertEqual(nkeys, 0, "subkey not empty before delete") self.assertEqual(nvalues, 0, "subkey not empty before delete") sub_key.Close() DeleteKey(key, "sub_key") try: # Shouldnt be able to delete it twice! DeleteKey(key, "sub_key") self.fail("Deleting the key twice succeeded") except EnvironmentError: pass key.Close() DeleteKey(root_key, test_key_name) # Opening should now fail! try: key = OpenKey(root_key, test_key_name) self.fail("Could open the non-existent key") except WindowsError: # Use this error name this time pass def _test_all(self, root_key): self._write_test_data(root_key) self._read_test_data(root_key) self._delete_test_data(root_key) class LocalWinregTests(BaseWinregTests): def test_registry_works(self): self._test_all(HKEY_CURRENT_USER) def test_registry_works_extended_functions(self): # Substitute the regular CreateKey and OpenKey calls with their # extended counterparts. 
# Note: DeleteKeyEx is not used here because it is platform dependent cke = lambda key, sub_key: CreateKeyEx(key, sub_key, 0, KEY_ALL_ACCESS) self._write_test_data(HKEY_CURRENT_USER, cke) oke = lambda key, sub_key: OpenKeyEx(key, sub_key, 0, KEY_READ) self._read_test_data(HKEY_CURRENT_USER, oke) self._delete_test_data(HKEY_CURRENT_USER) def test_connect_registry_to_local_machine_works(self): # perform minimal ConnectRegistry test which just invokes it h = ConnectRegistry(None, HKEY_LOCAL_MACHINE) self.assertNotEqual(h.handle, 0) h.Close() self.assertEqual(h.handle, 0) def test_inexistant_remote_registry(self): connect = lambda: ConnectRegistry("abcdefghijkl", HKEY_CURRENT_USER) self.assertRaises(WindowsError, connect) def test_expand_environment_strings(self): r = ExpandEnvironmentStrings(u"%windir%\\test") self.assertEqual(type(r), unicode) self.assertEqual(r, os.environ["windir"] + "\\test") def test_context_manager(self): # ensure that the handle is closed if an exception occurs try: with ConnectRegistry(None, HKEY_LOCAL_MACHINE) as h: self.assertNotEqual(h.handle, 0) raise WindowsError except WindowsError: self.assertEqual(h.handle, 0) def test_changing_value(self): # Issue2810: A race condition in 2.6 and 3.1 may cause # EnumValue or QueryValue to raise "WindowsError: More data is # available" done = False class VeryActiveThread(threading.Thread): def run(self): with CreateKey(HKEY_CURRENT_USER, test_key_name) as key: use_short = True long_string = 'x'*2000 while not done: s = 'x' if use_short else long_string use_short = not use_short SetValue(key, 'changing_value', REG_SZ, s) thread = VeryActiveThread() thread.start() try: with CreateKey(HKEY_CURRENT_USER, test_key_name+'\\changing_value') as key: for _ in range(1000): num_subkeys, num_values, t = QueryInfoKey(key) for i in range(num_values): name = EnumValue(key, i) QueryValue(key, name[0]) finally: done = True thread.join() with OpenKey(HKEY_CURRENT_USER, test_key_name, 0, KEY_ALL_ACCESS) as key: 
DeleteKey(key, 'changing_value') DeleteKey(HKEY_CURRENT_USER, test_key_name) def test_long_key(self): # Issue2810, in 2.6 and 3.1 when the key name was exactly 256 # characters, EnumKey raised "WindowsError: More data is # available" name = 'x'*256 try: with CreateKey(HKEY_CURRENT_USER, test_key_name) as key: SetValue(key, name, REG_SZ, 'x') num_subkeys, num_values, t = QueryInfoKey(key) EnumKey(key, 0) finally: with OpenKey(HKEY_CURRENT_USER, test_key_name, 0, KEY_ALL_ACCESS) as key: DeleteKey(key, name) DeleteKey(HKEY_CURRENT_USER, test_key_name) def test_dynamic_key(self): # Issue2810, when the value is dynamically generated, these # raise "WindowsError: More data is available" in 2.6 and 3.1 try: EnumValue(HKEY_PERFORMANCE_DATA, 0) except OSError as e: if e.errno in (errno.EPERM, errno.EACCES): self.skipTest("access denied to registry key " "(are you running in a non-interactive session?)") raise QueryValueEx(HKEY_PERFORMANCE_DATA, None) # Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff # or DeleteKeyEx so make sure their use raises NotImplementedError @unittest.skipUnless(WIN_VER < (5, 2), "Requires Windows XP") def test_reflection_unsupported(self): try: with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck: self.assertNotEqual(ck.handle, 0) key = OpenKey(HKEY_CURRENT_USER, test_key_name) self.assertNotEqual(key.handle, 0) with self.assertRaises(NotImplementedError): DisableReflectionKey(key) with self.assertRaises(NotImplementedError): EnableReflectionKey(key) with self.assertRaises(NotImplementedError): QueryReflectionKey(key) with self.assertRaises(NotImplementedError): DeleteKeyEx(HKEY_CURRENT_USER, test_key_name) finally: DeleteKey(HKEY_CURRENT_USER, test_key_name) def test_setvalueex_value_range(self): # Test for Issue #14420, accept proper ranges for SetValueEx. # Py2Reg, which gets called by SetValueEx, was using PyLong_AsLong, # thus raising OverflowError. 
The implementation now uses # PyLong_AsUnsignedLong to match DWORD's size. try: with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck: self.assertNotEqual(ck.handle, 0) SetValueEx(ck, "test_name", None, REG_DWORD, 0x80000000) finally: DeleteKey(HKEY_CURRENT_USER, test_key_name) def test_setvalueex_with_memoryview(self): try: with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck: self.assertNotEqual(ck.handle, 0) with self.assertRaises(TypeError): SetValueEx(ck, "test_name", None, REG_BINARY, memoryview('val')) finally: DeleteKey(HKEY_CURRENT_USER, test_key_name) def test_queryvalueex_return_value(self): # Test for Issue #16759, return unsigned int from QueryValueEx. # Reg2Py, which gets called by QueryValueEx, was returning a value # generated by PyLong_FromLong. The implementation now uses # PyLong_FromUnsignedLong to match DWORD's size. try: with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck: self.assertNotEqual(ck.handle, 0) test_val = 0x80000000 SetValueEx(ck, "test_name", None, REG_DWORD, test_val) ret_val, ret_type = QueryValueEx(ck, "test_name") self.assertEqual(ret_type, REG_DWORD) self.assertEqual(ret_val, test_val) finally: DeleteKey(HKEY_CURRENT_USER, test_key_name) def test_setvalueex_crash_with_none_arg(self): # Test for Issue #21151, segfault when None is passed to SetValueEx try: with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck: self.assertNotEqual(ck.handle, 0) test_val = None SetValueEx(ck, "test_name", 0, REG_BINARY, test_val) ret_val, ret_type = QueryValueEx(ck, "test_name") self.assertEqual(ret_type, REG_BINARY) self.assertEqual(ret_val, test_val) finally: DeleteKey(HKEY_CURRENT_USER, test_key_name) @unittest.skipUnless(REMOTE_NAME, "Skipping remote registry tests") class RemoteWinregTests(BaseWinregTests): def test_remote_registry_works(self): remote_key = ConnectRegistry(REMOTE_NAME, HKEY_CURRENT_USER) self._test_all(remote_key) @unittest.skipUnless(WIN64_MACHINE, "x64 specific registry tests") class 
Win64WinregTests(BaseWinregTests): def test_reflection_functions(self): # Test that we can call the query, enable, and disable functions # on a key which isn't on the reflection list with no consequences. with OpenKey(HKEY_LOCAL_MACHINE, "Software") as key: # HKLM\Software is redirected but not reflected in all OSes self.assertTrue(QueryReflectionKey(key)) self.assertEqual(None, EnableReflectionKey(key)) self.assertEqual(None, DisableReflectionKey(key)) self.assertTrue(QueryReflectionKey(key)) @unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection") def test_reflection(self): # Test that we can create, open, and delete keys in the 32-bit # area. Because we are doing this in a key which gets reflected, # test the differences of 32 and 64-bit keys before and after the # reflection occurs (ie. when the created key is closed). try: with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0, KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key: self.assertNotEqual(created_key.handle, 0) # The key should now be available in the 32-bit area with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0, KEY_ALL_ACCESS | KEY_WOW64_32KEY) as key: self.assertNotEqual(key.handle, 0) # Write a value to what currently is only in the 32-bit area SetValueEx(created_key, "", 0, REG_SZ, "32KEY") # The key is not reflected until created_key is closed. # The 64-bit version of the key should not be available yet. 
open_fail = lambda: OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0, KEY_READ | KEY_WOW64_64KEY) self.assertRaises(WindowsError, open_fail) # Now explicitly open the 64-bit version of the key with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0, KEY_ALL_ACCESS | KEY_WOW64_64KEY) as key: self.assertNotEqual(key.handle, 0) # Make sure the original value we set is there self.assertEqual("32KEY", QueryValue(key, "")) # Set a new value, which will get reflected to 32-bit SetValueEx(key, "", 0, REG_SZ, "64KEY") # Reflection uses a "last-writer wins policy, so the value we set # on the 64-bit key should be the same on 32-bit with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0, KEY_READ | KEY_WOW64_32KEY) as key: self.assertEqual("64KEY", QueryValue(key, "")) finally: DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, KEY_WOW64_32KEY, 0) @unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection") def test_disable_reflection(self): # Make use of a key which gets redirected and reflected try: with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0, KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key: # QueryReflectionKey returns whether or not the key is disabled disabled = QueryReflectionKey(created_key) self.assertEqual(type(disabled), bool) # HKCU\Software\Classes is reflected by default self.assertFalse(disabled) DisableReflectionKey(created_key) self.assertTrue(QueryReflectionKey(created_key)) # The key is now closed and would normally be reflected to the # 64-bit area, but let's make sure that didn't happen. 
open_fail = lambda: OpenKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0, KEY_READ | KEY_WOW64_64KEY) self.assertRaises(WindowsError, open_fail) # Make sure the 32-bit key is actually there with OpenKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0, KEY_READ | KEY_WOW64_32KEY) as key: self.assertNotEqual(key.handle, 0) finally: DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, KEY_WOW64_32KEY, 0) def test_exception_numbers(self): with self.assertRaises(WindowsError) as ctx: QueryValue(HKEY_CLASSES_ROOT, 'some_value_that_does_not_exist') self.assertEqual(ctx.exception.errno, 2) def test_main(): test_support.run_unittest(LocalWinregTests, RemoteWinregTests, Win64WinregTests) if __name__ == "__main__": if not REMOTE_NAME: print "Remote registry calls can be tested using", print "'test_winreg.py --remote \\\\machine_name'" test_main()
Jeff-Tian/mybnb
Python27/Lib/test/test_winreg.py
Python
apache-2.0
21,312
#coding: utf-8 # website settings username = 'Pocky Nya' email = 'pocky@loli.club' title = u'Pocky Nya - Pocky 盒子' motto = u'我是好吃的 Pocky 喵~' disqus_shortname = 'pockynya' # themes name theme = 'default' # development debug = True analytics_code = ''' ''' post_of_page = 3 try: from localsettings import connect_str, cookie_secret except ImportError: raise Exception('Please add connect_str and cookie_secret in localsettings.py') try: module = __import__('themes.%s.config' % theme, globals(), locals(), ['*']) for k in dir(module): locals()[k] = getattr(module, k) except ImportError: pass
PockyNya/pyprint
pyprint/settings.py
Python
mit
647
from pyprint.handler import BaseHandler
from pyprint.models import Link


class ListLinksHandler(BaseHandler):
    """Render the links/about page listing every stored Link."""

    def get(self):
        # Fetch all links up front so the template call stays readable.
        all_links = self.orm.query(Link).all()
        return self.render('links.html', title='Links & About', links=all_links)
Leo02/pyprint
pyprint/views/links.py
Python
mit
227
#!/usr/bin/env python # cbus/protocol/application/enable.py - Enable Control Application # Copyright 2012-2019 Michael Farrell <micolous+git@gmail.com> # # This library is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this library. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import from __future__ import annotations import warnings from typing import Set, List from six import byte2int, indexbytes from cbus.common import Application, EnableCommand from cbus.protocol.application.sal import BaseApplication, SAL __all__ = [ 'EnableApplication', 'EnableSAL', 'EnableSetNetworkVariableSAL', ] class EnableSAL(SAL): """ Base type for enable control application SALs. """ @property def application(self) -> Application: return Application.ENABLE @staticmethod def decode_sals(data: bytes) -> List[EnableSAL]: """ Decodes a enable control application packet and returns it's SAL(s). :param data: SAL data to be parsed. :type data: str :returns: The SAL messages contained within the given data. :rtype: list of cbus.protocol.application.enable.EnableSAL """ output = [] while data: # parse the data if len(data) < 3: # not enough data to go on. 
warnings.warn( 'Got less than 3 bytes of stray SAL for enable ' 'application (malformed packet)', UserWarning) break command_code = byte2int(data) data = data[1:] if (command_code & 0x80) == 0x80: warnings.warn( 'Got unknown enable command {}, stopping processing ' 'prematurely'.format(command_code), UserWarning) break if (command_code & 0x07) != 2: warnings.warn( 'Got invalid length for enable command {}, must be ' '2 (Enable s9.4.1)'.format(command_code), UserWarning) break sal, data = EnableSetNetworkVariableSAL.decode(data) if sal: output.append(sal) return output class EnableSetNetworkVariableSAL(EnableSAL): """ Enable control Set Network Variable SAL. Sets a network variable. """ def __init__(self, variable, value): """ Creates a new SAL Enable Control Set Network Variable :param variable: The variable ID being changed :type variable: int :param value: The value of the network variable :type value: int """ super().__init__() self.variable = variable self.value = value @classmethod def decode(cls, data): """ Do not call this method directly -- use EnableSAL.decode """ # print "data == %r" % data variable = byte2int(data) value = indexbytes(data, 1) data = data[2:] return cls(variable, value), data def encode(self) -> bytes: variable = self.variable & 0xff value = self.value & 0xff return super().encode() + bytes([ EnableCommand.SET_NETWORK_VARIABLE, variable, value ]) class EnableApplication(BaseApplication): """ This class is called in the cbus.protocol.applications.APPLICATIONS dict in order to describe how to decode enable broadcast application events received from the network. Do not call this class directly. """ @staticmethod def supported_applications() -> Set[Application]: return {Application.ENABLE} @classmethod def decode_sals(cls, data: bytes) -> List[EnableSAL]: """ Decodes a enable broadcast application packet and returns its SAL(s). """ return EnableSAL.decode_sals(data)
micolous/cbus
cbus/protocol/application/enable.py
Python
lgpl-3.0
4,441
# try: global env gui = env.tkmol except NameError: print 'Error: This script should be run from the GUI shell window' import sys sys.exit() for v in gui.vis_list: v.Hide()
alexei-matveev/ccp1gui
scripts/hideall.py
Python
gpl-2.0
198
from unittest import TestCase

from celery.app.base import Celery

from recommendation.celery import celery


class TestCeleryApp(TestCase):
    """Sanity checks for the module-level Celery application object.

    Rewritten to use stdlib unittest assertions instead of the unmaintained
    nose.tools helpers (eq_/ok_); this also drops the nose dependency and
    gives clearer failure messages.
    """

    def test_celery_app(self):
        # The celery module must expose a `celery` attribute (the app).
        from recommendation import celery as celery_module
        self.assertTrue(hasattr(celery_module, 'celery'))

    def test_celery_app_type(self):
        # Exact type check preserved from the original (not isinstance).
        self.assertIs(type(celery), Celery)

    def test_celery_app_singleton(self):
        # Re-importing must yield the very same object; identity assertion
        # replaces the original id()-comparison anti-pattern.
        from recommendation.celery import celery as celery_2
        self.assertIs(celery, celery_2)
mozilla/universal-search-recommendation
recommendation/tests/test_celery.py
Python
mpl-2.0
521
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0

from logging import getLogger
from sis_provisioner.models import Job
from sis_provisioner.views.admin import RESTDispatch, get_user
from django.utils.timezone import utc
from datetime import datetime
import json

logger = getLogger(__name__)


class JobView(RESTDispatch):
    """
    Retrieves a Job model.

    GET returns 200 with Job details.
    PUT returns 200.
    DELETE returns 200.
    Mutating methods require job-management permission (401 otherwise);
    an unknown job_id yields 404.
    """
    def get(self, request, *args, **kwargs):
        """Return the JSON representation of a single Job."""
        job_id = kwargs['job_id']
        try:
            job = Job.objects.get(id=job_id)
            return self.json_response(job.json_data())
        except Job.DoesNotExist:
            return self.error_response(404, "Job %s not found" % job_id)

    def put(self, request, *args, **kwargs):
        """Enable or disable a Job, recording who changed it and when."""
        if not self.can_manage_jobs(request):
            return self.error_response(401, "Unauthorized")

        job_id = kwargs['job_id']
        try:
            job = Job.objects.get(id=job_id)
            data = json.loads(request.body).get('job', {})
            if 'is_active' in data:
                job.is_active = data['is_active']
                job.changed_by = get_user(request)
                job.changed_date = datetime.utcnow().replace(tzinfo=utc)
                job.save()

                # Lazy %-args: formatting is skipped when INFO is disabled.
                logger.info('%s %s Job "%s"',
                            job.changed_by,
                            'enabled' if job.is_active else 'disabled',
                            job.name)

            return self.json_response({'job': job.json_data()})
        except Job.DoesNotExist:
            return self.error_response(404, "Job %s not found" % job_id)

    def delete(self, request, *args, **kwargs):
        """Delete a Job and log the removal."""
        if not self.can_manage_jobs(request):
            return self.error_response(401, "Unauthorized")

        job_id = kwargs['job_id']
        try:
            job = Job.objects.get(id=job_id)
            job.delete()
            # NOTE(review): this attributes the deletion to the *last*
            # changed_by, not necessarily the requesting user -- confirm
            # whether get_user(request) was intended here.
            logger.info('%s deleted Job "%s"', job.changed_by, job.name)
            return self.json_response({'job': job.json_data()})
        except Job.DoesNotExist:
            return self.error_response(404, "Job %s not found" % job_id)


class JobListView(RESTDispatch):
    """
    Retrieves a list of Jobs.

    Each entry carries a read_only flag reflecting whether the requester
    may manage jobs.
    """
    def get(self, request, *args, **kwargs):
        read_only = not self.can_manage_jobs(request)
        jobs = []
        # NOTE(review): ordering key is 'title'; confirm the Job model
        # defines that field (elsewhere this code logs job.name).
        for job in Job.objects.all().order_by('title'):
            data = job.json_data()
            data['read_only'] = read_only
            jobs.append(data)

        return self.json_response({'jobs': jobs})
uw-it-aca/canvas-sis-provisioner
sis_provisioner/views/jobs.py
Python
apache-2.0
2,588
""" System logger class """ from logging import Logger import os,sys class logger(Logger): def __init__(self,name): Logger.__init__(self,name) self.computer = os.environ['COMPUTERNAME'] name = os.path.basename(sys.argv[0]).split('.')[0] if name.find('-') <> -1: app = name.split('-')[1] else: app = name self.application = app def makeRecord(self,name, lvl, fn, lno, msg, args, exc_info , func=None, extra=None): add = {} add['server'] = self.computer add['application'] = self.application if extra is not None: add.update(extra) rec = Logger.makeRecord(self,name, lvl, fn, lno, msg, args, exc_info , func=func, extra=add) return rec
weightedEights/runDBcheck
RADAR_DATA/20170713.001/Source/pylib/logger.py
Python
gpl-3.0
808
import asyncio

import aio_pika


async def main(loop):
    """Demonstrate AMQP transactional publishing with aio_pika.

    Publishes via three routes: an ``async with`` transaction (auto-commit),
    a manual select/commit, and a manual select/rollback.
    NOTE(review): passing ``loop=`` to connect_robust follows the older
    aio_pika/asyncio API -- confirm against the installed version, as newer
    asyncio deprecates explicit loop arguments.
    """
    connection = await aio_pika.connect_robust(
        "amqp://guest:guest@127.0.0.1/", loop=loop
    )

    async with connection:
        routing_key = "test_queue"

        # Transactions conflicts with `publisher_confirms`
        channel = await connection.channel(publisher_confirms=False)

        # Use transactions with async context manager
        async with channel.transaction():
            # Publishing messages but delivery will not be done
            # before committing this transaction
            for i in range(10):
                message = aio_pika.Message(body="Hello #{}".format(i).encode())

                await channel.default_exchange.publish(
                    message, routing_key=routing_key
                )

        # Using transactions manually
        tx = channel.transaction()

        # start transaction manually
        await tx.select()

        await channel.default_exchange.publish(
            aio_pika.Message(body="Hello {}".format(routing_key).encode()),
            routing_key=routing_key,
        )

        # Commit makes the buffered message visible to consumers.
        await tx.commit()
        # NOTE(review): tx.close() is called without await here and below --
        # confirm against the aio_pika Transaction API whether close() is a
        # coroutine (or needed at all after commit/rollback).
        tx.close()

        # Using transactions manually
        tx = channel.transaction()

        # start transaction manually
        await tx.select()

        await channel.default_exchange.publish(
            aio_pika.Message(body="Should be rejected".encode()),
            routing_key=routing_key,
        )

        # Rollback discards the message published inside this transaction.
        await tx.rollback()
        tx.close()


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))
    loop.close()
mosquito/aio-pika
docs/source/examples/simple_publisher_transactions.py
Python
apache-2.0
1,628
from __future__ import unicode_literals

from django.db import models
from django.utils import timezone


# Create your models here.
class Room(models.Model):
    # Display name of the chat room; `label` is the unique URL-safe slug
    # used to address it.
    name = models.TextField()
    label = models.SlugField(unique=True)

    def __unicode__(self):
        # Represent a room by its slug (Python 2-style __unicode__).
        return self.label


class Message(models.Model):
    # Each message belongs to one room; reverse accessor is room.messages.
    room = models.ForeignKey(Room, related_name='messages')
    # `handle` is the display name of the sender (no FK to a user model).
    handle = models.TextField()
    message = models.TextField()
    # db_index because messages are fetched ordered/filtered by time.
    timestamp = models.DateTimeField(default=timezone.now, db_index=True)

    def __unicode__(self):
        return '[{timestamp}] {handle}: {message}'.format(**self.as_dict())

    @property
    def formatted_timestamp(self):
        # NOTE(review): %-d and %-I (no zero padding) are glibc extensions;
        # this will fail on Windows strftime -- confirm deployment platform.
        return self.timestamp.strftime('%b %-d %-I:%M %p')

    def as_dict(self):
        # Serializable form used both for __unicode__ and (presumably) for
        # pushing messages over the wire -- verify against consumers.
        return {'handle': self.handle, 'message': self.message, 'timestamp': self.formatted_timestamp}
vilnitskiy/chat
chat/models.py
Python
gpl-3.0
856
from golem.core.responses.responses import MenuElement


class ThreadSetting():
    """Base class for Messenger thread settings."""

    def to_response(self):
        # The base setting has no response payload.
        pass


class GreetingSetting(ThreadSetting):
    """Greeting text shown before the conversation starts."""

    def __init__(self, message):
        self.message = message

    def __str__(self):
        return 'greeting_setting'


class GetStartedSetting(ThreadSetting):
    """Payload sent when the user presses the Get Started button."""

    def __init__(self, payload):
        self.payload = payload

    def __str__(self):
        return 'get_started_setting'


class MenuSetting(ThreadSetting):
    """Persistent menu built from a list of MenuElement specs."""

    def __init__(self, elements=None):
        specs = elements or []
        self.elements = [MenuElement(**spec) for spec in specs]

    def __str__(self):
        # One line per element, each indented under the "menu:" header.
        pieces = ['menu:']
        pieces.extend("\n " + str(item) for item in self.elements)
        return ''.join(pieces)

    def add_element(self, element):
        """Append an existing element and return it for chaining."""
        self.elements.append(element)
        return element

    def create_element(self, **kwargs):
        """Build a MenuElement from kwargs, attach it, and return it."""
        new_element = MenuElement(**kwargs)
        return self.add_element(new_element)
prihoda/golem
golem/core/responses/settings.py
Python
gpl-3.0
969