| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
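Each row below is one fill-in-the-middle sample: the `prefix`, `middle`, and `suffix` cells appear to be consecutive slices of a single source file (very long files look truncated at the column limits above). A minimal sketch of how a row could be reassembled, assuming a hypothetical `row` dict keyed by the column names; the real loader may expose rows differently:

```python
# Minimal sketch: rebuild the source text from one row of this table.
# `row` is a hypothetical dict keyed by the column names above.
def reassemble(row: dict) -> str:
    # The three text columns are consecutive slices of one file, in order.
    return row["prefix"] + row["middle"] + row["suffix"]

# Purely illustrative placeholder values, not an actual record from the table.
example = {
    "prefix": "def add(a, b):\n    ret",
    "middle": "urn a",
    "suffix": " + b\n",
}
print(reassemble(example))
```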
| albertoconnor/website | newsletter/migrations/0001_initial.py | Python | mit | 2,249 | 0.003557 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
('articles', '0024_auto_20150722_1928'),
        ('images', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='NewsletterArticleLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('override_text', wagtail.wagtailcore.fields.RichTextField(default='', help_text='Text to describe article.', blank=True)),
('article', models.ForeignKey(related_name='newsletter_links', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='articles.ArticlePage', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='NewsletterPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('issue_date', models.DateField(auto_now=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AddField(
model_name='newsletterarticlelink',
name='newsletter',
field=modelcluster.fields.ParentalKey(related_name='article_links', to='newsletter.NewsletterPage'),
),
migrations.AddField(
model_name='newsletterarticlelink',
name='override_image',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='images.AttributedImage', help_text='Circular Image to accompany article if article image not selected', null=True),
),
]
| maartenq/ansible | test/units/modules/network/f5/test_bigip_profile_http_compression.py | Python | gpl-3.0 | 4,310 | 0.001856 |
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_http_compression import ApiParameters
from library.modules.bigip_profile_http_compression import ModuleParameters
from library.modules.bigip_profile_http_compression import ModuleManager
from library.modules.bigip_profile_http_compression import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_profile_http_compression import ApiParameters
from ansible.modules.network.f5.bigip_profile_http_compression import ModuleParameters
from ansible.modules.network.f5.bigip_profile_http_compression import ModuleManager
from ansible.modules.network.f5.bigip_profile_http_compression import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='bar',
description='description1',
            buffer_size=1024,
            gzip_memory_level=64,
            gzip_level=2,
gzip_window_size=128
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/bar'
assert p.description == 'description1'
assert p.buffer_size == 1024
assert p.gzip_memory_level == 64
assert p.gzip_level == 2
assert p.gzip_window_size == 128
def test_api_parameters(self):
p = ApiParameters(params=load_fixture('load_ltm_profile_http_compression_1.json'))
assert p.description == 'my profile'
assert p.buffer_size == 4096
assert p.gzip_memory_level == 8
assert p.gzip_level == 1
assert p.gzip_window_size == 16
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
name='foo',
parent='bar',
description='description1',
buffer_size=1024,
gzip_memory_level=64,
gzip_level=2,
gzip_window_size=128,
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
| xahhy/Django-vod | vodmanagement/apps.py | Python | lgpl-3.0 | 125 | 0 |
from django.apps import AppConfig
class VodConfig(AppConfig):
name = 'vodmanagement'
verbose_name = '视频点播'
| stvstnfrd/edx-platform | common/test/acceptance/tests/lms/test_account_settings.py | Python | agpl-3.0 | 2,482 | 0.001612 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the Account Settings page.
"""
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class AccountSettingsTestMixin(EventsTestMixin, AcceptanceTest):
"""
Mixin with helper methods to test the account settings page.
"""
CHANGE_INITIATED_EVENT_NAME = u"edx.user.settings.change_initiated"
USER_SETTINGS_CHANGED_EVENT_NAME = 'edx.user.settings.changed'
ACCOUNT_SETTINGS_REFERER = u"/account/settings"
shard = 23
def visit_account_settings_page(self, gdpr=False):
"""
Visit the account settings page for the current user, and store the page instance
as self.account_settings_page.
"""
self.account_settings_page = AccountSettingsPage(self.browser)
self.account_settings_page.visit()
self.account_settings_page.wait_for_ajax()
# TODO: LEARNER-4422 - delete when we clean up flags
if gdpr:
self.account_settings_page.browser.get(self.browser.current_url + "?course_experience.gdpr=1")
self.account_settings_page.wait_for_page()
def log_in_as_unique_user(self, email=None, full_name=None, password=None):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(
self.browser,
username=username,
email=email,
full_name=full_name,
password=password
).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
class AccountSettingsA11yTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Class to test account settings accessibility.
"""
a11y = True
def test_account_settings_a11y(self):
"""
        Test the accessibility of the account settings page.
"""
self.log_in_as_unique_user()
self.visit_account_settings_page()
self.account_settings_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
        self.account_settings_page.a11y_audit.check_for_accessibility_errors()
| goodes/fit4school | fit4school/urls.py | Python | apache-2.0 | 677 | 0.001477 |
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.utils.translation import ugettext_lazy as _
urlpatterns = [
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^fitbit_api/', include('fit4school.fitbit_api.urls')),
url(r'^control/django-rq/', include('django_rq.urls')),
url(r'^control/', include('fit4school.control.urls')),
url(r'^station/', include('fit4school.station.urls')),
url(r'', include('fit4school.core.urls')),
] + staticfiles_urlpatterns()
| digitalocean/netbox | netbox/tenancy/tests/test_filters.py | Python | apache-2.0 | 3,818 | 0.001833 |
from django.test import TestCase
from tenancy.filters import *
from tenancy.models import Tenant, TenantGroup
class TenantGroupTestCase(TestCase):
queryset = TenantGroup.objects.all()
filterset = TenantGroupFilterSet
@classmethod
def setUpTestData(cls):
parent_tenant_groups = (
TenantGroup(name='Parent Tenant Group 1', slug='parent-tenant-group-1'),
TenantGroup(name='Parent Tenant Group 2', slug='parent-tenant-group-2'),
TenantGroup(name='Parent Tenant Group 3', slug='parent-tenant-group-3'),
)
for tenantgroup in parent_tenant_groups:
tenantgroup.save()
tenant_groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1', parent=parent_tenant_groups[0], description='A'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2', parent=parent_tenant_groups[1], description='B'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3', parent=parent_tenant_groups[2], description='C'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Tenant Group 1', 'Tenant Group 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-group-1', 'tenant-group-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_parent(self):
parent_groups = TenantGroup.objects.filter(name__startswith='Parent')[:2]
params = {'parent_id': [parent_groups[0].pk, parent_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'parent': [parent_groups[0].slug, parent_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class TenantTestCase(TestCase):
queryset = Tenant.objects.all()
filterset = TenantFilterSet
@classmethod
def setUpTestData(cls):
tenant_groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Tenant 1', 'Tenant 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
        params = {'slug': ['tenant-1', 'tenant-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_group(self):
group = TenantGroup.objects.all()[:2]
params = {'group_id': [group[0].pk, group[1].pk]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'group': [group[0].slug, group[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
| openstack/nova | nova/tests/unit/api/openstack/compute/test_limits.py | Python | apache-2.0 | 24,448 | 0.000286 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
from http import client as httplib
from io import StringIO
import mock
from oslo_limit import fixture as limit_fixture
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from nova.api.openstack.compute import limits as limits_v21
from nova.api.openstack.compute import views
from nova.api.openstack import wsgi
import nova.context
from nova import exception
from nova.limit import local as local_limit
from nova.limit import placement as placement_limit
from nova import objects
from nova.policies import limits as l_policies
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
class BaseLimitTestSuite(test.NoDBTestCase):
"""Base test suite which provides relevant stubs and time abstraction."""
def setUp(self):
super(BaseLimitTestSuite, self).setUp()
self.time = 0.0
self.absolute_limits = {}
def stub_get_project_quotas(context, project_id, usages=True):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
mock_get_project_quotas = mock.patch.object(
nova.quota.QUOTAS,
"get_project_quotas",
side_effect = stub_get_project_quotas)
mock_get_project_quotas.start()
self.addCleanup(mock_get_project_quotas.stop)
patcher = self.mock_can = mock.patch('nova.context.RequestContext.can')
self.mock_can = patcher.start()
self.addCleanup(patcher.stop)
def _get_time(self):
"""Return the "time" according to this test suite."""
return self.time
class LimitsControllerTestV21(BaseLimitTestSuite):
"""Tests for `limits.LimitsController` class."""
limits_controller = limits_v21.LimitsController
def setUp(self):
"""Run before each test."""
super(LimitsControllerTestV21, self).setUp()
self.controller = wsgi.Resource(self.limits_controller())
self.ctrler = self.limits_controller()
def _get_index_request(self, accept_header="application/json",
tenant_id=None, user_id='testuser',
project_id='testproject'):
"""Helper to set routing arguments."""
request = fakes.HTTPRequest.blank('', version='2.1')
if tenant_id:
request = fakes.HTTPRequest.blank('/?tenant_id=%s' % tenant_id,
version='2.1')
request.accept = accept_header
request.environ["wsgiorg.routing_args"] = (None, {
"action": "index",
"controller": "",
})
context = nova.context.RequestContext(user_id, project_id)
request.environ["nova.context"] = context
return request
def test_empty_index_json(self):
# Test getting empty limit details in JSON.
request = self._get_index_request()
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [],
"absolute": {},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def test_index_json(self):
self._test_index_json()
def test_index_json_by_tenant(self):
self._test_index_json('faketenant')
def _test_index_json(self, tenant_id=None):
# Test getting limit details in JSON.
request = self._get_index_request(tenant_id=tenant_id)
context = request.environ["nova.context"]
if tenant_id is None:
tenant_id = context.project_id
self.absolute_limits = {
'ram': 512,
'instances': 5,
'cores': 21,
'key_pairs': 10,
'floating_ips': 10,
'security_groups': 10,
'security_group_rules': 20,
}
expected = {
"limits": {
"rate": [],
"absolute": {
"maxTotalRAMSize": 512,
"maxTotalInstances": 5,
"maxTotalCores": 21,
"maxTotalKeypairs": 10,
"maxTotalFloatingIps": 10,
"maxSecurityGroups": 10,
"maxSecurityGroupRules": 20,
"totalRAMUsed": 256,
"totalCoresUsed": 10,
"totalInstancesUsed": 2,
"totalFloatingIpsUsed": 5,
"totalSecurityGroupsUsed": 5,
},
},
}
def _get_project_quotas(context, project_id, usages=True):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
get_project_quotas:
get_project_quotas.side_effect = _get_project_quotas
response = request.get_response(self.controller)
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
get_project_quotas.assert_called_once_with(context, tenant_id,
usages=True)
def _do_test_used_limits(self, reserved):
request = self._get_index_request(tenant_id=None)
quota_map = {
'totalRAMUsed': 'ram',
'totalCoresUsed': 'cores',
'totalInstancesUsed': 'instances',
'totalFloatingIpsUsed': 'floating_ips',
'totalSecurityGroupsUsed': 'security_groups',
'totalServerGroupsUsed': 'server_groups',
}
limits = {}
expected_abs_limits = []
for display_name, q in quota_map.items():
limits[q] = {'limit': len(display_name),
'in_use': len(display_name) // 2,
'reserved': 0}
expected_abs_limits.append(display_name)
def stub_get_project_quotas(context, project_id, usages=True):
return limits
self.stub_out('nova.quota.QUOTAS.get_project_quotas',
stub_get_project_quotas)
res = request.get_response(self.controller)
body = jsonutils.loads(res.body)
abs_limits = body['limits']['absolute']
for limit in expected_abs_limits:
value = abs_limits[limit]
r = limits[quota_map[limit]]['reserved'] if reserved else 0
self.assertEqual(limits[quota_map[limit]]['in_use'] + r, value)
def test_used_limits_basic(self):
self._do_test_used_limits(False)
def test_used_limits_with_reserved(self):
self._do_test_used_limits(True)
def test_admin_can_fetch_limits_for_a_given_tenant_id(self):
project_id = "123456"
user_id = "A1234"
tenant_id = 'abcd'
fake_req = self._get_index_request(tenant_id=tenant_id,
user_id=user_id,
project_id=project_id)
context = fake_req.environ["nova.context"]
with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
return_value={}) as mock_get_quotas:
fake_req.get_response(self.controller)
self.assertEqual(2, self.mock_can.call_count)
self.mock_can.assert_called_with(
l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME)
mock_get_quotas.assert_calle
| tumbislav/SvgMapper | src/main.py | Python | gpl-3.0 | 2,923 | 0.002396 |
# encoding: utf-8
# main.py, copyright 2014 by Marko Čibej <marko@cibej.org>
#
# This file is part of SvgMapper. Full sources and documentation
# are available here: https://github.com/tumbislav/SvgMapper
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
# Full licence is in the file LICENSE and at http://www.gnu.org/copyleft/gpl.html
__author__ = 'Marko Čibej'
import argparse
from svgmapper import *
from helper import logger
def main(config, resources=None, maps=None, simulate=False):
logger.info('Starting job')
with SvgMapper() as mapper:
mapper.load_config(config, resources)
if maps:
mapper.replace_targets(maps)
if not simulate:
mapper.run()
logger.info('Finished')
def parse_args():
parser = argparse.ArgumentParser(description='Transform maps in SVG format in various ways.')
parser.add_argument('config_file', help='The name of the configuration file')
parser.add_argument('-r', '--resource', help='Additional resource file(s)',
action='append', metavar='resource_file')
parser.add_argument('-m', '--map', help='Map(s) to run instead of those listed in config file', metavar='map_name')
parser.add_argument('-v', '--verbosity', help='Set verbosity: 0=errors only, 1=warnings, 2=info, 3=debug',
type=int, choices=range(0, 3), dest='verbosity')
parser.add_argument('-l', '--log', help='Output to named log file', metavar=('level(0-3)', 'logFile'), nargs=2)
parser.add_argument('-s', '--simulate', help='Don\'t actually do anything, just parse all the configurations',
                        action='store_true')
return parser.parse_args()
def set_logging(the_log, verbosity):
log_levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
logger.setLevel(logging.DEBUG)
if the_log:
level = log_levels[int(the_log[0])]
lf = logging.FileHandler(the_log[1], mode='w')
lf.setLevel(level)
lf.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger.addHandler(lf)
lc = logging.StreamHandler()
if verbosity:
lc.setLevel(log_levels[verbosity])
else:
lc.setLevel(log_levels[2])
logger.addHandler(lc)
| Pica4x6/megaradrp | megaradrp/tests/test_products.py | Python | gpl-3.0 | 2,054 | 0.011685 |
#
# Copyright 2015 Universidad Complutense de Madrid
#
# This file is part of Megara DRP
#
# Megara DRP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Megara DRP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Megara DRP. If not, see <http://www.gnu.org/licenses/>.
#
from megaradrp.products import TraceMap
from tempfile import NamedTemporaryFile
import pytest
import yaml
@pytest.mark.xfail
def test_fail_traceMap(benchmark=None):
my_obj = TraceMap()
my_obj._datatype_load('')
def test_load_traceMap(benchmark=None):
data = dict(A = 'a',
B = dict(C = 'c',
D = 'd',
E = 'e',)
)
    my_obj = TraceMap()
my_file = NamedTemporaryFile()
with open(my_file.name, 'w') as fd:
yaml.dump(data, fd)
my_open_file = my_obj._datatype_load(my_file.name)
assert (my_open_file == {'A': 'a', 'B': {'C': 'c', 'E': 'e', 'D': 'd'}})
def test_dump_traceMap(benchmark=None):
class aux(object):
def __init__(self, destination):
self.destination = destination
data = dict(A = 'a',
B = dict(C = 'c',
D = 'd',
E = 'e',)
)
my_obj = TraceMap()
my_file = NamedTemporaryFile()
work_env = aux(my_file.name)
my_open_file = my_obj._datatype_dump(data, work_env)
with open(my_open_file, 'r') as fd:
traces = yaml.load(fd)
assert (traces == {'A': 'a', 'B': {'C': 'c', 'E': 'e', 'D': 'd'}})
if __name__ == "__main__":
test_load_traceMap()
test_dump_traceMap()
| lachaka/traffic_sign_recognition | datasets/neg_images/img_gen.py | Python | mit | 644 | 0.021739 |
import cv2
import os
import skimage.io
output_dir = '../training_images/Final_Training/Images/00043/'
images_path = './samples/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
name = 0;
img_names = [f for f in os.listdir(images_path)
if f.endswith(".ppm")]
for img_name in img_names:
img = cv2.imread(images_path + img_name)
h, w, channel = skimage.io.imread(images_path + img_name).shape
for i in range(0, w - 128, 128):
for j in range(0, h - 128, 128):
crop_img = img[j : (j + 128), i : (i + 128)]
img_full_path = output_dir + '0_' + str(name) + '.ppm'
cv2.imwrite(img_full_path, crop_img)
name += 1
| ashishtanwer/DFS | reader.py | Python | gpl-2.0 | 600 | 0.011667 |
#!/usr/bin/python
import re
fi = open("tree8", "r")
fo = open("tree8.dot", "wb")
fo.write("graph test {\n")
fo.write("\nflowexport=text\n")
line = fi.readline()
line = fi.readline()
line = fi.readline()
while line!= 0:
RouterList = re.sub("[^\w]", " ", line).split()
    fo.write('%s[\n\tautoack = "False"\n\tipdests = "10.0.0.0/16"\n];\n' % RouterList[0])
fo.write('%s[\n\tautoack = "False"\n\tipdests = "10.0.0.0/16"\n];\n' % RouterList[1])
    fo.write('\t%s -- %s[weight = 1, capacity = 100000000, delay = 0.001];\n'%(RouterList[0],RouterList[1]))
fo.write("}")
| glogiotatidis/mozillians-new | mozillians/users/helpers.py | Python | bsd-3-clause | 1,366 | 0.000732 |
import base64
import hashlib
import re
def validate_username(username):
"""Validate username.
    Import modules here to prevent dependency breaking.
"""
from models import UsernameBlacklist
username = username.lower()
if (UsernameBlacklist.
objects.filter(value=username, is_regex=False).exists()):
return False
for regex_value in UsernameBlacklist.objects.filter(is_regex=True):
if re.match(regex_value.value, username):
return False
return True
def calculate_username(email):
"""Calculate username from email address.
Import modules here to prevent dependency breaking.
"""
from models import USERNAME_MAX_LENGTH
from django.contrib.auth.models import User
email = email.split('@')[0]
username = re.sub(r'[^\w.@+-]', '-', email)
username = username[:USERNAME_MAX_LENGTH]
suggested_username = username
count = 0
while User.objects.filter(username=suggested_username).exists():
count += 1
suggested_username = '%s%d' % (username, count)
if len(suggested_username) > USERNAME_MAX_LENGTH:
# We failed to calculate a name for you, default to a
# email digest.
username = base64.urlsafe_b64encode(
hashlib.sha1(email).digest()).rstrip('=')
return suggested_username
| quamis/ExchangeRate | ExchangeRate/ExchangeRate/spiders/Raiffeisen.py | Python | gpl-2.0 | 1,581 | 0.025933 |
from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from ExchangeRate.items import ExchangerateItem
import re
class RaiffeisenSpider(BaseSpider):
name = "Raiffeisen"
allowed_domains = ["www.raiffeisen.ro"]
start_urls = [
"http://www.raiffeisen.ro/curs-valutar",
]
def parse(self, response):
hxs = Selector(response)
item = ExchangerateItem()
item['EUR_buy'] = float(hxs.xpath('//div[@class="rzbContentTextNormal"]//table//td[contains(text(), "EUR")]/../td[4]/text()').extract()[0])
        item['EUR_sell'] = float(hxs.xpath('//div[@class="rzbContentTextNormal"]//table//td[contains(text(), "EUR")]/../td[5]/text()').extract()[0])
item['USD_buy'] = float(hxs.xpath('//div[@class="rzbContentTextNormal"]//table//td[contains(text(), "USD")]/../td[4]/text()').extract()[0])
item['USD_sell'] = float(hxs.xpath('//div[@class="rzbContentTextNormal"]//table//td[contains(text(), "USD")]/../td[5]/text()').extract()[0])
item['GBP_buy'] = float(hxs.xpath('//div[@class="rzbContentTextNormal"]//table//td[contains(text(), "GBP")]/../td[4]/text()').extract()[0])
item['GBP_sell'] = float(hxs.xpath('//div[@class="rzbContentTextNormal"]//table//td[contains(text(), "GBP")]/../td[5]/text()').extract()[0])
item['CHF_buy'] = float(hxs.xpath('//div[@class="rzbContentTextNormal"]//table//td[contains(text(), "CHF")]/../td[4]/text()').extract()[0])
item['CHF_sell'] = float(hxs.xpath('//div[@class="rzbContentTextNormal"]//table//td[contains(text(), "CHF")]/../td[5]/text()').extract()[0])
return [item]
| alexhayes/django-google-adwords | django_google_adwords/south_migrations/0003_auto__add_field_ad_ad_approval_status.py | Python | mit | 31,311 | 0.006739 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Ad.ad_approval_status'
db.add_column(u'django_google_adwords_ad', 'ad_approval_status',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Ad.ad_approval_status'
db.delete_column(u'django_google_adwords_ad', 'ad_approval_status')
models = {
u'django_google_adwords.account': {
'Meta': {'object_name': 'Account'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'account_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'account_last_synced': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'ad_group_last_synced': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'ad_last_synced': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'campaign_last_synced': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'active'", 'max_length': '32'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
u'django_google_adwords.ad': {
'Meta': {'object_name': 'Ad'},
            'ad': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ad_approval_status': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'ad_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ads'", 'to': u"orm['django_google_adwords.AdGroup']"}),
'ad_id': ('django.db.models.fields.BigIntegerField', [], {}),
'ad_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'ad_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description_line_1': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_line_2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'destination_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
u'django_google_adwords.adgroup': {
'Meta': {'object_name': 'AdGroup'},
'ad_group': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ad_group_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'ad_group_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ad_groups'", 'to': u"orm['django_google_adwords.Campaign']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
u'django_google_adwords.alert': {
'Meta': {'object_name': 'Alert'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': u"orm['django_google_adwords.Account']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occurred': ('django.db.models.fields.DateTimeField', [], {}),
'severity': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
u'django_google_adwords.campaign': {
'Meta': {'object_name': 'Campaign'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'campaigns'", 'to': u"orm['django_google_adwords.Account']"}),
'budget': ('money.contrib.django.models.fields.MoneyField', [], {'decimal_places': '2', 'default': '0', 'no_currency_field': 'True', 'max_digits': '12', 'blank': 'True', 'null': 'True'}),
'budget_currency': ('money.contrib.django.models.fields.CurrencyField', [], {'default': "'AUD'", 'max_length': '3'}),
'campaign': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'campaign_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'campaign_state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
u'django_google_adwords.dailyaccountmetrics': {
'Meta': {'object_name': 'DailyAccountMetrics'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metrics'", 'to': u"orm['django_google_adwords.Account']"}),
'avg_cpc': ('money.contrib.django.models.fields.MoneyField', [], {'decimal_places': '2', 'default': '0', 'no_currency_field': 'True', 'max_digits': '12', 'blank': 'True', 'null': 'True'}),
'avg_cpc_currency': ('money.contrib.django.models.fields.CurrencyField', [], {'default': "'AUD'", 'max_length': '3'}),
'avg_cpm': ('money.contrib.django.models.fields.MoneyField', [], {'decimal_places': '2', 'default': '0', 'no_currency_field': 'True', 'max_digits': '12', 'blank': 'True', 'null': 'True'}),
'avg_cpm_currency': ('money.contrib.django.models.fields.CurrencyField', [], {'default': "'AUD'", 'max_length': '3'}),
'avg_position': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'click_conversion_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'clicks': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'content_impr_share': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'content_lost_is_budget': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_pla
| AdrienVR/ALT | QCM.py | Python | lgpl-3.0 | 2,961 | 0.044917 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from os import getcwd, listdir, path
from PyQt4.QtCore import SIGNAL
from Question import *
from State import State
class QCM():
# Constructeur
def __init__(self):
self.qcm={}
self.charger()
def charger(self):
curr=getcwd()
for x in listdir(curr+"/qcm"):
if path.isfile(path.join(curr+"/qcm", x)):
pass
else :
self.create(x)
for y in listdir(curr+"/qcm/"+x):
if path.isfile(path.join(curr+"/qcm/"+x, y)):
y=y[:y.find(".")]
self.load(x,y)
def create(self,x):
self.qcm[x]={}
def load(self,x,y):
try:
a=open("qcm/"+x+"/"+y+".txt")
file_lines=a.readlines()
a.close()
#verification fichier
old=""
bloc_size=0
for line in file_lines:
line = line.strip()
if line=="":
if bloc_size<4:
print "<4"
raise Exception
bloc_size=0
else :
bloc_size+=1
old=line
#end
#questions loading
for codec in ["utf-8","ISO-8859-15","utf-16",""]:
try:
blocs=Question().extract(file_lines)
self.qcm[x][y]=self.loader(blocs)
break
except:
pass
print y," loaded correctly"
return True
except Exception, e:
print "loading error : "+x+"/"+y+"\n"+str(e)+"\n"
return False
def loader(self,blocs):
reponse="ABCDEFGHIJKLMNOP"
didi={}
i=0
## gestion des bloc
for bloc in blocs:
i+=1
q=Question()
qexpl=""
rep=reponse #copie de liste abcd pour le nom des reponses
if type(bloc[0])==type(5):
q.question=bloc[1]
for choi in bloc[2:bloc[0]+1]:
q.choix[rep[0]]=choi
rep=rep[1:]
for comm in bloc[bloc[0]+1:len(bloc)-1]:
qexpl+=comm+"\n"
## fu=len(qexpl)/80
## for fx in range(fu):
## fi=(fx+1)*80
## while(qexpl[fi] not in [" ","\n"]):fi+=1
## qexpl=qexpl[:fi]+"\n"+qexpl[fi:]
q.explications=qexpl[1:]
else :
q.question=bloc[0]
for choi in bloc[1:-1]:
q.choix[rep[0]]=choi
rep=rep[1:]
q.reponse=bloc[-1]
#cut the question in line of length 100
lk=len(q.question)/100
for k in range(lk):
fi=(k+1)*100
while(q.question[fi]!=' '):fi+=1
q.question=q.question[:fi]+"\n"+q.question[fi:]
didi[i]=q
return didi
###############
## GETTERS
###############
def getKeyfromInt(self,nb):
        i=self.qcm.keys();i.sort()
return i[nb]
def getListfromKey(self,key):
return self.qcm[key].keys()
def getChapter(self,x,y):
return self.qcm[x][y]
| mworks/mworks | examples/Tests/Stimulus/PythonImage/image_gen.py | Python | mit | 1,385 | 0.000722 |
import gzip
import math
import numpy as np
class Renderer:
def reset(self):
self.fmt = getvar('pixel_buffer_format')
self.width = int(getvar('pixel_buffer_width'))
self.height = int(getvar('pixel_buffer_height'))
self.period = getvar('period')
with gzip.open('background.gz') as fp:
self.bg = np.frombuffer(fp.read(), dtype=np.uint8)
if self.fmt in ('RGB8', 'sRGB8'):
self.bg = self.bg[np.arange(self.bg.shape[0]) % 4 != 3]
self.bg = np.frombuffer(self.bg, dtype=np.dtype('B, B, B'))
elif self.fmt in ('RGBA8', 'sRGBA8'):
self.bg = np.frombuffer(self.bg, dtype=np.dtype('B, B, B, B'))
elif self.fmt == 'RGBA16F':
self.bg = self.bg.astype(np.float16)
self.bg /= 255.0
self.bg = np.frombuffer(self.bg, dtype=np.dtype('f2, f2, f2, f2'))
else:
raise RuntimeError('Invalid buffer format: %r' % self.fmt)
self.bg = self.bg.reshape((self.height, self.width))
def render(self):
elapsed_time = getvar('elapsed_time')
pos = 0.5 * (1.0 - math.cos(2.0 * math.pi * elapsed_time / self.period))
return np.roll(self.bg,
                       (round(self.height * pos), round(self.width * pos)),
(0, 1))
r = Renderer()
reset = r.reset
render = r.render
del r
| J2897/WinSCP_Updater | Update_WinSCP.py | Python | gpl-3.0 | 4,220 | 0.030095 |
# Released under the GNU General Public License version 3 by J2897.
def get_page(page):
import urllib2
source = urllib2.urlopen(page)
return source.read()
title = 'WinSCP Updater'
target = 'Downloading WinSCP'
url = 'http://winscp.net/eng/download.php'
print 'Running: ' + title
print 'Target: ' + target
print 'URL: ' + url
try:
page = get_page(url)
except:
page = None
else:
print 'Got page...'
def msg_box(message, box_type):
import win32api
user_input = win32api.MessageBox(0, message, title, box_type)
return user_input
def stop():
import sys
sys.exit()
if page == None:
msg_box('Could not download the page. You may not be connected to the internet.', 0)
stop()
def find_site_ver(page):
T1 = page.find(target)
if T1 == -1:
return None, None
T2 = page.find('>WinSCP ', T1)
T3 = page.find('<', T2)
T4 = page.find('winscp', T3)
T5 = page.find('.exe', T4)
return page[T2+8:T3], page[T4:T5+4] # 5.1.5, winscp515setup.exe
try:
site_version, FN = find_site_ver(page)
except:
msg_box('Could not search the page.', 0)
stop()
else:
print 'Found: ' + site_version
if site_version == None:
msg_box('The search target has not been found on the page. The formatting, or the text on the page, may have been changed.', 0)
stop()
import os
tmp = os.getenv('TEMP')
PF = os.getenv('PROGRAMFILES')
WinSCP_exe = PF + '\\WinSCP\\WinSCP.exe'
DL = tmp + '\\' + FN
command = [DL, '/SILENT', '/NORESTART']
def DL_file():
import urllib
url = 'http://downloads.sourceforge.net/project/winscp/WinSCP/' + site_version + '/' + FN
urllib.urlretrieve(url, DL)
def sub_proc(command):
import subprocess
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
return p.returncode # is 0 if success
def download_install():
try:
DL_file()
except:
        msg_box('Failed to download ' + FN + ' to ' + tmp + '.', 0)
stop()
else:
print 'Downloaded: ' + FN
try:
RC = sub_proc(command)
except:
RC = None
if RC == None:
msg_box('Failed to execute ' + FN + '.', 0)
stop()
elif RC == 0:
        msg_box('Successfully updated to version ' + site_version + '.', 0)
stop()
else:
msg_box('Successfully spawned new process for ' + FN + '. But the installation appears to have failed.', 0)
stop()
# Check if the WinSCP.exe file exists...
if not os.path.isfile(WinSCP_exe):
# No: Download and install WinSCP, and quit.
print 'WinSCP.exe file doesn\'t exist.'
print 'Installing WinSCP for the first time...'
download_install()
print 'Ending...'
delay(5)
stop()
import win32api
try:
info = win32api.GetFileVersionInfo(WinSCP_exe, "\\")
ms = info['FileVersionMS']
ls = info['FileVersionLS']
file_version = "%d.%d.%d.%d" % (win32api.HIWORD(ms), win32api.LOWORD (ms),
win32api.HIWORD (ls), win32api.LOWORD (ls))
except:
msg_box('Cannot find the file version of the local WinSCP.exe file.', 0)
stop()
else:
print 'Got local file version information...'
# Check if the site_version numbers are in the file_version numbers...
def clean(text):
import re
return re.sub('[^0-9]', '', text)
clean_site_version = clean(site_version)
clean_file_version = clean(file_version)[:len(clean_site_version)]
print 'Local version: ' + clean_file_version
print 'Site version: ' + clean_site_version
def delay(sec):
import time
time.sleep(sec)
if clean_file_version.find(clean_site_version) != -1:
# Yes: Quit.
print 'Match!'
print 'Ending...'
delay(5)
stop()
# Check if WinSCP is running...
def find_proc(exe):
import subprocess
cmd = 'WMIC PROCESS get Caption'
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for line in proc.stdout:
if line.find(exe) != -1:
return True
while find_proc('WinSCP.exe'):
print 'WinSCP is running. Close WinSCP now!'
user_input = msg_box('There is a new version of WinSCP available. Please close WinSCP and press OK to continue.', 1)
if user_input == 1:
pass
elif user_input == 2:
stop()
# Now download and install the new file...
user_input = msg_box('If you use a custom WinSCP.ini file, back it up now and then press OK when you are ready to proceed with the update.', 1)
if user_input == 2:
stop()
download_install()
print 'Ending...'
delay(5)
| JiscPER/jper-oaipmh | setup.py | Python | apache-2.0 | 610 | 0.029508 |
from setuptools import setup, find_packages
setup(
name = 'jper-oaipmh',
    version = '1.0.0',
packages = find_packages(),
install_requires = [
"octopus==1.0.0",
"esprit",
"Flask"
],
url = 'http://cottagelabs.com/',
author = 'Cottage Labs',
author_email = 'us@cottagelabs.com',
description = 'OAI-PMH endpoint for JPER',
classifiers = [
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules'
],
)
| TeamUACS1/410Project | tests/testResponse.py | Python | apache-2.0 | 1,144 | 0.012238 |
# run python freetests.py while server is running
import urllib2
import unittest
BASEURL = "http://cmput410project15.herokuapp.com"
class TestYourWebserver(unittest.TestCase):
"""
Tests some the responses from the server with urllib2
"""
def setUp(self,baseurl=BASEURL):
self.baseurl = baseurl
def test_get_indexhtml(self):
"""Tests to see if we can get 200 OK from the mainpage"""
url = self.baseurl + "/main
|
"
req = urllib2.urlopen(url, None, 3)
self.assertTrue( req.getcode() == 200 , "200 OK Not FOUND!")
def test_get_404(self):
"""This tests for a 404 error for a page that dosent exist"""
url = self.baseurl + "/do-not-implement-this-page-it-is-not-found"
try:
req = urllib2.urlopen(url, None, 3)
self.assertTrue( False, "Should have thrown an HTTP Error!")
except urllib2.HTTPError as e:
self.assertTrue( e.getcode() == 404 , ("404 Not FOUND! %d" % e.getcode()))
else:
self.assertTrue( False, "Another Error was thrown!")
if __name__ == '__main__':
unittest.main()
| lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/core/tests/test_subs.py | Python | gpl-3.0 | 18,518 | 0.00054 |
from sympy import (Symbol, Wild, sin, cos, exp, sqrt, pi, Function, Derivative,
abc, Integer, Eq, symbols, Add, I, Float, log, Rational, Lambda, atan2,
cse, cot, tan, S, Tuple, Basic, Dict, Piecewise, oo, Mul)
from sympy.core.basic import _aresame
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y
def test_subs():
n3 = Rational(3)
e = x
e = e.subs(x, n3)
assert e == Rational(3)
e = 2*x
assert e == 2*x
e = e.subs(x, n3)
assert e == Rational(6)
def test_trigonometric():
n3 = Rational(3)
e = (sin(x)**2).diff(x)
assert e == 2*sin(x)*cos(x)
e = e.subs(x, n3)
assert e == 2*cos(n3)*sin(n3)
e = (sin(x)**2).diff(x)
assert e == 2*sin(x)*cos(x)
e = e.subs(sin(x), cos(x))
assert e == 2*cos(x)**2
assert exp(pi).subs(exp, sin) == 0
assert cos(exp(pi)).subs(exp, sin) == 1
i = Symbol('i', integer=True)
zoo = S.ComplexInfinity
assert tan(x).subs(x, pi/2) is zoo
assert cot(x).subs(x, pi) is zoo
assert cot(i*x).subs(x, pi) is zoo
assert tan(i*x).subs(x, pi/2) == tan(i*pi/2)
assert tan(i*x).subs(x, pi/2).subs(i, 1) is zoo
o = Symbol('o', odd=True)
assert tan(o*x).subs(x, pi/2) == tan(o*pi/2)
def test_powers():
assert sqrt(1 - sqrt(x)).subs(x, 4) == I
assert (sqrt(1 - x**2)**3).subs(x, 2) == - 3*I*sqrt(3)
assert (x**Rational(1, 3)).subs(x, 27) == 3
assert (x**Rational(1, 3)).subs(x, -27) == 3*(-1)**Rational(1, 3)
assert ((-x)**Rational(1, 3)).subs(x, 27) == 3*(-1)**Rational(1, 3)
n = Symbol('n', negative=True)
assert (x**n).subs(x, 0) is S.Infinity
assert exp(-1).subs(S.Exp1, 0) is S.Infinity
assert (x**(4.0*y)).subs(x**(2.0*y), n) == n**2.0
def test_logexppow(): # no eval()
x = Symbol('x', real=True)
w = Symbol('w')
e = (3**(1 + x) + 2**(1 + x))/(3**x + 2**x)
assert e.subs(2**x, w) != e
assert e.subs(exp(x*log(Rational(2))), w) != e
def test_bug():
x1 = Symbol('x1')
x2 = Symbol('x2')
y = x1*x2
assert y.subs(x1, Float(3.0)) == Float(3.0)*x2
def test_subbug1():
# see that they don't fail
(x**x).subs(x, 1)
(x**x).subs(x, 1.0)
def test_subbug2():
# Ensure this does not cause infinite recursion
assert Float(7.7).epsilon_eq(abs(x).subs(x, -7.7))
def test_dict_set():
a, b, c = map(Wild, 'abc')
f = 3*cos(4*x)
r = f.match(a*cos(b*x))
assert r == {a: 3, b: 4}
e = a/b*sin(b*x)
assert e.subs(r) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(r) == 3*sin(4*x) / 4
s = set(r.items())
assert e.subs(s) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(s) == 3*sin(4*x) / 4
assert e.subs(r) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(r) == 3*sin(4*x) / 4
assert x.subs(Dict((x, 1))) == 1
def test_dict_ambigous(): # see #467
y = Symbol('y')
z = Symbol('z')
f = x*exp(x)
g = z*exp(z)
df = {x: y, exp(x): y}
dg = {z: y, exp(z): y}
assert f.subs(df) == y**2
assert g.subs(dg) == y**2
# and this is how order can affect the result
assert f.subs(x, y).subs(exp(x), y) == y*exp(y)
assert f.subs(exp(x), y).subs(x, y) == y**2
# length of args and count_ops are the same so
# default_sort_key resolves ordering...if one
# doesn't want this result then an unordered
# sequence should not be used.
e = 1 + x*y
assert e.subs({x: y, y: 2}) == 5
# here, there are no obviously clashing keys or values
# but the results depend on the order
assert exp(x/2 + y).subs(dict([(exp(y + 1), 2), (x, 2)])) == exp(y + 1)
def test_deriv_sub_bug3():
y = Symbol('y')
f = Function('f')
pat = Derivative(f(x), x, x)
assert pat.subs(y, y**2) == Derivative(f(x), x, x)
assert pat.subs(y, y**2) != Derivative(f(x), x)
def test_equality_subs1():
f = Function('f')
x = abc.x
eq = Eq(f(x)**2, x)
res = Eq(Integer(16), x)
assert eq.subs(f(x), 4) == res
def test_equality_subs2():
f = Function('f')
x = abc.x
eq = Eq(f(x)**2, 16)
assert bool(eq.subs(f(x), 3)) is False
assert bool(eq.subs(f(x), 4)) is True
def test_issue643():
y = Symbol('y')
e = sqrt(x)*exp(y)
assert e.subs(sqrt(x), 1) == exp(y)
def test_subs_dict1():
x, y = symbols('x y')
assert (1 + x*y).subs(x, pi) == 1 + pi*y
assert (1 + x*y).subs({x: pi, y: 2}) == 1 + 2*pi
c2, c3, q1p, q2p, c1, s1, s2, s3 = symbols('c2 c3 q1p q2p c1 s1 s2 s3')
test = (c2**2*q2p*c3 + c1**2*s2**2*q2p*c3 + s1**2*s2**2*q2p*c3
- c1**2*q1p*c2*s3 - s1**2*q1p*c2*s3)
assert (test.subs({c1**2: 1 - s1**2, c2**2: 1 - s2**2, c3**3: 1 - s3**2})
== c3*q2p*(1 - s2**2) + c3*q2p*s2**2*(1 - s1**2)
- c2*q1p*s3*(1 - s1**2) + c3*q2p*s1**2*s2**2 - c2*q1p*s3*s1**2)
def test_mul():
x, y, z, a, b, c = symbols('x y z a b c')
A, B, C = symbols('A B C', commutative=0)
assert (x*y*z).subs(z*x, y) == y**2
assert (z*x).subs(1/x, z) == z*x
assert (x*y/z).subs(1/z, a) == a*x*y
assert (x*y/z).subs(x/z, a) == a*y
assert (x*y/z).subs(y/z, a) == a*x
assert (x*y/z).subs(x/z, 1/a) == y/a
assert (x*y/z).subs(x, 1/a) == y/(z*a)
assert (2*x*y).subs(5*x*y, z) != 2*z/5
assert (x*y*A).subs(x*y, a) == a*A
assert (x**2*y**(3*x/2)).subs(x*y**(x/2), 2) == 4*y**(x/2)
assert (x*exp(x*2)).subs(x*exp(x), 2) == 2*exp(x)
assert ((x**(2*y))**3).subs(x**y, 2) == 64
assert (x*A*B).subs(x*A, y) == y*B
assert (x*y*(1 + x)*(1 + x*y)).subs(x*y, 2) == 6*(1 + x)
assert ((1 + A*B)*A*B).subs(A*B, x*A*B)
assert (x*a/z).subs(x/z, A) == a*A
assert (x**3*A).subs(x**2*A, a) == a*x
assert (x**2*A*B).subs(x**2*B, a) == a*A
assert (x**2*A*B).subs(x**2*A, a) == a*B
assert (b*A**3/(a**3*c**3)).subs(a**4*c**3*A**3/b**4, z) == \
b*A**3/(a**3*c**3)
assert (6*x).subs(2*x, y) == 3*y
assert (y*exp(3*x/2)).subs(y*exp(x), 2) == 2*exp(x/2)
assert (y*exp(3*x/2)).subs(y*exp(x), 2) == 2*exp(x/2)
assert (A**2*B*A**2*B*A**2).subs(A*B*A, C) == A*C**2*A
assert (x*A**3).subs(x*A, y) == y*A**2
assert (x**2*A**3).subs(x*A, y) == y**2*A
assert (x*A**3).subs(x*A, B) == B*A**2
assert (x*A*B*A*exp(x*A*B)).subs(x*A, B) == B**2*A*exp(B*B)
assert (x**2*A*B*A*exp(x*A*B)).subs(x*A, B) == B**3*exp(B**2)
assert (x**3*A*exp(x*A*B)*A*exp(x*A*B)).subs(x*A, B) == \
x*B*exp(B**2)*B*exp(B**2)
assert (x*A*B*C*A*B).subs(x*A*B, C) == C**2*A*B
assert (-I*a*b).subs(a*b, 2) == -2*I
# issue 3262
assert (-8*I*a).subs(-2*a, 1) == 4*I
assert (-I*a).subs(-a, 1) == I
# issue 3342
assert (4*x**2).subs(2*x, y) == y**2
assert (2*4*x**2).subs(2*x, y) == 2*y**2
assert (-x**3/9).subs(-x/3, z) == -z**2*x
assert (-x**3/9).subs(x/3, z) == -z**2*x
assert (-2*x**3/9).subs(x/3, z) == -2*x*z**2
assert (-2*x**3/9).subs(-x/3, z) == -2*x*z**2
assert (-2*x**3/9).subs(-2*x, z) == z*x**2/9
assert (-2*x**3/9).subs(2*x, z) == -z*x**2/9
assert (2*(3*x/5/7)**2).subs(3*x/5, z) == 2*(S(1)/7)**2*z**2
assert (4*x).subs(-2*x, z) == 4*x # try keep subs literal
def test_subs_simple():
a = symbols('a', commutative=True)
x = symbols('x', commutative=False)
assert (2*a).subs(1, 3) == 2*a
assert (2*a).subs(2, 3) == 3*a
assert (2*a).subs(a, 3) == 6
assert sin(2).subs(1, 3) == sin(2)
assert sin(2).subs(2, 3) == sin(3)
assert sin(a).subs(a, 3) == sin(3)
assert (2*x).subs(1, 3) == 2*x
assert (2*x).subs(2, 3) == 3*x
assert (2*x).subs(x, 3) == 6
assert sin(x).subs(x, 3) == sin(3)
def test_subs_constants():
a, b = symbols('a b', commutative=True)
x, y = symbols('x y', commutative=False)
assert (a*b).subs(2*a, 1) == a*b
assert (1.5*a*b).subs(a, 1) == 1.5*b
assert (2*a*b).subs(2*a, 1) == b
assert (2*a*b).subs(4*a, 1) == 2*a*b
assert (x*y).subs(2*x, 1) == x*y
assert (1.5*x*y).subs(x, 1) == 1.5*y
assert (2*x*y).subs(2*x, 1) == y
assert (2*x*y).subs(4*x, 1) == 2*x*y
def test_subs_commutative():
a, b, c, d, K = symbols('a b c d K', commutative=True)
assert (a*b).subs(a*b, K) == K
assert (a*b*a*b).subs(a*b, K) == K**2
assert
| jdwittenauer/ionyx | tests/averaging_regressor_test.py | Python | apache-2.0 | 1,583 | 0.00379 |
import pprint as pp
import pandas as pd
from sklearn.linear_model import Ridge, Lasso
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import GridSearchCV, KFold, cross_val_score
from sklearn.svm import LinearSVR
from ionyx.contrib import AveragingRegressor
from ionyx.datasets import DataSetLoader
print('Beginning averaging regressor test...')
data, X, y = DataSetLoader.load_property_inspection()
data = data.iloc[:1000, :]
X = X[:1000, :]
y = y[:1000]
estimators = [('ridge', Ridge()), ('lasso', Lasso()), ('svm', LinearSVR())]
ensemble = AveragingRegressor(estimators, weights=[1.0, 1.5, 2.0])
ensemble.fit(X, y)
print('Estimators list:')
pp.pprint(ensemble.estimators_)
print('Named estimators dict:')
pp.pprint(ensemble.named_estimators_)
print('Model 1 score = {0}'.format(mean_absolute_error(y, ensemble.estimators_[0].predict(X))))
print('Model 2 score = {0}'.format(mean_absolute_error(y, ensemble.estimators_[1].predict(X))))
print('Model 3 score = {0}'.format(mean_absolute_error(y, ensemble.estimators_[2].predict(X))))
print('Ensemble score = {0}'.format(mean_absolute_error(y, ensemble.predict(X))))
cv = KFold()
print('Cross-validation score = {0}'.format(cross_val_score(ensemble, X, y, cv=cv)))
param_grid = [
{
'ridge__alpha': [0.01, 0.1]
}
]
grid = GridSearchCV(ensemble, param_grid=param_grid, cv=cv, return_train_score=True)
grid.fit(X, y)
results = pd.DataFrame(grid.cv_results_)
results = results.sort_values(by='mean_test_score', ascending=False)
print('Grid search results:')
print(results)
print('Done.')
| girishramnani/pyappbase | tests/test_async.py | Python | mit | 3,468 | 0.00346 |
import asyncio
import time
import unittest
from test_sync import setup
from pyappbase import Appbase
async def hello_world(d, data):
while d[0]:
await asyncio.sleep(0.1)
data.append("Hello")
class AsnycTests(unittest.TestCase):
def setUp(self):
self.data = {
"type": "Books",
"id": "X2",
}
self.appbase = setup(Appbase)
self.appbase._set_async()
self.sync_appbase = setup(Appbase)
print(self.sync_appbase.index({
"type": "Books",
"id": "X2",
"body": {
"department_id": 1,
"department_name": "Books",
"name": "A Fake Book on Network Routing",
"price": 5295
}
}))
def test_async_sync_ping_comparison(self):
"""
This test runs the sync and async methods 'call_counts' times and checks if the async is faster than
sync or not
:return:
"""
# number of simultaneous calls
call_counts = 4
t = time.time()
for i in range(call_counts):
print(self.sync_appbase.ping())
sync_difference = time.time() - t
print()
print("Syncronous method took ", sync_difference, "s")
async def get_data():
return await self.appbase.ping()
t = time.time()
loop = asyncio.get_event_loop()
async def get_data_gathered():
answer = await asyncio.gather(*[get_data() for _ in range(call_counts)], loop=loop)
return answer
print("".join(loop.run_until_complete(get_data_gathered())))
async_difference = time.time() - t
print("Asnycronous method took ", async_difference, "s")
print()
# the async is more fast
self.assertGreater(sync_difference, async_difference)
def test_async_two_methods(self):
"""
simple asynchronously running ping with an async hello_world coroutine
:return:
"""
# some thing multable
wait = [True]
data = []
asyncio.get_event_loop().create_task(hello_world(wait, data))
results = asyncio.get_event_loop().run_until_complete(self.appbase.ping())
wait[0] = False
async def temp():
await asyncio.sleep(1)
asyncio.get_event_loop().run_until_complete(temp())
print(results)
self.assertNotEquals(len(data), 0)
def test_async_get(self):
async def get_data():
return await self.appbase.get(self.data)
results = asyncio.get_event_loop().run_until_complete(get_data())
self.assertEqual(results["_source"]["name"], "A Fake Book on Network Routing")
def test_async_index(self):
async def index_data():
return await self.appbase.index({
"type": "Books",
"id": "X2",
"body": {
"department_id": 1,
"department_name": "Books",
"name": "A Fake Book on Distributed Compute",
"price": 5295
}
})
async def get_data():
return await self.appbase.get(self.data)
index = asyncio.get_event_loop().run_until_complete(index_data())
result = asyncio.get_event_loop().run_until_complete(get_data())
self.assertEqual(result["_source"]["name"], "A Fake Book on Distributed Compute")
| tvgdb/pianissimo | backend/endpoints/library_clean_index_endpoint.py | Python | gpl-3.0 | 1,056 | 0.001894 |
from flask import current_app as app, request
from flask.ext.restful import Resource, abort
from tasks import library_tasks
from util import inject_user
class LibraryCleanIndexEndpoint(Resource):
@inject_user
def get(self):
task_id = request.args.get('task_id')
if not task_id:
abort(400)
task = library_tasks.clean_library.AsyncResult(task_id)
if task.info:
response = task.info.copy()
else:
response = task.result.copy()
if not response.get("state", False):
response["state"] = task.state
if response["total"] == 0:
progress = 0
else:
progress = int(float(response["processed"]) / response["total"] * 100)
response['progress'] = progress
response['finished'] = (response["state"] == "SUCCESS") or (response["processed"] == response["
|
total"])
return resp
|
onse
@inject_user
def post(self):
task = library_tasks.clean_library.delay()
return {"task_id": task.id}
|
Ryuno-Ki/BloGTK3
|
share/blogtk2/lib/blogtk2/main.py
|
Python
|
apache-2.0
| 26,085
| 0.028944
|
#!/usr/bin/python
# Copyright 2009 Jay Reding
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from xml.etree import ElementTree # for Python 2.5 users
except:
from elementtree import ElementTree
from gdata import service
import gdata
import atom
import os
import base64
import time
import threading
import gtk
import gtk.glade
import gnome.ui
import gobject
import webkit
import webbrowser
import feedparser
# Internal Libraries
import config
import editor
import firstrun
import mtapi
import bloggeratom
import atomapi
import metaweblog
import blogger
from blogtk2 import SHARED_FILES
def threaded(f):
def wrapper(*args):
t = threading.Thread(target=f, args=args)
t.start()
return wrapper
class BloGTK:
def __init__(self):
program = gnome.init('blogtk', '2.0')
self.glade = gtk.glade.XML(os.path.join(SHARED_FILES, 'glade', 'blogtk2.glade'))
self.winMain = self.glade.get_widget('winMain')
self.tvBlogs = self.glade.get_widget('tvBlogs')
self.tvPosts = self.glade.get_widget('tvPosts')
self.appbar = self.glade.get_widget('appbarMain')
self.swPreview = self.glade.get_widget('swPreview')
self.dlgAbout = self.glade.get_widget('dlgAbout')
# Create preview
self.html_Preview = webkit.WebView()
# We want to lock down our preview to prevent scripting and plugins
# from doing anything.
self.webkit_settings = webkit.WebSettings()
self.webkit_settings.set_property("enable-plugins", False)
self.webkit_settings.set_property("enable-scripts", False)
self.html_Preview.set_settings(self.webkit_settings)
self.setWelcome()
self.winMain.show()
# Main Window events
self.winMain.connect('delete_event', self.delete_event)
self.winMain.connect('destroy', self.destroy)
dic = { 'on_tbtnRefresh_clicked' : self.refreshPosts,
'on_tvPosts_row_activated' : self.sendPostToEditor,
'gtk_main_quit' : self.destroy,
'on_tbtnHome_clicked' : self.goToBlogHome,
'on_tbtnEdit_clicked' : self.sendPostToEditor,
'on_mniEditPost_activate' : self.sendPostToEditor,
'on_tbtnNew_clicked' : self.createNewPost,
'on_mniNewPost_activate' : self.createNewPost,
'on_tvBlogs_row_activated' : self.createNewPost,
'on_tbtnDelete_clicked' : self.deletePost,
'on_mniDeletePost_activate' : self.deletePost,
'on_tbtnAccounts_clicked' : self.initAccounts,
'on_mniPrefs_activate' : self.initAccounts,
'on_mniOffline_activate' : self.goOffline,
'on_mniDisplayAbout_activate' : self.displayAbout,
'on_mniMainQuit_activate' : self.closeMainWin,
'on_dlgAbout_close' : self.windowHider
}
self.glade.signal_autoconnect(dic)
self.tbtnRefresh = self.glade.get_widget('tbtnRefresh')
self.mniOffline = self.glade.get_widget('mniOffline')
# This needs to be replaced with a variable
self.homeDir = os.path.expanduser('~')
self.configDir = self.homeDir + "/.BloGTK"
self.firstrun = firstrun.BloGTKFirstRun(self)
self.firstrun.checkConfigStatus()
# For offline support, we need a flag to note whether we should go offline or not.
self.isOffline = False
# We need a change flag here to prevent the app from closing
# if the editor has unsaved changes.
self.changeFlag = False
# Create our accelerator group
self.accelGroup = gtk.AccelGroup()
self.winMain.add_accel_group(self.accelGroup)
self.addAccelerators()
def addAccelerators(self):
# Here is where we create our accelerators for various actions.
# Menubar actions
mniNewPost = self.glade.get_widget('mniNewPost')
key, mod = gtk.accelerator_parse("<Control>N")
mniNewPost.add_accelerator("activate", self.accelGroup, key, mod, gtk.ACCEL_VISIBLE)
mniEditPost = self.glade.get_widget('mniEditPost')
key, mod = gtk.accelerator_parse("<Control>E")
mniEditPost.add_accelerator("activate", self.accelGroup, key, mod, gtk.ACCEL_VISIBLE)
mniDeletePost = self.glade.get_widget('mniDeletePost')
key, mod = gtk.accelerator_parse("<Control>D")
mniDeletePost.add_accelerator("activate", self.accelGroup, key, mod, gtk.ACCEL_VISIBLE)
mniPrefs = self.glade.get_widget('mniPrefs')
key, mod = gtk.accelerator_parse("<Control>S")
mniPrefs.add_accelerator("activate", self.accelGroup, key, mod, gtk.ACCEL_VISIBLE)
mniDisplayAbout = self.glade.get_widget('mniDisplayAbout')
key, mod = gtk.accelerator_parse("<Control>A")
mniDisplayAbout.add_accelerator("activate", self.accelGroup, key, mod, gtk.ACCEL_VISIBLE)
# Toolbar actions
key, mod = gtk.accelerator_parse("F5")
self.tbtnRefresh.add_accelerator("clicked", self.accelGroup, key, mod, gtk.ACCEL_VISIBLE)
tbtnHome = self.glade.get_widget('tbtnHome')
key, mod = gtk.accelerator_parse("<Control>H")
tbtnHome.add_accelerator("clicked", self.accelGroup, key, mod, gtk.ACCEL_VISIBLE)
def initBlogList(self):
self.accountArray = self.configReader.getConfigArray()
self.model = gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_STRING)
self.tvBlogs.set_model(self.model)
self.tvBlogs.set_headers_visible(True)
column = gtk.TreeViewColumn("Blog",gtk.CellRendererText(), text=0)
self.idColumn = gtk.TreeViewColumn("ID", gtk.CellRendererText(), text=0)
self.idColumn.set_visible(False)
column.set_resizable(True)
self.tvBlogs.append_column(column)
self.tvBlogs.append_column(self.idColumn)
self.tvBlogs.show()
for account in self.accountArray:
self.model.append(None, [account['name'], account['endpoint'] + '/' + account['id']])
        # Now let's be a Calvinist and predestinate our default selection in the
# TreeView.
sel = self.tvBlogs.get_selection()
sel.set_mode(gtk.SELECTION_SINGLE)
sel.select_path(0)
self.tvBlogs.set_cursor(0, None, False )
self.tvBlogs.connect('cursor_changed', self.checkListing)
def initPostList(self):
self.postsModel = gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)
self.tvPosts.set_model(self.postsModel)
self.tvPosts.set_headers_visible(True)
renderer1 = gtk.CellRendererText()
renderer2 = gtk.CellRendererText()
self.postIdColumn = gtk.TreeViewColumn("Post ID",renderer1, text=0)
self.postIdColumn.set_resizable(True)
        self.postTitleColumn = gtk.TreeViewColumn("Post Title", gtk.CellRendererText(), text=1)
self.postTitleColumn.set_resizable(True)
self.postDateColumn = gtk.TreeViewColumn("Post Date", renderer2, text=2)
self.tvPosts.append_column(self.postIdColumn)
self.tvPosts.append_column(self.postTitleColumn)
self.tvPosts.append_column(self.postDateColumn)
        # Now let's be a Calvinist and predestinate our default selection in the
# TreeView.
sel = self.tvPosts.get_selection()
sel.set_mode(gtk.SELECTION_SINGLE)
sel.select_path(0)
#self.tvPosts.set_cursor(0, None, False)
self.tvPosts.connect('cursor_changed', self.createPreview)
self.checkListing(None)
def initAccounts(self, widget, data=None):
config.ConfigGUI(self)
def main(self):
# Initialize our config reader class
self.configReader = config.ConfigReader()
# Initialize our config writer class
self.configWriter = config.ConfigWriter()
# Initialize the blog listing.
self.initBlogList()
# Initialize the post listing
self.initPostList()
self.tvBlogs.connect('cursor_changed', self.checkListing)
@threaded
def refreshPosts(self, widget):
selected_iter = self.tvBlogs.get_selection().get_selected()[1]
accountName = self.model.get_value(selected_iter, 0)
for account in self.accountArray:
if account['name'] == accountName:
thisAccount = account
self.appbar.push(_('Getting posts'))
        # Make it so the user cannot hit Refresh again while refresh is cycling.
self.tbtnRefresh.set_sensitive(False)
gtk.gdk.threads_enter()
self.t
|
minervaproject/django-db-mutex
|
settings.py
|
Python
|
mit
| 1,916
| 0.000522
|
import os
from django.conf import settings
def configure_settings():
"""
Configures settings for manage.py and for run_tests.py.
"""
if not settings.configured:
        # Determine the database settings depending on whether a test_db var is set (CI mode) or not
test_db = os.environ.get('DB', None)
if test_db is None:
db_config = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'ambition_dev',
'USER': 'ambition_dev',
'PASSWORD': 'ambition_dev',
'HOST': 'localhost'
}
elif test_db == 'postgres':
db_config = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
                'USER': 'postgres',
'NAME': 'db_mutex',
}
elif test_db == 'sqlite':
db_config = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db_mutex',
}
else:
raise RuntimeError('Unsupported test DB {0}'.format(test_db))
settings.configure(
TEST_RUNNER='django_nose.NoseTestSuiteRunner',
NOSE_ARGS=['--nocapture', '--nologcapture', '--verbosity=1'],
MIDDLEWARE_CLASSES=(),
DATABASES={
'default': db_config,
},
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'db_mutex',
'db_mutex.tests',
'django_nose',
),
DEBUG=False,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake'
}
}
)
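
# A minimal sketch of how this helper is typically wired up (a hypothetical
# manage.py, not part of this module); configure_settings() has to run before
# anything that needs Django settings.
#
#   if __name__ == '__main__':
#       import sys
#       from django.core.management import execute_from_command_line
#       configure_settings()
#       execute_from_command_line(sys.argv)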
|
lluxury/P_U_S_A
|
4_documentation/code/authentication_email.py
|
Python
|
mit
| 647
| 0.006299
|
#!/usr/bin/env python
import smtplib
mail_server = 'smtp.example.com'
mail_server_port = 465
from_addr = 'foo@example.com'
to_addr = 'bar@example.com'
from_header = 'From: %s\r\n' % from_addr
to_header = 'To: %s\r\n\r\n' % to_addr
subject_header = 'Subject: Testing SMTP Authentication'
body = 'This mail tests SMTP Authentication'
email_message = '%s\n%s\n%s\n\n%s' % (from_header, to_header, subject_header, body)
s = smtplib.SMTP(mail_server, mail_server_port)
s.set_debuglevel(1)
s.starttls()
s.login("fatalbert", "mysecretpassword")
s.sendmail(from_addr, to_addr, email_message)
s.quit()
# set_debuglevel: logging/debug level
# starttls: start TLS (SSL)
|
ljschumacher/tierpsy-tracker
|
tierpsy/analysis/wcon_export/exportWCON.py
|
Python
|
mit
| 9,522
| 0.014073
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 20:55:19 2016
@author: ajaver
"""
import json
import os
from collections import OrderedDict
import zipfile
import numpy as np
import pandas as pd
import tables
from tierpsy.helper.misc import print_flush
from tierpsy.analysis.feat_create.obtainFeaturesHelper import WormStats
from tierpsy.helper.params import read_unit_conversions, read_ventral_side, read_fps
def getWCONMetaData(fname, READ_FEATURES=False, provenance_step='FEAT_CREATE'):
def _order_metadata(metadata_dict):
ordered_fields = ['strain', 'timestamp', 'gene', 'chromosome', 'allele',
'strain_description', 'sex', 'stage', 'ventral_side', 'media', 'arena', 'food',
'habituation', 'who', 'protocol', 'lab', 'software']
extra_fields = metadata_dict.keys() - set(ordered_fields)
ordered_fields += sorted(extra_fields)
ordered_metadata = OrderedDict()
for field in ordered_fields:
if field in metadata_dict:
ordered_metadata[field] = metadata_dict[field]
return ordered_metadata
with tables.File(fname, 'r') as fid:
if not '/experiment_info' in fid:
experiment_info = {}
else:
experiment_info = fid.get_node('/experiment_info').read()
experiment_info = json.loads(experiment_info.decode('utf-8'))
provenance_tracking = fid.get_node('/provenance_tracking/' + provenance_step).read()
provenance_tracking = json.loads(provenance_tracking.decode('utf-8'))
commit_hash = provenance_tracking['commit_hash']
if 'tierpsy' in commit_hash:
tierpsy_version = commit_hash['tierpsy']
else:
tierpsy_version = commit_hash['MWTracker']
MWTracker_ver = {"name":"tierpsy (https://github.com/ver228/tierpsy-tracker)",
"version": tierpsy_version,
"featureID":"@OMG"}
if not READ_FEATURES:
experiment_info["software"] = MWTracker_ver
else:
#add open_worm_analysis_toolbox info and save as a list of "softwares"
open_worm_ver = {"name":"open_worm_analysis_toolbox (https://github.com/openworm/open-worm-analysis-toolbox)",
"version":commit_hash['open_worm_analysis_toolbox'],
"featureID":""}
experiment_info["software"] = [MWTracker_ver, open_worm_ver]
return _order_metadata(experiment_info)
def __reformatForJson(A):
if isinstance(A, (int, float)):
return A
good = ~np.isnan(A) & (A != 0)
dd = A[good]
if dd.size > 0:
dd = np.abs(np.floor(np.log10(np.abs(dd)))-2)
precision = max(2, int(np.min(dd)))
A = np.round(A.astype(np.float64), precision)
A = np.where(np.isnan(A), None, A)
    # the WCON specification requires a single number when the list has only one element
if A.size == 1:
return A[0]
else:
return A.tolist()
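# Worked example of the rounding rule above (illustrative): for values around
# 1e-3, precision = abs(floor(log10(1e-3)) - 2) = 5, so smaller magnitudes
# keep more decimal places; NaNs become None so the result is JSON-friendly.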
def __addOMGFeat(fid, worm_feat_time, worm_id):
worm_features = OrderedDict()
#add time series features
for col_name, col_dat in worm_feat_time.iteritems():
if not col_name in ['worm_index', 'timestamp']:
worm_features[col_name] = col_dat.values
worm_path = '/features_events/worm_%i' % worm_id
worm_node = fid.get_node(worm_path)
#add event features
for feature_name in worm_node._v_children:
feature_path = worm_path + '/' + feature_name
worm_features[feature_name] = fid.get_node(feature_path)[:]
return worm_features
def _get_ventral_side(features_file):
ventral_side = read_ventral_side(features_file)
if not ventral_side or ventral_side == 'unknown':
ventral_type = '?'
else:
#we will merge the ventral and dorsal contours so the ventral contour is clockwise
ventral_type='CW'
return ventral_type
def _getData(features_file, READ_FEATURES=False, IS_FOR_WCON=True):
if IS_FOR_WCON:
lab_prefix = '@OMG '
else:
lab_prefix = ''
with pd.HDFStore(features_file, 'r') as fid:
if not '/features_timeseries' in fid:
return {} #empty file nothing to do here
features_timeseries = fid['/features_timeseries']
feat_time_group_by_worm = features_timeseries.groupby('worm_index');
ventral_side = _get_ventral_side(features_file)
with tables.File(features_file, 'r') as fid:
        #fps used to adjust timestamp to real time
fps = read_fps(features_file)
        #get pointers to some useful data
skeletons = fid.get_node('/coordinates/skeletons')
dorsal_contours = fid.get_node('/coordinates/dorsal_contours')
ventral_contours = fid.get_node('/coordinates/ventral_contours')
#let's append the data of each individual worm as a element in a list
all_worms_feats = []
#group by iterator will return sorted worm indexes
for worm_id, worm_feat_time in feat_time_group_by_worm:
worm_id = int(worm_id)
#read worm skeletons data
worm_skel = skeletons[worm_feat_time.index]
worm_dor_cnt = dorsal_contours[worm_feat_time.index]
worm_ven_cnt = ventral_contours[worm_feat_time.index]
#start ordered dictionary with the basic features
worm_basic = OrderedDict()
worm_basic['id'] = str(worm_id)
worm_basic['head'] = 'L'
worm_basic['ventral'] = ventral_side
worm_basic['ptail'] = worm_ven_cnt.shape[1]-1 #index starting with 0
worm_basic['t'] = worm_feat_time['timestamp'].values/fps #convert from frames to seconds
worm_basic['x'] = worm_skel[:, :, 0]
worm_basic['y'] = worm_skel[:, :, 1]
contour = np.hstack((worm_ven_cnt, worm_dor_cnt[:, ::-1, :]))
worm_basic['px'] = contour[:, :, 0]
worm_basic['py'] = contour[:, :, 1]
if READ_FEATURES:
worm_features = __addOMGFeat(fid, worm_feat_time, worm_id)
for feat in worm_features:
worm_basic[lab_prefix + feat] = worm_features[feat]
if IS_FOR_WCON:
for x in worm_basic:
if not x in ['id', 'head', 'ventral', 'ptail']:
worm_basic[x] = __reformatForJson(worm_basic[x])
#append features
all_worms_feats.append(worm_basic)
return all_worms_feats
def _getUnits(features_file, READ_FEATURES=False):
fps_out, microns_per_pixel_out, _ = read_unit_conversions(features_file)
xy_units = microns_per_pixel_out[1]
time_units = fps_out[2]
units = OrderedDict()
units["size"] = "mm" #size of the plate
units['t'] = time_units #frames or seconds
for field in ['x', 'y', 'px', 'py']:
units[field] = xy_units #(pixels or micrometers)
if READ_FEATURES:
#TODO how to change microns to pixels when required
ws = WormStats()
for field, unit in ws.features_info['units'].iteritems():
units['@OMG ' + field] = unit
return units
def exportWCONdict(features_file, READ_FEATURES=False):
metadata = getWCONMetaData(features_file, READ_FEATURES)
data = _getData(features_file, READ_FEATURES)
units = _getUnits(features_file, READ_FEATURES)
#units = {x:units[x].replace('degrees', '1') for x in units}
#units = {x:units[x].replace('radians', '1') for x in units}
wcon_dict = OrderedDict()
wcon_dict['metadata'] = metadata
wcon_dict['units'] = units
wcon_dict['data'] = data
return wcon_dict
def getWCOName(features_file):
return features_file.replace('_features.hdf5', '.wcon.zip')
def exportWCON(features_file, READ_FEATURES=False):
base_name = os.path.basename(features_file).replace('_features.hdf5', '')
print_flush("{} Expo
|
hydroffice/hyo_soundspeed
|
hyo2/soundspeed/formats/readers/simrad.py
|
Python
|
lgpl-2.1
| 8,879
| 0.003379
|
import logging
import re
import numpy
logger = logging.getLogger(__name__)
from hyo2.soundspeed.formats.readers.abstract import AbstractTextReader
from hyo2.soundspeed.profile.dicts import Dicts
from hyo2.soundspeed.base.callbacks.cli_callbacks import CliCallbacks
from hyo2.soundspeed.temp import coordinates
from hyo2.soundspeed.temp.regex_helpers import Profile, parseNumbers, getMetaFromTimeRE
# Identifier Input data Data to be used Comment
ssp_fmts_doc = '''
S00 D,c D,c
S01 D,c,T,S D,c,a(D,T,S,L) Same as S12, but used immediately.
S02 D,T,S D,c(D,T,S,L),a(D,T,S,L) Same as S22, but used immediately.
S03 D,T,C D,c(D,T,C,L),a(D,T,S,L) Same as S32,but used immediately.
S04 P,T,S D(P,T,S,L),c(P,T,S,L),a(P,T,S,L) Same as S42,but used immediately.
S05 P,T,C D(P,T,C,L),c(P,T,C,L),a(P,T,C,L) Same as S52,but used immediately.
S06 D,c,a D,c,a Same as S11,but used immediately.
S10 D,c D,c
S11 D,c,a D,c,a
S12 D,c,T,S D,c,a(D,T,S,L)
S13 D,c,a,f D,c,a Frequency dependent
S20 D,T,S D,c(D,T,S,L)
S21 D,T,S,a D,c(D,T,S,L),a
S22 D,T,S D,c(D,T,S,L),a(D,T,S,L)
S23 D,T,S,a,f D,c(D,T,S,L),a Frequency dependent
S30 D,T,C D,c(D,T,S,L)
S31 D,T,C,a D,c(D,T,S,L),a
S32 D,T,C D,c(D,T,S,L),a(D,T,S,L)
S33 D,T,C,a,f D,c(D,T,S,L),a Frequency dependent
S40 P,T,S D(P,T,S,L),c(P,T,S,L)
S41 P,T,S,a D(P,T,S,L),c(P,T,S,L),a
S42 P,T,S D(P,T,S,L),c(P,T,S,L),a(P,T,S,L)
S43 P,T,S,a,f D(P,T,S,L),c(P,T,S,L),a Frequency dependent
S50 P,T,C D(P,T,C,L),c(P,T,C,L)
S51 P,T,C,a D(P,T,C,L),c(P,T,C,L),a
S52 P,T,C D(P,T,C,L),c(P,T,C,L),a(P,T,C,L)
S53 P,T,C,a,f D(P,T,C,L),c(P,T,C,L),a Frequency dependent
'''
SSP_Formats = {}
for fmt in ssp_fmts_doc.splitlines():
m = re.match(r'\s*(?P<fmt>S\d\d)\s*(?P<fields>[\w,]*)\s', fmt)
if m:
SSP_Formats[m.group('fmt')] = [
t.replace('a', 'absorption').replace('c', 'soundspeed').replace('f', 'frequency').replace('D',
'depth').replace(
'T', 'temperature').replace('S', 'salinity') for t in m.group('fields').split(',')]
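# Illustrative: after the substitutions above, SSP_Formats['S12'] is
# ['depth', 'soundspeed', 'temperature', 'salinity'].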
class Simrad(AbstractTextReader):
"""Simrad reader -> CTD style
"""
def __init__(self):
super(Simrad, self).__init__()
self.desc = "Simrad"
self._ext.add('ssp')
self._ext.add('s??')
def read(self, data_path, settings, callbacks=CliCallbacks(), progress=None):
logger.debug('*** %s ***: start' % self.driver)
self.s = settings
self.cb = callbacks
self.init_data() # create a new empty profile list
self._read(data_path=data_path)
self._parse_header()
self._parse_body()
# initialize probe/sensor type
self.ssp.cur.meta.sensor_type = Dicts.sensor_types['CTD']
self.ssp.cur.meta.probe_type = Dicts.probe_types['Simrad']
self.fix()
self.finalize()
logger.debug('*** %s ***: done' % self.driver)
return True
def _parse_header(self):
meta = {}
m = re.search(r'''\$[A-Z][A-Z](?P<fmt>S\d\d), #fmt is between 00 and 53
(?P<id>\d+),
(?P<nmeasure>\d+),
(?P<hour>\d\d)(?P<minute>\d\d)(?P<second>\d\d),
(?P<day>\d\d),
(?P<mon>\d\d),
(?P<yr>\d+),
''', self.lines[0], re.VERBOSE) # ignoring the optional fields of first line
if m:
meta.update(getMetaFromTimeRE(m))
meta['DataSetID'] = m.group('id')
meta['Format'] = "SSP " + m.group('fmt')
meta['fmt'] = m.group('fmt')
m = re.search(r'''(?P<lat>[\d.]+,[NS]),
(?P<lon>[\d.]+,[EW]),
''', self.lines[1], re.VERBOSE) # try the optional second line
if not m:
m = re.search(r'''(?P<lat>[\d.]+,[NS]),
(?P<lon>[\d.]+,[EW]),
''', self.lines[-1], re.VERBOSE) # try at the end of file
if m:
location = coordinates.Coordinate(m.group('lat'), m.group('lon'))
meta.update(Profile.getMetaFromCoord(location))
meta['filename'] = self.fid._path
self.rawmeta = meta
def _parse_body(self):
"""
' Simrad SSP format (See EM Series 1002 Operator Manual for details):
        ' Start ID, $ item 1
' Talker ID AA
' Datagram ID, S10,
' Data Set ID, xxxxx, item 2
' Number of measurements, xxxx, item 3
' UTC time of data acquisition, hhmmss, item 4
' Day of data acquisition, xx, item 5
' Month of data acquisition, xx, item 6
' Year of data acquisition, xxxx, item 7
' First good depth in m x.x,
' Corresponding Sound Velocity in m/s, x.x,
' Skip temp, sal, absorption coeff fields ,,,
' End of line CRLF
' Then, repeat good depth, sv,,,,CRLF until all NPTS are listed.
From the Simrad Datagrams docs:
Data Description Format Length Valid range Note
Start identifier = $ Always 24h 1
Talker identifier aa 2 Capital letters
Datagram identifier Always Sxx, 4 S00to S53 1,2
Data set identifier xxxxx, 6 00000 to 65535
Number of measurements = N xxxx, 5 0001 to 9999 9
UTC time of data acquisition hhmmss, 7 000000 to 235959 3
Day of data acquisition xx, 3 00 to 31 3
Month of data acquisition xx, 3 00 to 12 3
Year of data acquisition xxxx, 5 0000 to 9999 3
N entries of the next 5 fields See note 4
Depth in m fromwater level or
Pressure in MPa
x.x, 2 0 to 12000.00 0 to
1.0000
4
Sound velocity in m/s x.x, 1 1400 to 1700.00
Temperature in _C x.x, 1 5 to 45.00
Salinity in parts per thousand or
Conductivity in S/m
x.x, 1 0 to 45.00 0 to 7.000
Absorption coefficient in dB/km x.x 0 0 to 200.00 5
Data set delimiter CRLF 2 0Dh 0Ah
End of repeat cycle
Latitude in degrees and minutes, plus
optional decimal minutes
llll.ll, Variable 5 0000 to 9000.0... 6
Latitude N/S a, 2 N or S 6
Longitude in degrees and minutes, plus
optional decimal minutes
yyyyy.yy, Variable 6 00000 to 18000.0... 6
Longitude E/W a, 2 Eor W 6
Atmospheric pressure in MPa x.x, 1 0 to 1.0000 6
Frequency in Hz xxxxxx, Variable 7
User given comments c c Variable 6
Optional checksum *hh 8
End of datagram delimiter = \CRLF 5Ch 0Dh 0Ah 3
Note:
1 The datagram identifier identifies what type of data is
included. This is shown in the following table where D is
depth, P is pressure, S is salinity, C is conductivity, c is
soundspeed, 'a' is absorption coefficient, f is frequency and
L is latitude. The notation c(T,S) indicates for example that
the soundspeed is to be calculated from the temperature and
salinity input data. When pressure is used, the atmospheric
pressure m
|
thedrow/invoke
|
invoke/parser/context.py
|
Python
|
bsd-2-clause
| 7,616
| 0.000919
|
from ..vendor.lexicon import Lexicon
from .argument import Argument
def to_flag(name):
name = name.replace('_', '-')
if len(name) == 1:
return '-' + name
return '--' + name
def sort_candidate(arg):
names = arg.names
# TODO: is there no "split into two buckets on predicate" builtin?
shorts = set(x for x in names if len(x.strip('-')) == 1)
longs = set(x for x in names if x not in shorts)
return sorted(shorts if shorts else longs)[0]
def flag_key(x):
"""
Obtain useful key list-of-ints for sorting CLI flags.
"""
# Setup
ret = []
x = sort_candidate(x)
# Long-style flags win over short-style ones, so the first item of
# comparison is simply whether the flag is a single character long (with
# non-length-1 flags coming "first" [lower number])
ret.append(1 if len(x) == 1 else 0)
# Next item of comparison is simply the strings themselves,
# case-insensitive. They will compare alphabetically if compared at this
# stage.
ret.append(x.lower())
# Finally, if the case-insensitive test also matched, compare
# case-sensitive, but inverse (with lowercase letters coming first)
inversed = ''
for char in x:
inversed += char.lower() if char.isupper() else char.upper()
ret.append(inversed)
return ret
class Context(object):
"""
Parsing context with knowledge of flags & their format.
Generally associated with the core program or a task.
When run through a parser, will also hold runtime values filled in by the
parser.
"""
def __init__(self, name=None, aliases=(), args=()):
"""
Create a new ``Context`` named ``name``, with ``aliases``.
``name`` is optional, and should be a string if given. It's used to
tell Context objects apart, and for use in a Parser when determining
what chunk of input might belong to a given Context.
``aliases`` is also optional and should be an iterable containing
strings. Parsing will honor any aliases when trying to "find" a given
context in its input.
May give one or more ``args``, which is a quick alternative to calling
``for arg in args: self.add_arg(arg)`` after initialization.
"""
self.args = Lexicon()
self.positional_args = []
self.flags = Lexicon()
self.inverse_flags = {} # No need for Lexicon here
self.name = name
self.aliases = aliases
for arg in args:
self.add_arg(arg)
def __str__(self):
aliases = (" (%s)" % ', '.join(self.aliases)) if self.aliases else ""
name = (" %r%s" % (self.name, aliases)) if self.name else ""
args = (": %r" % (self.args,)) if self.args else ""
return "<Context%s%s>" % (name, args)
def __repr__(self):
return str(self)
def add_arg(self, *args, **kwargs):
"""
Adds given ``Argument`` (or constructor args for one) to this context.
The Argument in question is added to the following dict attributes:
* ``args``: "normal" access, i.e. the given names are directly exposed
as keys.
* ``flags``: "flaglike" access, i.e. the given names are translated
into CLI flags, e.g. ``"foo"`` is accessible via ``flags['--foo']``.
* ``inverse_flags``: similar to ``flags`` but containing only the
"inverse" versions of boolean flags which default to True. This
allows the parser to track e.g. ``--no-myflag`` and turn it into a
False value for the ``myflag`` Argument.
"""
# Normalize
if len(args) == 1 and isinstance(args[0], Argument):
arg = args[0]
else:
arg = Argument(*args, **kwargs)
# Uniqueness constraint: no name collisions
for name in arg.names:
if name in self.args:
msg = "Tried to add an argument named %r but one already exists!"
raise ValueError(msg % name)
# First name used as "main" name for purposes of aliasing
main = arg.names[0] # NOT arg.name
self.args[main] = arg
# Note positionals in distinct, ordered list attribute
if arg.positional:
self.positional_args.append(arg)
# Add names & nicknames to flags, args
self.flags[to_flag(main)] = arg
for name in arg.nicknames:
self.args.alias(name, to=main)
self.flags.alias(to_flag(name), to=to_flag(main))
# Add attr_name to args, but not flags
if arg.attr_name:
self.args.alias(arg.attr_name, to=main)
# Add to inverse_flags if required
if arg.kind == bool and arg.default == True:
# Invert the 'main' flag name here, which will be a dashed version
# of the primary argument name if underscore-to-dash transformation
# occurred.
inverse_name = to_flag("no-%s" % main)
            self.inverse_flags[inverse_name] = to_flag(main)
@property
    def needs_positional_arg(self):
return any(x.value is None for x in self.positional_args)
def help_for(self, flag):
"""
Return 2-tuple of ``(flag-spec, help-string)`` for given ``flag``.
"""
# Obtain arg obj
if flag not in self.flags:
raise ValueError("%r is not a valid flag for this context! Valid flags are: %r" % (flag, self.flags.keys()))
arg = self.flags[flag]
# Show all potential names for this flag in the output
names = list(set([flag] + self.flags.aliases_of(flag)))
# Determine expected value type, if any
value = {
str: 'STRING',
}.get(arg.kind)
# Format & go
full_names = []
for name in names:
if value:
# Short flags are -f VAL, long are --foo=VAL
# When optional, also, -f [VAL] and --foo[=VAL]
if len(name.strip('-')) == 1:
value_ = ("[%s]" % value) if arg.optional else value
valuestr = " %s" % value_
else:
valuestr = "=%s" % value
if arg.optional:
valuestr = "[%s]" % valuestr
else:
valuestr = ""
# Tack together
full_names.append(name + valuestr)
namestr = ", ".join(sorted(full_names, key=len))
helpstr = arg.help or ""
return namestr, helpstr
def help_tuples(self):
"""
Return sorted iterable of help tuples for all member Arguments.
Sorts like so:
* General sort is alphanumerically
* Short flags win over long flags
* Arguments with *only* long flags and *no* short flags will come
first.
* When an Argument has multiple long or short flags, it will sort using
the most favorable (lowest alphabetically) candidate.
This will result in a help list like so::
--alpha, --zeta # 'alpha' wins
--beta
-a, --query # short flag wins
-b, --argh
-c
"""
# TODO: argument/flag API must change :(
# having to call to_flag on 1st name of an Argument is just dumb.
# To pass in an Argument object to help_for may require moderate
# changes?
# Cast to list to ensure non-generator on Python 3.
return list(map(
lambda x: self.help_for(to_flag(x.name)),
sorted(self.flags.values(), key=flag_key)
))
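
# A minimal usage sketch (illustrative, not part of the original module). It
# assumes Argument accepts ``names``, ``kind`` and ``default`` keyword
# arguments, which the attribute access in ``add_arg`` above suggests:
#
#   ctx = Context(name='build',
#                 args=(Argument(names=('clean', 'c'), kind=bool, default=True),))
#   ctx.flags['--clean']        # the Argument object
#   ctx.inverse_flags           # {'--no-clean': '--clean'}
#   ctx.help_for('--clean')     # ('-c, --clean', '')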
|
madelynfreed/rlundo
|
venv/lib/python2.7/site-packages/IPython/html/services/contents/filecheckpoints.py
|
Python
|
gpl-3.0
| 6,869
| 0
|
"""
File-based Checkpoints implementations.
"""
import os
import shutil
from tornado.web import HTTPError
from .checkpoints import (
Checkpoints,
GenericCheckpointsMixin,
)
from .fileio import FileManagerMixin
from IPython.utils import tz
from IPython.utils.path import ensure_dir_exists
from IPython.utils.py3compat import getcwd
from IPython.utils.traitlets import Unicode
class FileCheckpoints(FileManagerMixin, Checkpoints):
"""
A Checkpoints that caches checkpoints for files in adjacent
directories.
Only works with FileContentsManager. Use GenericFileCheckpoints if
you want file-based checkpoints with another ContentsManager.
"""
checkpoint_dir = Unicode(
'.ipynb_checkpoints',
config=True,
help="""The directory name in which to keep file checkpoints
This is a path relative to the file's own directory.
By default, it is .ipynb_checkpoints
""",
)
root_dir = Unicode(config=True)
    def _root_dir_default(self):
try:
return self.parent.root_dir
        except AttributeError:
return getcwd()
# ContentsManager-dependent checkpoint API
def create_checkpoint(self, contents_mgr, path):
"""Create a checkpoint."""
checkpoint_id = u'checkpoint'
src_path = contents_mgr._get_os_path(path)
dest_path = self.checkpoint_path(checkpoint_id, path)
self._copy(src_path, dest_path)
return self.checkpoint_model(checkpoint_id, dest_path)
def restore_checkpoint(self, contents_mgr, checkpoint_id, path):
"""Restore a checkpoint."""
src_path = self.checkpoint_path(checkpoint_id, path)
dest_path = contents_mgr._get_os_path(path)
self._copy(src_path, dest_path)
# ContentsManager-independent checkpoint API
def rename_checkpoint(self, checkpoint_id, old_path, new_path):
"""Rename a checkpoint from old_path to new_path."""
old_cp_path = self.checkpoint_path(checkpoint_id, old_path)
new_cp_path = self.checkpoint_path(checkpoint_id, new_path)
if os.path.isfile(old_cp_path):
self.log.debug(
"Renaming checkpoint %s -> %s",
old_cp_path,
new_cp_path,
)
with self.perm_to_403():
shutil.move(old_cp_path, new_cp_path)
def delete_checkpoint(self, checkpoint_id, path):
"""delete a file's checkpoint"""
path = path.strip('/')
cp_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(cp_path):
self.no_such_checkpoint(path, checkpoint_id)
self.log.debug("unlinking %s", cp_path)
with self.perm_to_403():
os.unlink(cp_path)
def list_checkpoints(self, path):
"""list the checkpoints for a given file
This contents manager currently only supports one checkpoint per file.
"""
path = path.strip('/')
checkpoint_id = "checkpoint"
os_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_path):
return []
else:
return [self.checkpoint_model(checkpoint_id, os_path)]
# Checkpoint-related utilities
def checkpoint_path(self, checkpoint_id, path):
"""find the path to a checkpoint"""
path = path.strip('/')
parent, name = ('/' + path).rsplit('/', 1)
parent = parent.strip('/')
basename, ext = os.path.splitext(name)
filename = u"{name}-{checkpoint_id}{ext}".format(
name=basename,
checkpoint_id=checkpoint_id,
ext=ext,
)
os_path = self._get_os_path(path=parent)
cp_dir = os.path.join(os_path, self.checkpoint_dir)
with self.perm_to_403():
ensure_dir_exists(cp_dir)
cp_path = os.path.join(cp_dir, filename)
return cp_path
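        # Worked example (illustrative): for path 'work/nb.ipynb' and the
        # default checkpoint_id 'checkpoint', this resolves to
        # <root_dir>/work/.ipynb_checkpoints/nb-checkpoint.ipynb.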
def checkpoint_model(self, checkpoint_id, os_path):
"""construct the info dict for a given checkpoint"""
stats = os.stat(os_path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
id=checkpoint_id,
last_modified=last_modified,
)
return info
# Error Handling
def no_such_checkpoint(self, path, checkpoint_id):
raise HTTPError(
404,
u'Checkpoint does not exist: %s@%s' % (path, checkpoint_id)
)
class GenericFileCheckpoints(GenericCheckpointsMixin, FileCheckpoints):
"""
Local filesystem Checkpoints that works with any conforming
ContentsManager.
"""
def create_file_checkpoint(self, content, format, path):
"""Create a checkpoint from the current content of a notebook."""
path = path.strip('/')
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
self.log.debug("creating checkpoint for %s", path)
with self.perm_to_403():
self._save_file(os_checkpoint_path, content, format=format)
# return the checkpoint info
return self.checkpoint_model(checkpoint_id, os_checkpoint_path)
def create_notebook_checkpoint(self, nb, path):
"""Create a checkpoint from the current content of a notebook."""
path = path.strip('/')
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
self.log.debug("creating checkpoint for %s", path)
with self.perm_to_403():
self._save_notebook(os_checkpoint_path, nb)
# return the checkpoint info
return self.checkpoint_model(checkpoint_id, os_checkpoint_path)
def get_notebook_checkpoint(self, checkpoint_id, path):
path = path.strip('/')
self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_checkpoint_path):
self.no_such_checkpoint(path, checkpoint_id)
return {
'type': 'notebook',
'content': self._read_notebook(
os_checkpoint_path,
as_version=4,
),
}
def get_file_checkpoint(self, checkpoint_id, path):
path = path.strip('/')
self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_checkpoint_path):
self.no_such_checkpoint(path, checkpoint_id)
content, format = self._read_file(os_checkpoint_path, format=None)
return {
'type': 'file',
'content': content,
'format': format,
}
|
art-solopov/mdwiki
|
common/admin.py
|
Python
|
mit
| 369
| 0.01626
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
class UserAdmin(BaseUserAdmin):
list_display = ('username', 'email',
'first_name', 'last_name',
                    'is_staff', 'is_active',
                    )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
|
agile-geoscience/striplog
|
striplog/legend.py
|
Python
|
apache-2.0
| 30,661
| 0.000457
|
"""
Defines a legend for displaying components.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
# from builtins import object
from io import StringIO
import csv
import warnings
import random
import re
import itertools
try:
from functools import partialmethod
except: # Python 2
from utils import partialmethod
import numpy as np
from matplotlib import patches
import matplotlib.pyplot as plt
from .component import Component
from . import utils
from .defaults import LEGEND__NSDOE
from .defaults import LEGEND__Canstrat
from .defaults import LEGEND__NAGMDM__6_2
from .defaults import LEGEND__NAGMDM__6_1
from .defaults import LEGEND__NAGMDM__4_3
from .defaults import LEGEND__SGMC
from .defaults import TIMESCALE__ISC
from .defaults import TIMESCALE__USGS_ISC
from .defaults import TIMESCALE__DNAG
###############################################
# This module is not used directly, but must
# be imported in order to register new hatches.
from . import hatches # DO NOT DELETE
###############################################
class LegendError(Exception):
"""
Generic error class.
"""
pass
class Decor(object):
"""
A single display style. A Decor describes how to display a given set
of Component properties.
In general, you will not usually use a Decor on its own. Instead, you
will want to use a Legend, which is just a list of Decors, and leave
the Decors to the Legend.
Args:
params (dict): The parameters you want in the Decor. There must be a
Component to attach the decoration to, and at least 1 other attribute.
It's completely up to you, but you probably want at least a colour
(hex names like #AAA or #d3d3d3, or matplotlib's English-language
            names listed at http://ageo.co/modelrcolour are acceptable).
The only other parameter the class recognizes for now is 'width',
which is the width of the striplog element.
Example:
my_rock = Component({ ... })
d = {'component': my_rock, 'colour': 'red'}
my_decor = Decor(d)
"""
def __init__(self, *params, **kwargs):
"""
Supports the passing in of a single dictionary, or the passing of
keyword arguments.
Possibly a bad idea; review later.
"""
for p in params:
params = p
for k, v in kwargs.items() or params.items():
k = k.lower().replace(' ', '_')
if k in ['colour', 'color']:
k = 'colour'
if not v:
v = '#eeeeee'
try:
v = v.lower()
except AttributeError:
v = v
setattr(self, k, v)
if (getattr(self, 'component', None) is None) and (getattr(self, 'curve', None) is None):
raise LegendError("You must provide a Component to decorate.")
if len(self.__dict__) < 2:
raise LegendError("You must provide at least one decoration.")
# Make sure we have a width, and it's a float, even if it's None.
try:
self.width = float(getattr(self, 'width', None))
except (TypeError, ValueError):
self.width = None
# Make sure we have a hatch, even if it's None. And correct 'none's.
self.hatch = getattr(self, 'hatch', None)
if self.hatch == 'none':
self.hatch = None
def __repr__(self):
s = repr(self.__dict__)
return "Decor({0})".format(s)
def __str__(self):
s = str(self.__dict__)
return "Decor({0})".format(s)
def __add__(self, other):
if isinstance(other, self.__class__):
result = [self, other]
return Legend(result)
elif isinstance(other, Legend):
return other + self
else:
raise LegendError("You can only add legends or decors.")
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# Weed out empty elements
s = {k: v for k, v in self.__dict__.items() if v}
o = {k: v for k, v in other.__dict__.items() if v}
# Compare
if s == o:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
# If we define __eq__ we also need __hash__ otherwise the object
# becomes unhashable. All this does is hash the frozenset of the
# keys. (You can only hash immutables.)
def __hash__(self):
return hash(frozenset(self.__dict__.keys()))
def _repr_html_(self):
"""
Jupyter Notebook magic repr function.
"""
rows, c = '', ''
s = '<tr><td><strong>{k}</strong></td><td style="{stl}">{v}</td></tr>'
for k, v in self.__dict__.items():
if k == '_colour':
k = 'colour'
c = utils.text_colour_for_hex(v)
style = 'color:{}; background-color:{}'.format(c, v)
else:
style = 'color:black; background-color:white'
if k == 'component':
try:
v = v._repr_html_()
except AttributeError:
v = v.__repr__()
rows += s.format(k=k, v=v, stl=style)
html = '<table>{}</table>'.format(rows)
return html
def _repr_html_row_(self, keys):
"""
Jupyter Notebook magic repr function as a row – used by
``Legend._repr_html_()``.
"""
tr, th, c = '', '', ''
r = '<td style="{stl}">{v}</td>'
h = '<th>{k}</th>'
for k in keys:
v = self.__dict__.get(k)
if k == '_colour':
k = 'colour'
c = utils.text_colour_for_hex(v)
style = 'color:{}; background-color:{}'.format(c, v)
else:
style = 'color:black; background-color:white'
if k == 'component':
try:
v = v._repr_html_()
except AttributeError:
v = v.__repr__()
tr += r.format(v=v, stl=style)
th += h.format(k=k)
return th, tr
@property
def colour(self):
return self._colour
@colour.setter
def colour(self, c):
numbers = r'([\.0-9]+), ?([\.0-9]+), ?([\.0-9]+)'
pattern = re.compile(r'[\(\[]?' + numbers + r'[\)\]]?')
try:
x = pattern.search(c)
except:
x = None
if x is not None:
try:
x = list(map(float, x.groups()))
if x[0] > 1 or x[1] > 1 or x[2] > 1:
x = [int(i) for i in x]
colour = utils.rgb_to_hex(x)
except KeyError:
raise LegendError("Colour not recognized: " + c)
elif not c:
colour = '#eeeeee'
elif type(c) in [list, tuple]:
try:
colour = utils.rgb_to_hex(c)
except TypeError:
raise LegendError("Colour not recognized: " + c)
elif c[0] != '#':
try:
colour = utils.name_to_hex(c)
except KeyError:
raise LegendError("Colour not recognized: " + c)
elif (c[0] == '#') and (len(c) == 4):
# Three-letter hex
colour = c[:2] + c[1] + 2*c[2] + 2*c[3]
elif (c[0] == '#') and (len(c) == 8):
# 8-letter hex
colour = c[:-2]
else:
colour = c
self._colour = colour
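        # Illustrative: a falsy value falls back to '#eeeeee'; a three-letter
        # hex such as '#abc' expands to '#aabbcc'; an 8-character hex has its
        # trailing alpha pair dropped.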
@property
def rgb(self):
"""
Returns an RGB triple equivalent to the hex colour.
"""
return utils.hex_to_rgb(self.colour)
@property
def keys(self):
"""
Returns the keys of the Decor's dict.
"""
return list(self.__dict__.keys())
@classmethod
def random(cls, component, match_only=None):
"""
Returns a minimal Decor with a random colour.
"""
c = component.__dict__.copy()
if match_only is None:
match_only = c.keys()
for k in list(c.keys
|
Mangopay/mangopay2-python-sdk
|
tests/test_bankaccounts.py
|
Python
|
mit
| 18,848
| 0.001061
|
# -*- coding: utf-8 -*-
from mangopay.utils import Address
from tests import settings
from tests.resources import BankAccount
from tests.test_base import BaseTest, BaseTestLive
from datetime import date
import responses
import time
class BankAccountsTest(BaseTest):
@responses.activate
def test_create_bankaccount_iban(self):
self.mock_natural_user()
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1169419/bankaccounts/IBAN',
'body': {
"UserId": "1169419",
"Type": "IBAN",
"OwnerName": "Victor Hugo",
"OwnerAddress": {
"AddressLine1": "AddressLine1",
"AddressLine2": "AddressLine2",
"City": "City",
"Region": "Region",
"PostalCode": "11222",
"Country": "FR"
},
"IBAN": "FR7630004000031234567890143",
"BIC": "BNPAFRPP",
"Id": "1169675",
"Tag": "custom tag",
"CreationDate": 1383561267
},
'status': 200
})
params = {
"owner_name": "Victor Hugo",
"user": self.natural_user,
"type": "IBAN",
"owner_address": Address(address_line_1='AddressLine1', address_line_2='AddressLine2',
city='City', region='Region',
postal_code='11222', country='FR'),
"iban": "FR7630004000031234567890143",
"bic": "BNPAFRPP",
"tag": "custom tag"
}
bankaccount = BankAccount(**params)
self.assertIsNone(bankaccount.get_pk())
bankaccount.save()
self.assertIsInstance(bankaccount, BankAccount)
for key, value in params.items():
self.assertEqual(getattr(bankaccount, key), value)
self.assertIsNotNone(bankaccount.get_pk())
@responses.activate
def test_create_bankaccount_gb(self):
self.mock_natural_user()
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1169419/bankaccounts/GB',
'body': {
"UserId": "1169419",
"Type": "GB",
"OwnerName": "Victor Hugo",
"OwnerAddress": {
"AddressLine1": "AddressLine1",
"AddressLine2": "AddressLine2",
"City": "City",
"Region": "Region",
"PostalCode": "11222",
"Country": "FR"
},
"AccountNumber": "621
|
36016",
"SortCode": "404865",
"Id": "38290008",
"Tag": "custom tag",
"CreationDate": 1383561267
},
'status': 200
})
params = {
"tag": "custom tag",
"user": self.natural_user,
"type": "GB",
"owner_name": "Victor Hugo",
"owner_address": Address(address_line_1='AddressLine1', address_line_2='AddressLine2',
city='City', region='Region',
postal_code='11222', country='FR'),
"account_number": "62136016",
"sort_code": "404865"
}
bankaccount = BankAccount(**params)
self.assertIsNone(bankaccount.get_pk())
bankaccount.save()
self.assertIsInstance(bankaccount, BankAccount)
for key, value in params.items():
self.assertEqual(getattr(bankaccount, key), value)
self.assertIsNotNone(bankaccount.get_pk())
@responses.activate
def test_create_bankaccount_us(self):
self.mock_natural_user()
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1169419/bankaccounts/US',
'body': {
"UserId": "1169419",
"OwnerName": "Victor Hugo",
"OwnerAddress": {
"AddressLine1": "AddressLine1",
"AddressLine2": "AddressLine2",
"City": "City",
"Region": "Region",
"PostalCode": "11222",
"Country": "FR"
},
"Type": "US",
"Id": "6775383",
"Tag": "custom tag",
"CreationDate": 1431964711,
"AccountNumber": "123",
"ABA": "123456789",
"DepositAccountType": "CHECKING"
},
'status': 200
})
params = {
"owner_name": "Victor Hugo",
"user": self.natural_user,
"type": "US",
"owner_address": Address(address_line_1='AddressLine1', address_line_2='AddressLine2',
city='City', region='Region',
postal_code='11222', country='FR'),
"tag": "custom tag",
"account_number": "123",
"aba": "123456789",
"deposit_account_type": "CHECKING"
}
bankaccount = BankAccount(**params)
self.assertIsNone(bankaccount.get_pk())
bankaccount.save()
self.assertIsInstance(bankaccount, BankAccount)
for key, value in params.items():
self.assertEqual(getattr(bankaccount, key), value)
self.assertIsNotNone(bankaccount.get_pk())
@responses.activate
def test_create_bankaccount_ca(self):
self.mock_natural_user()
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1169419/bankaccounts/CA',
'body': {
"UserId": "1169419",
"OwnerName": "Victor Hugo",
"OwnerAddress": {
"AddressLine1": "AddressLine1",
"AddressLine2": "AddressLine2",
"City": "City",
"Region": "Region",
"PostalCode": "11222",
"Country": "FR"
},
"Type": "CA",
"Id": "6775449",
"Tag": "custom tag",
"CreationDate": 1431964854,
"AccountNumber": "123",
"InstitutionNumber": "1234",
"BranchCode": "12345",
"BankName": "banque nationale of canada"
},
'status': 200
})
params = {
"owner_name": "Victor Hugo",
"user": self.natural_user,
"type": "CA",
"owner_address": Address(address_line_1='AddressLine1', address_line_2='AddressLine2',
city='City', region='Region',
postal_code='11222', country='FR'),
"tag": "custom tag",
"bank_name": "banque nationale of canada",
"institution_number": "1234",
"branch_code": "12345",
"account_number": "123"
}
bankaccount = BankAccount(**params)
self.assertIsNone(bankaccount.get_pk())
bankaccount.save()
self.assertIsInstance(bankaccount, BankAccount)
for key, value in params.items():
self.assertEqual(getattr(bankaccount, key), value)
self.assertIsNotNone(bankaccount.get_pk())
@responses.activate
def test_create_bankaccount_other(self):
self.mock_natural_user()
self.register_mock({
'method': responses.POST,
'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1169419/bankaccounts/OTHER',
'body': {
"UserId": "1169419",
"OwnerName": "Victor Hugo",
"OwnerAddress": {
|
ef-ctx/tornwamp
|
tornwamp/session.py
|
Python
|
apache-2.0
| 5,335
| 0.000937
|
"""
Abstract websocket connections (dual channel between clients and server).
"""
import socket
import errno
from datetime import datetime
from tornwamp import topic
from tornwamp.identifier import create_global_id
class ConnectionDict(dict):
"""
Connections manager.
"""
@property
def dict(self):
"""
Return a python dictionary which could be jsonified.
"""
return {key: value.dict for key, value in self.items()}
def filter_by_property_value(self, attr_name, attr_value):
"""
Provided an attribute name and its value, retrieve connections which
have it.
"""
items = []
for _, connection in self.items():
if getattr(connection, attr_name) == attr_value:
items.append(connection)
return items
connections = ConnectionDict()
class ClientConnection(object):
"""
Represent a client connection.
"""
existing_ids = []
def __init__(self, websocket, **details):
"""
Create a connection object provided:
        - websocket (tornado.websocket.WebSocketHandler instance)
        - details: dictionary of metadata associated with the connection
"""
self.id = create_global_id()
# set connection attributes, if any is given
for name, value in details.items():
setattr(self, name, value)
# meta-data
# TODO: update this
self.last_update = datetime.now().isoformat()
# communication-related
self._websocket = websocket
self.topics = {
"subscriber": {},
"publisher": {}
}
# when connection should be closed but something is left
self.zombie = False
self.zombification_datetime = None
@property
def peer(self):
try:
ip, port = self._websocket.ws_connection.stream.socket.getpeername()
except (AttributeError, OSError, socket.error) as error:
if not hasattr(error, 'errno') or error.errno in (errno.EBADF, errno.ENOTCONN):
# Expected errnos:
# - EBADF: bad file descriptor (connection was closed)
# - ENOTCONN: not connected (connection was never open)
ip = self._websocket.request.remote_ip
name = u"{0}:HACK|{1}".format(ip, self.id)
else:
# Rethrow exception in case of unknown errno
raise
else:
forwarded_ip = self._websocket.request.headers.get("X-Forwarded-For")
if forwarded_ip:
ip = forwarded_ip
name = u"{0}:{1}|{2}".format(ip, port, self.id)
return name
def get_subscription_id(self, topic_name):
"""
Return connection's subscription_id for a specific topic.
"""
subscribe_subscription = self.topics['subscriber'].get(topic_name)
publish_subscription = self.topics['publisher'].get(topic_name)
return subscribe_subscription or publish_subscription
def add_subscription_channel(self, subscription_id, topic_name):
"""
Add topic as a subscriber.
"""
self.topics["subscriber"][topic_name] = subscription_id
def remove_subscription_channel(self, topic_name):
"""
Remove topic as a subscriber.
"""
self.topics.get("subscriber", {}).pop(topic_name, None)
def add_publishing_channel(self, subscription_id, topic_name):
"""
Add topic as a publisher.
"""
self.topics["publisher"][topic_name] = subscription_id
def remove_publishing_channel(self, topic_name):
"""
Remove topic as a publisher.
"""
self.topics.get("publisher", {}).pop(topic_name, None)
def get_publisher_topics(self):
"""
Return list of topics to which this connection has subscribed.
"""
return list(self.topics["publisher"])
def get_topics(self):
"""
Return a dictionary containing subscriptions_ids and connections - no
matter if they are subscribers or publishers.
"""
return dict(self.topics["subscriber"], **self.topics["publisher"])
@property
def topics_by_subscription_id(self):
return {subscription_id: topic for topic, subscription_id in self.get_topics().items()}
@property
def dict(self):
"""
Return dict representation of the current Connection, keeping only data
that could be exported to JSON (convention: attributes which do not
start with _).
"""
return {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
def zombify(self):
"""
Make current connection a zombie:
- remove all its topics
- remove it from the TopicsManager
In WAMP, in order to disconnect, we're supposed to do a GOODBYE
handshake.
Considering the server wanted to disconnect the client for some reason,
we leave the client in a "zombie" state, so it can't subscribe to
topics and can't receive messages from other clients.
"""
self.zombification_datetime = datetime.now().isoformat()
self.zombie = True
topic.topics.remove_connection(self)
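
# A minimal usage sketch (illustrative, not part of the original module); the
# websocket argument would normally be a tornado.websocket.WebSocketHandler,
# but any placeholder works for the bookkeeping shown here.
if __name__ == '__main__':  # pragma: no cover
    conn = ClientConnection(websocket=None, user='alice')
    connections[conn.id] = conn
    sub_id = create_global_id()
    conn.add_subscription_channel(sub_id, 'news')
    assert conn.get_subscription_id('news') == sub_id
    assert connections.filter_by_property_value('user', 'alice') == [conn]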
|
jacebrowning/gdm
|
tests/test_cli.py
|
Python
|
mit
| 1,452
| 0.000689
|
# pylint: disable=unused-variable,redefined-outer-name,expression-not-assigned
import os
from unittest.mock import call, patch
import pytest
from expecter import expect
from gitman import cli
@pytest.fixture
def config(tmpdir):
tmpdir.chdir()
path = str(tmpdir.join("gdm.yml"))
open(path, 'w').close()
return path
@pytest.fixture
def location(tmpdir):
tmpdir.chdir()
path = str(tmpdir.join("gdm.yml"))
with open(path, 'w') as outfile:
outfile.write("location: foo")
return str(tmpdir.join("foo"))
def describe_show():
@patch('gitman.common.show')
def it_prints_location_by_default(show, location):
cli.main(['show'])
expect(show.mock_calls) == [call(location, color='path')]
@patch('gitman.common.show')
    def it_can_print_a_dependency_path(show, location):
cli.main(['show', 'bar'])
expect(show.mock_calls) == [call(os.path.join(location, "bar"), color='path')]
def it_exits_when_no_config_found(tmpdir):
tmpdir.chdir()
with expect.raises(SystemExit):
cli.main(['show'])
def describe_edit():
@patch('gitman.system.launch')
def it_launches_the_config(launch, config):
cli.main(['edit'])
        expect(launch.mock_calls) == [call(config), call().__bool__()]
def it_exits_when_no_config_found(tmpdir):
tmpdir.chdir()
        with expect.raises(SystemExit):
cli.main(['edit'])
|
esthermm/odoo-addons
|
mrp_routing_cost/models/mrp_workcenter.py
|
Python
|
agpl-3.0
| 6,237
| 0.00016
|
# -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import api, fields, models
from openerp.addons import decimal_precision as dp
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
costs_hour = fields.Float(
string='Cost per hour', digits=dp.get_precision('Product Price'))
costs_cycle = fields.Float(
string='Cost per cycle', digits=dp.get_precision('Product Price'))
op_number = fields.Integer(string='Operators')
op_avg_cost = fields.Float(
string='Operators avg. cost', digits=dp.get_precision('Product Price'))
subtotal_hour = fields.Float(
string='Subtotal by hours', compute='_compute_subtotals',
digits=dp.get_precision('Product Price'))
subtotal_operator = fields.Float(
string='Subtotal by operators', compute='_compute_subtotals',
digits=dp.get_precision('Product Price'))
subtotal_cycle = fields.Float(
string='Subtotal by cycle', compute='_compute_subtotals',
digits=dp.get_precision('Product Price'))
subtotal = fields.Float(
string='Subtotal', compute='_compute_subtotals',
digits=dp.get_precision('Product Price'))
unit_final_cost = fields.Float(
string='Final Unit Cost', compute='_compute_subtotals',
digits=dp.get_precision('Product Price'),
help='Cost by final product unit.')
@api.depends('workcenter_id', 'workcenter_id.costs_hour',
'workcenter_id.costs_cycle', 'workcenter_id.op_number',
'workcenter_id.op_avg_cost', 'workcenter_id.fixed_hour_cost',
'workcenter_id.fixed_cycle_cost')
def _compute_subtotals(self):
for line in self:
line.subtotal_hour = line.hour * line.costs_hour
line.subtotal_cycle = line.cycle * line.costs_cycle
line.subtotal_operator = \
line.hour * line.op_avg_cost * line.op_number
line.subtotal =\
line.subtotal_hour + line.subtotal_cycle +\
line.subtotal_operator
line.unit_final_cost = \
line.subtotal / (line.production_id.product_qty or 1.0)
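            # Worked example (illustrative): 2 h at 10/h, 3 cycles at 5/cycle
            # and 2 operators averaging 8/h give
            # subtotal = 2*10 + 3*5 + 2*8*2 = 67; for 10 produced units,
            # unit_final_cost = 6.7.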
@api.onchange('workcenter_id')
def onchange_workcenter_id(self):
for line in self.filtered('workcenter_id'):
line.costs_hour = line.workcenter_id.costs_hour
line.costs_cycle = line.workcenter_id.costs_cycle
line.op_number = line.workcenter_id.op_number
line.op_avg_cost = line.workcenter_id.op_avg_cost
class MrpProduction(models.Model):
_inherit = 'mrp.production'
routing_cycle_total = fields.Float(
string='Total (Cycle)', compute='_compute_routing_total',
digits=dp.get_precision('Product Price'))
routing_hour_total = fields.Float(
string='Total (Hour)', compute='_compute_routing_total',
digits=dp.get_precision('Product Price'))
routing_total = fields.Float(
string='Total', compute='_compute_routing_total',
digits=dp.get_precision('Product Price'))
routing_operator_total = fields.Float(
string='Total (Operator)', compute='_compute_routing_total',
digits=dp.get_precision('Product Price'))
production_total = fields.Float(
string='Production Total', compute='_compute_production_total',
digits=dp.get_precision('Product Price'))
@api.depends('workcenter_lines', 'workcenter_lines.subtotal')
def _compute_routing_total(self):
by_unit = self.env['mrp.config.settings']._get_parameter(
'subtotal.by.unit')
for mrp in self.filtered(lambda m: m.workcenter_lines and
m.product_qty):
subtotal = sum(
mrp.mapped('workcenter_lines.subtotal_cycle'))
mrp.routing_cycle_total =\
                subtotal / mrp.product_qty if by_unit else subtotal
subtotal = sum(
mrp.mapped('workcenter_lines.subtotal_hour'))
mrp.routing_hour_total =\
subtotal / mrp.product_qty if by_unit else subtotal
subtotal = sum(
mrp.mapped('workcenter_lines.subtotal_operator'))
mrp.routing_operator_total =\
subtotal / mrp.product_qty if by_unit else subtotal
mrp.routing_total =\
                mrp.routing_cycle_total + mrp.routing_hour_total + \
mrp.routing_operator_total
@api.multi
def _compute_production_total(self):
by_unit = self.env['mrp.config.settings']._get_parameter(
'subtotal.by.unit')
for prod in self:
total = prod.routing_total
try:
total += prod.scheduled_total
except:
pass
prod.production_total =\
total * (prod.product_qty if by_unit else 1)
@api.multi
def button_recompute_total(self):
fields_list = ['production_total']
for field in fields_list:
self.env.add_todo(self._fields[field], self)
self.recompute()
class MrpWorkcenter(models.Model):
_inherit = 'mrp.workcenter'
fixed_hour_cost = fields.Boolean(string='Fixed hour cost', default=False)
fixed_cycle_cost = fields.Boolean(string='Fixed cycle cost', default=False)
class MrpBom(models.Model):
_inherit = 'mrp.bom'
@api.multi
def _prepare_wc_line(self, wc_use, level=0, factor=1):
self.ensure_one()
vals = super(MrpBom, self)._prepare_wc_line(
wc_use, level=level, factor=factor)
workcenter = self.env['mrp.workcenter'].browse(
vals.get('workcenter_id'))
vals.update({
'cycle': workcenter.capacity_per_cycle
if workcenter.fixed_cycle_cost else vals.get('cycle'),
'hour': workcenter.time_cycle if workcenter.fixed_hour_cost
else vals.get('hour'),
'costs_hour': workcenter.costs_hour,
'costs_cycle': workcenter.costs_cycle,
'op_number': workcenter.op_number,
'op_avg_cost': workcenter.op_avg_cost,
})
return vals
|
joonamo/photoplaces
|
photoplaces/photoplaces_web/migrations/0004_auto_20141105_1236.py
|
Python
|
mit
| 984
| 0.001016
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('photoplaces_web', '0003_photocluster_normalized_centers_dirty'),
]
operations = [
migrations.AddField(
model_name='normalizedphotoset',
name='hour_mean_natural',
field=models.FloatField(null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='normalizedphotoset',
name='month_mean_natural',
            field=models.FloatField(null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='photocluster',
name='normalized_set',
field=models.OneToOneField(related_name='+', null=True, blank=True, to='photoplaces_web.NormalizedPhotoSet'),
preserve_default=True,
),
]
|
qbuat/rootpy
|
rootpy/extern/byteplay.py
|
Python
|
gpl-3.0
| 33,903
| 0.004808
|
# byteplay - Python bytecode assembler/disassembler.
# Copyright (C) 2006-2010 Noam Yorav-Raphael
# Homepage: http://code.google.com/p/byteplay
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Many thanks to Greg X for adding support for Python 2.6 and 2.7!
__version__ = '0.2'
__all__ = ['opmap', 'opname', 'opcodes',
'cmp_op', 'hasarg', 'hasname', 'hasjrel', 'hasjabs',
'hasjump', 'haslocal', 'hascompare', 'hasfree', 'hascode',
'hasflow', 'getse',
'Opcode', 'SetLineno', 'Label', 'isopcode', 'Code',
'CodeList', 'printcodelist']
import opcode
from dis import findlabels
import types
from array import array
import operator
import itertools
import sys
import warnings
from cStringIO import StringIO
######################################################################
# Define opcodes and information about them
python_version = '.'.join(str(x) for x in sys.version_info[:2])
if python_version not in ('2.4', '2.5', '2.6', '2.7'):
warnings.warn("byteplay doesn't support Python version "+python_version)
class Opcode(int):
"""An int which represents an opcode - has a nicer repr."""
def __repr__(self):
return opname[self]
__str__ = __repr__
class CodeList(list):
"""A list for storing opcode tuples - has a nicer __str__."""
def __str__(self):
f = StringIO()
printcodelist(self, f)
return f.getvalue()
opmap = dict((name.replace('+', '_'), Opcode(code))
for name, code in opcode.opmap.iteritems()
if name != 'EXTENDED_ARG')
opname = dict((code, name) for name, code in opmap.iteritems())
opcodes = set(opname)
def globalize_opcodes():
for name, code in opmap.iteritems():
globals()[name] = code
__all__.append(name)
globalize_opcodes()
cmp_op = opcode.cmp_op
hasarg = set(x for x in opcodes if x >= opcode.HAVE_ARGUMENT)
hasconst = set(Opcode(x) for x in opcode.hasconst)
hasname = set(Opcode(x) for x in opcode.hasname)
hasjrel = set(Opcode(x) for x in opcode.hasjrel)
hasjabs = set(Opcode(x) for x in opcode.hasjabs)
hasjump = hasjrel.union(hasjabs)
haslocal = set(Opcode(x) for x in opcode.haslocal)
hascompare = set(Opcode(x) for x in opcode.hascompare)
hasfree = set(Opcode(x) for x in opcode.hasfree)
hascode = set([MAKE_FUNCTION, MAKE_CLOSURE])
class _se:
"""Quick way of defining static stack effects of opcodes"""
# Taken from assembler.py by Phillip J. Eby
NOP = 0,0
POP_TOP = 1,0
ROT_TWO = 2,2
ROT_THREE = 3,3
ROT_FOUR = 4,4
DUP_TOP = 1,2
UNARY_POSITIVE = UNARY_NEGATIVE = UNARY_NOT = UNARY_CONVERT = \
UNARY_INVERT = GET_ITER = LOAD_ATTR = 1,1
IMPORT_FROM = 1,2
BINARY_POWER = BINARY_MULTIPLY = BINARY_DIVIDE = BINARY_FLOOR_DIVIDE = \
BINARY_TRUE_DIVIDE = BINARY_MODULO = BINARY_ADD = BINARY_SUBTRACT = \
BINARY_SUBSCR = BINARY_LSHIFT = BINARY_RSHIFT = BINARY_AND = \
BINARY_XOR = BINARY_OR = COMPARE_OP = 2,1
INPLACE_POWER = INPLACE_MULTIPLY = INPLACE_DIVIDE = \
INPLACE_FLOOR_DIVIDE = INPLACE_TRUE_DIVIDE = INPLACE_MODULO = \
INPLACE_ADD = INPLACE_SUBTRACT = INPLACE_LSHIFT = INPLACE_RSHIFT = \
INPLACE_AND = INPLACE_XOR = INPLACE_OR = 2,1
SLICE_0, SLICE_1, SLICE_2, SLICE_3 = \
(1,1),(2,1),(2,1),(3,1)
STORE_SLICE_0, STORE_SLICE_1, STORE_SLICE_2, STORE_SLICE_3 = \
(2,0),(3,0),(3,0),(4,0)
DELETE_SLICE_0, DELETE_SLICE_1, DELETE_SLICE_2, DELETE_SLICE_3 = \
(1,0),(2,0),(2,0),(3,0)
STORE_SUBSCR = 3,0
DELETE_SUBSCR = STORE_ATTR = 2,0
DELETE_ATTR = STORE_DEREF = 1,0
PRINT_NEWLINE = 0,0
PRINT_EXPR = PRINT_ITEM = PRINT_NEWLINE_TO = IMPORT_STAR = 1,0
STORE_NAME = STORE_GLOBAL = STORE_FAST = 1,0
PRINT_ITEM_TO = 2,0
LOAD_LOCALS = LOAD_CONST = LOAD_NAME = LOAD_GLOBAL = LOAD_FAST = \
LOAD_CLOSURE = LOAD_DEREF = BUILD_MAP = 0,1
DELETE_FAST = DELETE_GLOBAL = DELETE_NAME = 0,0
EXEC_STMT = 3,0
BUILD_CLASS = 3,1
STORE_MAP = MAP_ADD = 2,0
SET_ADD = 1,0
if python_version == '2.4':
YIELD_VALUE = 1,0
        IMPORT_NAME = 1,1
LIST_APPEND = 2,0
elif python_version == '2.5':
YIELD_VALUE = 1,1
IMPORT_NAME = 2,1
LIST_APPEND = 2,0
elif python_version == '2.6':
YIELD_VALUE = 1,1
IMPORT_NAME = 2,1
LIST_APPEND = 2,0
elif python_version == '2.7':
YIELD_VALUE = 1,1
IMPORT_NAME = 2,1
LIST_APPEND = 1,0
_se = dict((op, getattr(_se, opname[op]))
           for op in opcodes
if hasattr(_se, opname[op]))
hasflow = opcodes - set(_se) - \
set([CALL_FUNCTION, CALL_FUNCTION_VAR, CALL_FUNCTION_KW,
CALL_FUNCTION_VAR_KW, BUILD_TUPLE, BUILD_LIST,
UNPACK_SEQUENCE, BUILD_SLICE, DUP_TOPX,
RAISE_VARARGS, MAKE_FUNCTION, MAKE_CLOSURE])
if python_version == '2.7':
hasflow = hasflow - set([BUILD_SET])
def getse(op, arg=None):
"""Get the stack effect of an opcode, as a (pop, push) tuple.
If an arg is needed and is not given, a ValueError is raised.
If op isn't a simple opcode, that is, the flow doesn't always continue
to the next opcode, a ValueError is raised.
"""
try:
return _se[op]
except KeyError:
# Continue to opcodes with an effect that depends on arg
pass
if arg is None:
raise ValueError, "Opcode stack behaviour depends on arg"
def get_func_tup(arg, nextra):
if arg > 0xFFFF:
raise ValueError, "Can only split a two-byte argument"
return (nextra + 1 + (arg & 0xFF) + 2*((arg >> 8) & 0xFF),
1)
if op == CALL_FUNCTION:
return get_func_tup(arg, 0)
elif op == CALL_FUNCTION_VAR:
return get_func_tup(arg, 1)
elif op == CALL_FUNCTION_KW:
return get_func_tup(arg, 1)
elif op == CALL_FUNCTION_VAR_KW:
return get_func_tup(arg, 2)
elif op == BUILD_TUPLE:
return arg, 1
elif op == BUILD_LIST:
return arg, 1
elif python_version == '2.7' and op == BUILD_SET:
return arg, 1
elif op == UNPACK_SEQUENCE:
return 1, arg
elif op == BUILD_SLICE:
return arg, 1
elif op == DUP_TOPX:
return arg, arg*2
elif op == RAISE_VARARGS:
return 1+arg, 1
elif op == MAKE_FUNCTION:
return 1+arg, 1
elif op == MAKE_CLOSURE:
if python_version == '2.4':
raise ValueError, "The stack effect of MAKE_CLOSURE depends on TOS"
else:
return 2+arg, 1
else:
raise ValueError, "The opcode %r isn't recognized or has a special "\
"flow control" % op
class SetLinenoType(object):
def __repr__(self):
return 'SetLineno'
SetLineno = SetLinenoType()
class Label(object):
pass
def isopcode(obj):
"""Return whether obj is an opcode - not SetLineno or Label"""
return obj is not SetLineno and not isinstance(obj, Label)
# Flags from code.h
CO_OPTIMIZED = 0x0001 # use LOAD/STORE_FAST instead of _NAME
CO_NEWLOCALS = 0x0002 # only cleared for module/exec code
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
CO_NESTED = 0x0010 # ???
CO_GENERATOR = 0x0020
CO_NOFREE = 0x0040 # set if no free or cell vars
CO_GENERATOR_ALLOWED = 0x1000 # unused
# The future flags are only used on code generation, so we can ignore them.
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_PolyTrend_Seasonal_DayOfWeek_NoAR.py
|
Python
|
bsd-3-clause
| 171
| 0.046784
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['PolyTrend'] , ['Seasonal_DayOfWeek'] , ['NoAR'] );
|
d0ugal-archive/django-formadmin
|
tests/test_formadmin/admin.py
|
Python
|
mit
| 375
| 0.002667
|
from formadmin.admin import FormAdmin
from formadmin import sites
from test_formadmin.forms import EmailForm, UploadForm
class EmailFormAdmin(FormAdmin):
app_label = "AdminForms
|
"
verbose_name = "Email Staff"
class UploadFormAdmin(FormAdmin):
verbose_name = "Upload Logo"
sites.register(EmailForm, EmailFormAdmin)
sites.register(UploadForm, UploadFormAdmin)
|
WebCampZg/conference-web
|
people/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 2,302
| 0.005213
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(unique=True, max_length=75, verbose_name='email address')),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=(models.Model,),
),
]
|
tjfontaine/linode-python
|
linode/api.py
|
Python
|
mit
| 40,693
| 0.007028
|
#!/usr/bin/python
# vim:ts=2:sw=2:expandtab
"""
A Python library to perform low-level Linode API functions.
Copyright (c) 2010 Timothy J Fontaine <tjfontaine@gmail.com>
Copyright (c) 2010 Josh Wright <jshwright@gmail.com>
Copyright (c) 2010 Ryan Tucker <rtucker@gmail.com>
Copyright (c) 2008 James C Sinclair <james@irgeek.com>
Copyright (c) 2013 Tim Heckman <tim@timheckman.net>
Copyright (c) 2014 Magnus Appelquist <magnus.appelquist@cloudnet.se>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from decimal import Decimal
import logging
import urllib
import urllib2
import copy
try:
import json
FULL_BODIED_JSON = True
except:
import simplejson as json
FULL_BODIED_JSON = False
try:
import requests
from types import MethodType
def requests_request(url, fields, headers):
return requests.Request(method="POST", url=url, headers=headers, data=fields)
def requests_open(request):
r = request.prepare()
s = requests.Session()
s.verify = True
response = s.send(r)
response.read = MethodType(lambda x: x.text, response)
return response
URLOPEN = requests_open
URLREQUEST = requests_request
except:
try:
import VEpycurl
def vepycurl_request(url, fields, headers):
return (url, fields, headers)
def vepycurl_open(request):
c = VEpycurl.VEpycurl(verifySSL=2)
url, fields, headers = request
nh = [ '%s: %s' % (k, v) for k,v in headers.items()]
c.perform(url, fields, nh)
return c.results()
URLOPEN = vepycurl_open
URLREQUEST = vepycurl_request
except:
import warnings
ssl_message = 'using urllib instead of pycurl, urllib does not verify SSL remote certificates, there is a risk of compromised communication'
warnings.warn(ssl_message, RuntimeWarning)
def urllib_request(url, fields, headers):
fields = urllib.urlencode(fields)
return urllib2.Request(url, fields, headers)
URLOPEN = urllib2.urlopen
URLREQUEST = urllib_request
class MissingRequiredArgument(Exception):
"""Raised when a required parameter is missing."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def __reduce__(self):
return (self.__class__, (self.value, ))
class ApiError(Exception):
"""Raised when a Linode API call returns an error.
Returns:
[{u'ERRORCODE': Error code number,
u'ERRORMESSAGE': 'Description of error'}]
ErrorCodes that can be returned by any method, per Linode API specification:
0: ok
1: Bad request
2: No action was requested
3: The requested class does not exist
4: Authentication failed
5: Object not found
6: A required property is missing for this action
7: Property is invalid
8: A data validation error has occurred
9: Method Not Implemented
10: Too many batched requests
11: RequestArray isn't valid JSON or WDDX
13: Permission denied
   30: Charging the credit card failed
   31: Credit card is expired
   40: Limit of Linodes added per hour reached
41: Linode must have no disks before delete
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def __reduce__(self):
return (self.__class__, (self.value, ))
class ApiInfo:
valid_commands = {}
valid_params = {}
LINODE_API_URL = 'https://api.linode.com/api/'
VERSION = '0.0.1'
class LowerCaseDict(dict):
def __init__(self, copy=None):
if copy:
if isinstance(copy, dict):
for k,v in copy.items():
dict.__setitem__(self, k.lower(), v)
else:
for k,v in copy:
dict.__setitem__(self, k.lower(), v)
def __getitem__(self, key):
return dict.__getitem__(self, key.lower())
def __setitem__(self, key, value):
dict.__setitem__(self, key.lower(), value)
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def get(self, key, def_val=None):
return dict.get(self, key.lower(), def_val)
def setdefault(self, key, def_val=None):
return dict.setdefault(self, key.lower(), def_val)
def update(self, copy):
for k,v in copy.items():
dict.__setitem__(self, k.lower(), v)
def fromkeys(self, iterable, value=None):
d = self.__class__()
for k in iterable:
dict.__setitem__(d, k.lower(), value)
return d
def pop(self, key, def_val=None):
return dict.pop(self, key.lower(), def_val)
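# Behaviour sketch for LowerCaseDict (illustrative only):
#   d = LowerCaseDict({'Api_Key': 'abc'})
#   'API_KEY' in d   -> True, every lookup folds the key to lower case
#   d['api_key']     -> 'abc'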
class Api:
"""Linode API (version 2) client class.
Instantiate with: Api(), or Api(optional parameters)
Optional parameters:
key - Your API key, from "My Profile" in the LPM (default: None)
batching - Enable batching support (default: False)
This interfaces with the Linode API (version 2) and receives a response
via JSON, which is then parsed and returned as a dictionary (or list
of dictionaries).
In the event of API problems, raises ApiError:
api.ApiError: [{u'ERRORCODE': 99,
u'ERRORMESSAGE': u'Error Message'}]
If you do not specify a key, the only method you may use is
user_getapikey(username, password). This will retrieve and store
the API key for a given user.
Full documentation on the API can be found from Linode at:
http://www.linode.com/api/
"""
def __init__(self, key=None, batching=False):
self.__key = key
self.__urlopen = URLOPEN
self.__request = URLREQUEST
self.batching = batching
self.__batch_cache = []
@staticmethod
def valid_commands():
"""Returns a list of API commands supported by this class."""
return list(ApiInfo.valid_commands.keys())
@staticmethod
def valid_params():
"""Returns a list of all parameters used by methods of this class."""
return list(ApiInfo.valid_params.keys())
def batchFlush(self):
"""Initiates a batch flush. Raises Exception if not in batching mode."""
if not self.batching:
raise Exception('Cannot flush requests when not batching')
s = json.dumps(self.__batch_cache)
self.__batch_cache = []
request = { 'api_action' : 'batch', 'api_requestArray' : s }
return self.__send_request(request)
def __getattr__(self, name):
"""Return a callable for any undefined attribute and assume it's an API call"""
if name.startswith('__'):
raise AttributeError()
def generic_request(*args, **kw):
request = LowerCaseDict(kw)
request['api_action'] = name.replace('_', '.')
if self.batching:
self.__batch_cache.append(request)
logging.debug('Batched: %s', json.dumps(request))
else:
return self.__send_request(request)
generic_request.__name__ = name
return generic_request
def __send_request(self, request):
if self.__key:
request['api_key'] = self.__key
elif request['api_action'] != 'user.getapikey':
raise Exception('Must call user_getapikey to fetch key')
request['api_responseFormat'] = 'json'
request_log = copy.deepcopy(request)
redact = ['api_key','rootsshkey','rootpass']
for r in redact:
if r in request_log:
request_log[r] = '{0}: xxxx REDACTED xxxx'.format(r)
logging.debug('Parameters '+str(request_log))
|
ewdurbin/pymemcache
|
pymemcache/client/base.py
|
Python
|
apache-2.0
| 39,766
| 0
|
# Copyright 2012 Pinterest.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import socket
import six
from pymemcache import pool
from pymemcache.exceptions import (
MemcacheClientError,
MemcacheUnknownCommandError,
MemcacheIllegalInputError,
MemcacheServerError,
MemcacheUnknownError,
MemcacheUnexpectedCloseError
)
RECV_SIZE = 4096
VALID_STORE_RESULTS = {
b'set': (b'STORED',),
b'add': (b'STORED', b'NOT_STORED'),
b'replace': (b'STORED', b'NOT_STORED'),
b'append': (b'STORED', b'NOT_STORED'),
b'prepend': (b'STORED', b'NOT_STORED'),
b'cas': (b'STORED', b'EXISTS', b'NOT_FOUND'),
}
VALID_STRING_TYPES = (six.text_type, six.string_types)
# Some of the values returned by the "stats" command
# need mapping into native Python types
def _parse_bool_int(value):
return int(value) != 0
def _parse_bool_string_is_yes(value):
return value == b'yes'
def _parse_float(value):
return float(value.replace(b':', b'.'))
def _parse_hex(value):
return int(value, 8)
STAT_TYPES = {
# General stats
b'version': six.binary_type,
b'rusage_user': _parse_float,
b'rusage_system': _parse_float,
b'hash_is_expanding': _parse_bool_int,
b'slab_reassign_running': _parse_bool_int,
# Settings stats
b'inter': six.binary_type,
b'growth_factor': float,
b'stat_key_prefix': six.binary_type,
b'umask': _parse_hex,
b'detail_enabled': _parse_bool_int,
b'cas_enabled': _parse_bool_int,
b'auth_enabled_sasl': _parse_bool_string_is_yes,
b'maxconns_fast': _parse_bool_int,
b'slab_reassign': _parse_bool_int,
b'slab_automove': _parse_bool_int,
}
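# Conversion sketch (sample raw byte-strings, purely illustrative):
#   _parse_bool_int(b'1')        -> True
#   _parse_float(b'0:031250')    -> 0.03125   (memcached reports some floats with ':')
#   _parse_hex(b'0700')          -> 448       (the umask value is parsed as an octal literal)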
# Common helper functions.
def _check_key(key, allow_unicode_keys, key_prefix=b''):
"""Checks key and add key_prefix."""
if allow_unicode_keys:
if isinstance(key, six.text_type):
key = key.encode('utf8')
elif isinstance(key, VALID_STRING_TYPES):
try:
key = key.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
raise MemcacheIllegalInputError("Non-ASCII key: '%r'" % (key,))
key = key_prefix + key
if b' ' in key or b'\n' in key:
raise MemcacheIllegalInputError(
"Key contains space and/or newline: '%r'" % (key,)
)
if len(key) > 250:
raise MemcacheIllegalInputError("Key is too long: '%r'" % (key,))
return key
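# Behaviour sketch for _check_key (hypothetical keys):
#   _check_key(b'user:42', False, key_prefix=b'app1:')  -> b'app1:user:42'
#   _check_key('bad key', False)  -> raises MemcacheIllegalInputError (space in key)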
class Client(object):
"""
A client for a single memcached server.
*Keys and Values*
Keys must have a __str__() method which should return a str with no more
than 250 ASCII characters and no whitespace or control characters. Unicode
strings must be encoded (as UTF-8, for example) unless they consist only
of ASCII characters that are neither whitespace nor control characters.
Values must have a __str__() method to convert themselves to a byte
string. Unicode objects can be a problem since str() on a Unicode object
will attempt to encode it as ASCII (which will fail if the value contains
code points larger than U+127). You can fix this with a serializer or by
just calling encode on the string (using UTF-8, for instance).
If you intend to use anything but str as a value, it is a good idea to use
a serializer and deserializer. The pymemcache.serde library has some
already implemented serializers, including one that is compatible with
the python-memcache library.
*Serialization and Deserialization*
The constructor takes two optional functions, one for "serialization" of
values, and one for "deserialization". The serialization function takes
two arguments, a key and a value, and returns a tuple of two elements, the
serialized value, and an integer in the range 0-65535 (the "flags"). The
deserialization function takes three parameters, a key, value and flags
and returns the deserialized value.
Here is an example using JSON for non-str values:
.. c
|
ode-block:: python
def serialize_json(key, value):
if type(value) == st
|
r:
return value, 1
return json.dumps(value), 2
def deserialize_json(key, value, flags):
if flags == 1:
return value
if flags == 2:
return json.loads(value)
raise Exception("Unknown flags for value: {1}".format(flags))
*Error Handling*
All of the methods in this class that talk to memcached can throw one of
the following exceptions:
* MemcacheUnknownCommandError
* MemcacheClientError
* MemcacheServerError
* MemcacheUnknownError
* MemcacheUnexpectedCloseError
* MemcacheIllegalInputError
* socket.timeout
* socket.error
Instances of this class maintain a persistent connection to memcached
which is terminated when any of these exceptions are raised. The next
call to a method on the object will result in a new connection being made
to memcached.
"""
def __init__(self,
server,
serializer=None,
deserializer=None,
connect_timeout=None,
timeout=None,
no_delay=False,
ignore_exc=False,
socket_module=socket,
key_prefix=b'',
default_noreply=True,
allow_unicode_keys=False):
"""
Constructor.
Args:
server: tuple(hostname, port)
serializer: optional function, see notes in the class docs.
deserializer: optional function, see notes in the class docs.
connect_timeout: optional float, seconds to wait for a connection to
the memcached server. Defaults to "forever" (uses the underlying
default socket timeout, which can be very long).
timeout: optional float, seconds to wait for send or recv calls on
the socket connected to memcached. Defaults to "forever" (uses the
underlying default socket timeout, which can be very long).
no_delay: optional bool, set the TCP_NODELAY flag, which may help
with performance in some cases. Defaults to False.
ignore_exc: optional bool, True to cause the "get", "gets",
"get_many" and "gets_many" calls to treat any errors as cache
misses. Defaults to False.
socket_module: socket module to use, e.g. gevent.socket. Defaults to
the standard library's socket module.
key_prefix: Prefix of key. You can use this as namespace. Defaults
to b''.
default_noreply: bool, the default value for 'noreply' as passed to
store commands (except from cas, incr, and decr, which default to
False).
allow_unicode_keys: bool, support unicode (utf8) keys
Notes:
The constructor does not make a connection to memcached. The first
call to a method on the object will do that.
"""
self.server = server
self.serializer = serializer
self.deserializer = deserializer
self.connect_timeout = connect_timeout
self.timeout = timeout
self.no_delay = no_delay
self.ignore_exc = ignore_exc
self.socket_module = socket_module
self.sock = None
if isinstance(key_prefix, six.text_type):
key_prefix = key_prefix.encode('ascii')
if not isinstance(key_prefix, bytes):
raise TypeError("key_prefix should be bytes.")
        self.key_prefix = key_prefix
|
ua-snap/downscale
|
old/old_bin/convert_tas_hur_to_vap.py
|
Python
|
mit
| 2,809
| 0.050196
|
# script to convert temperature (tas) and the newly generated Relative Humidity (hur) grids to vapour pressure (vap)
def convert_to_hur( tas_arr, vap_arr ):
import numpy as np
with np.errstate( over='ignore' ):
esa_arr = 6.112 * np.exp( 17.62 * tas_arr/ (243.12 + tas_arr) )
# esa_arr = 6.112 * np.exp( 22.46 * tas_arr / (272.62 + tas_arr) )
return vap_arr/esa_arr * 100
def convert_to_vap( tas_arr, hur_arr ):
import numpy as np
with np.errstate( over='ignore' ):
esa_arr = 6.112 * np.exp( 17.62 * tas_arr / (243.12 + tas_arr) )
# esa_arr = 6.112 * np.exp( 22.46*tas_arr / (272.62 + tas_arr) )
return (hur_arr * esa_arr) / 100
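# Rough sanity check of the Magnus-type formula above (illustrative numbers):
# at tas = 20.0 C the saturation pressure esa is about 23.4 hPa, so hur = 50 %
# gives vap ~ 11.7 hPa, and convert_to_hur(20.0, 11.7) returns roughly 50 % again.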
def run( x ):
tas = rasterio.open( x[0] )
hur = rasterio.open( x[1] )
meta = tas.meta
meta[ 'dtype' ] = 'float32' # set it to float32
meta.update( compress='lzw' )
meta.pop( 'transform' )
tas_arr = tas.read( 1 )
hur_arr = hur.read( 1 )
vap_arr = convert_to_vap( tas_arr, hur_arr )
# mask it:
mask = tas.read_masks( 1 )
vap_arr[ mask == 0 ] = tas.nodata
# build an output filename from the input tas and write out -- changed to deal with pathing!
output_filename = x[1].replace( 'hur', 'vap' )
output_filename = output_filename.replace( '_metric_', '_hPa_' )
# output_filename = x[0].replace( 'tas', 'vap' )
# output_filename = output_filename.replace( '_C_', '_hPa_' )
dirname = os.path.dirname( output_filename )
try:
if not os.path.exists( dirname ):
os.makedirs( dirname )
except:
pass
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( vap_arr.astype( np.float32 ), 1 )
return output_filename
if __name__ == '__main__':
# import modules
import os, glob, rasterio
import numpy as np
from pathos import multiprocessing as mp
# args
ncores = 40
    tas_input_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_november_final/ar5'
    hur_input_path = '/Data/malindgren/cru_november_final/ar5'
models = [ 'IPSL-CM5A-LR', 'GISS-E2-R', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]
for model in models:
print model
tas_files = sorted( glob.glob( os.path.join( tas_input_path, model, 'tas', 'downscaled', '*.tif' ) ) )
hur_files = sorted( glob.glob( os.path.join( hur_input_path, model, 'hur', 'downscaled', '*.tif' ) ) )
# combine the sorted lists which should now be in a common order...
tas_hur_list = zip( tas_files, hur_files )
# run in parallel
pool = mp.Pool( processes=ncores )
out = pool.map( run, tas_hur_list )
pool.close()
# def return_files( input_path, var ):
# output_files = []
# for root, subs, files in os.walk( input_path ):
# # print root
# if root.endswith( 'downscaled' ) and len( files ) != 0 and var in root:
# pool = mp.Pool( processes=ncores )
# files = pool.map( lambda x: os.path.join( root, x ), files )
# pool.close()
# output_files.append( files )
# return output_files
|
robernom/ptavi-pfinal
|
proxy_registrar.py
|
Python
|
gpl-2.0
| 9,377
| 0.000107
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Programa que actua como proxy-registrar en UDP."""
import socketserver
import socket
import sys
import json
import hashlib as HL
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from time import time, gmtime, strftime
from random import choice, randrange
from uaclient import Log
RESP_COD = {100: 'SIP/2.0 100 Trying\r\n', 180: 'SIP/2.0 180 Ring\r\n',
200: 'SIP/2.0 200 OK',
400: 'SIP/2.0 400 Bad Request\r\n\r\n',
401: ('SIP/2.0 401 Unauthorized\r\nWWW-Authenticate: ' +
'Digest nonce="{}"\r\n\r\n'),
404: 'SIP/2.0 404 User Not Found\r\n\r\n',
405: 'SIP/2.0 405 Method Not Allowed'}
HEX = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd']
HEX += ['e', 'f']
def add_header(data):
"""Introduce cabecera proxy en los mensajes que se van a reenviar."""
div = data.split("\r\n", 1)
return "{}\r\n{}\r\n{}".format(div[0], PR_HEADER, div[1])
def new_nonce():
"""Crea un nuevo nonce con digitos hexadecimales pseudoaleatorios."""
return ''.join(choice(HEX) for i in range(randrange(0, 20)))
def search_pass(name):
"""Busca password del usuario pasado como parametro."""
with open(PASSWD_PATH) as f_pass:
try:
for line in f_pass:
if line.split(':')[0] == name:
passwd = line.split(':')[1][0:-1]
break
else:
passwd = ""
return passwd
except FileNotFoundError:
sys.exit("Password file not found")
class PRHandler(ContentHandler):
"""Clase para obtener los valores del xml."""
def __init__(self, xml):
"""Crea los diccionarios para introducir los valores."""
self.dtd = {'server': ('name', 'ip', 'puerto'),
'log': ('path',),
'database': ('path', 'passwdpath')}
self.config = {tag: {} for tag in self.dtd}
parser = make_parser()
parser.setContentHandler(self)
parser.parse(xml)
def startElement(self, name, attrs):
"""Introduce los valores en el diccionario."""
if name in self.dtd:
for elem in self.dtd[name]:
self.config[name][elem] = attrs.get(elem, "")
class SIPHandler(socketserver.DatagramRequestHandler):
"""Clase para un servidor SIP."""
user_data = {}
def json2registered(self):
"""Busca fichero JSON con clientes; si no hay devuelve dicc vacio."""
try:
with open(DBASE) as f_json:
self.user_data = json.load(f_json)
except FileNotFoundError:
self.user_data = {}
def delete_users(self, moment):
"""Borra los usuarios expirados."""
lista_expirados = []
for user in self.user_data:
if self.user_data[user]['expires'] <= moment:
lista_expirados.append(user)
for name in lista_expirados:
del self.user_data[name]
def register2json(self):
"""Introduce en un fichero JSON los usuarios."""
with open(DBASE, 'w') as f_json:
json.dump(self.user_data, f_json, sort_keys=True, indent=4)
def register(self, data):
"""Metodo REGISTER."""
c_data = data.split()[1:]
        # Extract the user's information
u_name, u_port = c_data[0].split(':')[1:]
u_ip, u_exp = self.client_address[0], c_data[3]
u_pass = search_pass(u_name)
        # Handle the expiry time
time_exp = int(u_exp) + int(time())
str_exp = strftime('%Y-%m-%d %H:%M:%S', gmtime(time_exp))
nonce = new_nonce()
if u_name not in self.user_data:
self.user_data[u_name] = {'addr': u_ip, 'expires': str_exp,
'port': u_port, 'auth': False,
'nonce': nonce}
to_send = RESP_COD[401].format(nonce)
elif not self.user_data[u_name]['auth']:
try:
resp = data.split('"')[-2]
except IndexError:
resp = ""
u_nonce = self.user_data[u_name]['nonce']
expect = HL.md5((u_nonce + u_pass).encode()).hexdigest()
if resp == expect:
self.user_data[u_name]['auth'] = True
self.user_data[u_name]['expires'] = str_exp
to_send = (RESP_COD[200] + "\r\n\r\n")
else:
to_send = RESP_COD[401].format(nonce)
else:
to_send = (RESP_COD[200] + "\r\n\r\n")
self.register2json()
self.wfile.write(bytes(to_send, 'utf-8'))
obj_log.log_write("send", (u_ip, u_port), to_send)
def invite(self, data):
"""Metodo INVITE."""
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
dest = data.split()[1][4:]
try:
(ip_port) = (self.user_data[dest]['addr'],
int(self.user_data[dest]['port']))
sock.connect(ip_port)
text = add_header(data)
sock.send(bytes(text, 'utf-8'))
recv = sock.recv(1024).decode('utf-8')
except (ConnectionRefusedError, KeyError):
recv = ""
self.wfile.write(bytes(RESP_COD[404], 'utf-8'))
if recv.split('\r\n')[0:3] == [RESP_COD[100][0:-2],
RESP_COD[180][0:-2], RESP_COD[200]]:
text = add_header(recv)
print(text)
self.socket.sendto(bytes(text, 'utf-8'), self.client_address)
try:
if recv.split()[1] and recv.split()[1] == "480":
text = add_header(recv)
self.socket.sendto(bytes(text, 'utf-8'), self.client_address)
except IndexError:
pass
    def ack(self, data):
        """ACK method."""
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            dest = data.split()[1][4:]
(ip_port) = (self.user_data[dest]['addr'],
int(self.user_data[dest]['port']))
sock.connect(ip_port)
text = add_header(data)
sock.send(bytes(text, 'utf-8'))
try:
recv = sock.recv(1024).decode('utf-8')
print(recv)
except socket.timeout:
pass
def bye(self, data):
"""Metodo BYE."""
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
dest = data.split()[1][4:]
(ip_port) = (self.user_data[dest]['addr'],
int(self.user_data[dest]['port']))
sock.connect(ip_port)
text = add_header(data)
sock.send(bytes(text, 'utf-8'))
recv = sock.recv(1024).decode('utf-8')
except (ConnectionRefusedError, KeyError):
recv = ""
self.wfile.write(bytes(RESP_COD[404], 'utf-8'))
if recv == (RESP_COD[200] + "\r\n\r\n"):
text = add_header(recv)
self.socket.sendto(bytes(text, 'utf-8'), self.client_address)
def handle(self):
"""Cada vez que un cliente envia una peticion se ejecuta."""
data = self.request[0].decode('utf-8')
c_addr = (self.client_address[0], str(self.client_address[1]))
obj_log.log_write("recv", c_addr, data)
unallow = ["CANCEL", "OPTIONS", "SUSCRIBE", "NOTIFY", "PUBLISH",
"INFO", "PRACK", "REFER", "MESSAGE", "UPDATE"]
print(data)
met = data.split()[0]
self.json2registered()
str_now = strftime('%Y-%m-%d %H:%M:%S', gmtime(int(time())))
self.delete_users(str_now)
if met == "REGISTER":
self.register(data)
elif met == "INVITE":
self.invite(data)
elif met == "ACK":
self.ack(data)
elif met == "BYE":
self.bye(data)
elif met in unallow:
to_send = "SIP/2.0 405 Method Not Allowed\r\n\r\n"
obj_log.log_write
|
fisle/django-haystack
|
test_haystack/simple_tests/test_simple_backend.py
|
Python
|
bsd-3-clause
| 7,001
| 0.002716
|
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import date
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from haystack import connection_router, connections, indexes
from haystack.query import SearchQuerySet
from haystack.utils.loading import UnifiedIndex
from ..core.models import MockModel, ScoreMockModel
from ..mocks import MockSearchResult
from .search_indexes import SimpleMockScoreIndex, SimpleMockSearchIndex
class SimpleSearchBackendTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(SimpleSearchBackendTestCase, self).setUp()
self.backend = connections['simple'].get_backend()
ui = connections['simple'].get_unified_index()
self.index = SimpleMockSearchIndex()
ui.build(indexes=[self.index, SimpleMockScoreIndex()])
self.sample_objs = MockModel.objects.all()
def test_update(self):
self.backend.update(self.index, self.sample_objs)
def test_remove(self):
self.backend.remove(self.sample_objs[0])
def test_clear(self):
self.backend.clear()
def test_search(self):
# No query string should always yield zero results.
self.assertEqual(self.backend.search(u''), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'*')['hits'], 24)
self.assertEqual(sorted([result.pk for result in self.backend.search(u'*')['results']]), [1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(self.backend.search(u'daniel')['hits'], 23)
self.assertEqual([result.pk for result in self.backend.search(u'daniel')['results']], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(self.backend.search(u'should be a string')['hits'], 1)
self.assertEqual([result.pk for result in self.backend.search(u'should be a string')['results']], [8])
# Ensure the results are ``SearchResult`` instances...
self.assertEqual(self.backend.search(u'should be a string')['results'][0].score, 0)
self.assertEqual(self.backend.search(u'index document')['hits'], 6)
self.assertEqual([result.pk for result in self.backend.search(u'index document')['results']], [2, 3, 15, 16, 17, 18])
# Regression-ville
        self.assertEqual([result.object.id for result in self.backend.search(u'index document')['results']], [2, 3, 15, 16, 17, 18])
self.assertEqual(self.backend.search(u'index document')['results'][0].model, MockModel)
# No support for spelling suggestions
self.assertEqual(self.backend.search(u'Indx')['hits'], 0)
self.assertFalse(self.backend.search(u'Indx').get('spelling_suggestion'))
# No support for facets
        self.assertEqual(self.backend.search(u'', facets=['name']), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', facets=['name'])['hits'], 23)
self.assertEqual(self.backend.search(u'', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}}), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}})['hits'], 23)
self.assertEqual(self.backend.search(u'', query_facets={'name': '[* TO e]'}), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', query_facets={'name': '[* TO e]'})['hits'], 23)
self.assertFalse(self.backend.search(u'').get('facets'))
self.assertFalse(self.backend.search(u'daniel').get('facets'))
# Note that only textual-fields are supported.
self.assertEqual(self.backend.search(u'2009-06-18')['hits'], 0)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(isinstance(self.backend.search(u'index document', result_class=MockSearchResult)['results'][0], MockSearchResult))
def test_filter_models(self):
self.backend.update(self.index, self.sample_objs)
self.assertEqual(self.backend.search(u'*', models=set([]))['hits'], 24)
self.assertEqual(self.backend.search(u'*', models=set([MockModel]))['hits'], 23)
def test_more_like_this(self):
self.backend.update(self.index, self.sample_objs)
self.assertEqual(self.backend.search(u'*')['hits'], 24)
# Unsupported by 'simple'. Should see empty results.
self.assertEqual(self.backend.more_like_this(self.sample_objs[0])['hits'], 0)
def test_score_field_collision(self):
index = connections['simple'].get_unified_index().get_index(ScoreMockModel)
sample_objs = ScoreMockModel.objects.all()
self.backend.update(index, self.sample_objs)
        # 42 is in the match, which will be removed from the result
self.assertEqual(self.backend.search(u'42')['results'][0].score, 0)
@override_settings(DEBUG=True)
class LiveSimpleSearchQuerySetTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(LiveSimpleSearchQuerySetTestCase, self).setUp()
# Stow.
self.old_ui = connections['simple'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SimpleMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['simple']._index = self.ui
self.sample_objs = MockModel.objects.all()
self.sqs = SearchQuerySet(using='simple')
def tearDown(self):
# Restore.
connections['simple']._index = self.old_ui
super(LiveSimpleSearchQuerySetTestCase, self).tearDown()
def test_general_queries(self):
# For now, just make sure these don't throw an exception.
# They won't work until the simple backend is improved.
self.assertTrue(len(self.sqs.auto_query('daniel')) > 0)
self.assertTrue(len(self.sqs.filter(text='index')) > 0)
self.assertTrue(len(self.sqs.exclude(name='daniel')) > 0)
self.assertTrue(len(self.sqs.order_by('-pub_date')) > 0)
def test_general_queries_unicode(self):
self.assertEqual(len(self.sqs.auto_query(u'Привет')), 0)
def test_more_like_this(self):
# MLT shouldn't be horribly broken. This used to throw an exception.
mm1 = MockModel.objects.get(pk=1)
self.assertEqual(len(self.sqs.filter(text=1).more_like_this(mm1)), 0)
def test_values_queries(self):
sqs = self.sqs.auto_query('daniel')
self.assertTrue(len(sqs) > 0)
flat_scores = sqs.values_list("score", flat=True)
self.assertEqual(flat_scores[0], 0)
scores = sqs.values_list("id", "score")
self.assertEqual(scores[0], [1, 0])
scores_dict = sqs.values("id", "score")
self.assertEqual(scores_dict[0], {"id": 1, "score": 0})
|
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
|
unittests/Applications/test_CommonPlotLines.py
|
Python
|
gpl-2.0
| 112
| 0.008929
|
import unittest
from PyFoam.Applications.CommonPlotLines import CommonPlotLines
theSuite=unittest.TestSuite()
|
zibneuro/brainvispy
|
IO/vtkio.py
|
Python
|
bsd-3-clause
| 1,198
| 0.010017
|
import vtk
import os
import os.path
from vis.vtkpoly import VtkPolyModel
from vis.vtkvol import VtkVolumeModel
from IO.obj import OBJReader
class VtkIO:
def __get_reader(self, file_extension):
'''Returns a reader that can read the file type having the provided extension. Returns None if no such reader.'''
lower_file_ext = file_extension.lower()
#if (lower_file_ext == ".tiff" or lower_file_ext == ".tif"):
# return vtk.vtkTIFFReader()
if (lower_file_ext == ".vtk"):
return vtk.vtkPolyDataReader()
if (lower_file_ext == ".pl
|
y"):
return vtk.vtkPLYReader()
if (lower_file_ext == ".obj"):
return OBJReader()
return None
def load(self, file_name):
"""Loads the data from the file 'file_name' and returns it. Returns None if the file type is not supported."""
# Make sure the file exists
if not file_name or not os.path.isfile(file_name):
return None
# Get the right data reader depending on the file extension
data_reader = self.__get_reader(os.path.splitext(file_name)[1])
if not data_reader:
return None
data_reader.SetFileName(file_name)
data_reader.Update()
return data_reader.GetOutput()
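# Usage sketch (the file name is a placeholder):
#   polydata = VtkIO().load('/path/to/mesh.ply')   # vtkPolyData, or None if missing/unsupported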
|
sputnick-dev/weboob
|
modules/bp/pages/pro.py
|
Python
|
agpl-3.0
| 3,390
| 0.003542
|
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import datetime
from dateutil.relativedelta import relativedelta
from decimal import Decimal
from weboob.deprecated.browser import Page
from weboob.deprecated.browser.parsers.csvparser import CsvParser
from weboob.capabilities.bank import Account, AccountNotFound
from .accounthistory import Transaction, AccountHistory
class RedirectPage(Page):
pass
class HistoryParser(CsvParser):
FMTPARAMS = {'delimiter': ';'}
class ProAccountsList(Page):
ACCOUNT_TYPES = {u'Comptes épargne': Account.TYPE_SAVINGS,
u'Comptes courants': Account.TYPE_CHECKING,
}
def get_accounts_list(self):
for table in self.document.xpath('//div[@class="comptestabl"]/table'):
try:
account_type = self.ACCOUNT_TYPES[table.xpath('./caption/text()')[0].strip()]
except (IndexError,KeyError):
account_type = Account.TYPE_UNKNOWN
for tr in table.xpath('./tbody/tr'):
cols = tr.findall('td')
link = cols[0].find('a')
if link is None:
continue
a = Account()
a.type = account_type
a.id, a.label = map(unicode, link.attrib['title'].split(' ', 1))
tmp_balance = self.parser.tocleanstring(cols[1])
a.currency = a.get_currency(tmp_balance)
a.balance = Decimal(Transaction.clean_amount(tmp_balance))
a._card_links = []
a._link_id = link.attrib['href']
yield a
def get_account(self, id):
for account in self.get_accounts_list():
if account.id == id:
return account
raise AccountNotFound('Unable to find account: %s' % id)
class ProAccountHistory(Page):
def on_loaded(self):
link = self.document.xpath('//a[contains(@href, "telechargercomptes.ea")]/@href')[0]
self.browser.location(link)
class ProAccountHistoryDownload(Page):
def on_loaded(self):
self.browser.select_form(name='telechargement')
self.browser['dateDebutPeriode'] = (datetime.date.today() - relativedelta(months=11)).strftime('%d/%m/%Y')
self.browser.submit()
class ProAccountHistoryCSV(AccountHistory):
def get_next_link(self):
return False
def get_history(self, deferred=False):
for line in self.document.rows:
if len(line) < 4 or line[0] == 'Date':
continue
t = Transaction()
t.parse(raw=line[1], date=line[0])
t.set_amount(line[2])
t._coming = False
yield t
|
trevor/calendarserver
|
txdav/__init__.py
|
Python
|
apache-2.0
| 862
| 0
|
# -*- test-case-name: txdav -*-
##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Calendar & contacts data store.
"""
#
# FIXME: 'txdav' should be renamed to 'datastore' and should not be
# WebDAV-specific.
#
# (That means txdav.xml should be pulled out, for example, as that is
# WebDAV-specific.)
#
|
ikegami-yukino/shellinford-python
|
shellinford/__init__.py
|
Python
|
bsd-3-clause
| 201
| 0
|
from . import shellinford
VERSION = (0, 4, 1)
__version__ = '0.4.1'
__all__ = ['FMIndex', 'bit_vector', 'bwt']
FMIndex = shellinford.FMIndex
bit_vector = shellinford.bit_vector
bwt = shellinford.bwt
|
labase/activnce
|
main/utils/0_14_0207convertlogformat.py
|
Python
|
gpl-2.0
| 2,392
| 0.013854
|
# -*- coding: utf-8 -*-
"""
################################################
Plataforma ActivUFRJ
################################################
:Author: *Núcleo de Computação Eletrônica (NCE/UFRJ)*
:Contact: carlo@nce.ufrj.br
:Date: $Date: 2009-2010 $
:Status: This is a "work in progress"
:Revision: $Revision: 0.01 $
:Home: `LABASE `__
:Copyright: ©2009, `GPL
"""
from couchdb import Server
from uuid import uuid4
_DOCBASES = ['log', 'log2']
_EMPTYLOG = lambda: dict(
sujeito = "",
verbo = "",
    objeto = "",
tipo = "",
link = "",
news = "True", # todos os documentos do log velho que não tiverem o campo news
# serão copiados para o log novo com news="True"
# este valor será armazenado sempre como string
data_inclusao = ""
)
class Activ(Server):
"Active database"
log = {}
log2 = {}
def __init__(self, url):
Server.__init__(self, url)
act = self
test_and_create = lambda doc: doc in act and act[doc] or act.create(doc)
for attribute in _DOCBASES:
setattr(Activ, attribute, test_and_create(attribute))
def erase_database(self):
'erase tables'
for table in _DOCBASES:
try:
del self[table]
except:
pass
__ACTIV = Activ('http://127.0.0.1:5984/')
LOG = __ACTIV.log
LOG2 = __ACTIV.log2
def main():
print u"iniciando conversão"
for user_id in LOG:
if "_design" not in user_id:
#print "-------------------"
print user_id
log_data = dict()
log_data.update(LOG[user_id])
for item in log_data["registros"]:
log_new = _EMPTYLOG()
log_new.update(item)
if log_new["news"] is True:
log_new["news"] = "True"
if log_new["news"] is False:
log_new["news"] = "False"
#print log_new
id = uuid4().hex
LOG2[id] = log_new
print u"conversão finalizada."
if __name__ == "__main__":
main()
|
overxfl0w/Grampus-Forensic-Utils
|
Finger-FootPrinting/GrampusHTTP/fingerghttp.py
|
Python
|
gpl-2.0
| 1,829
| 0.053581
|
import sys
import socket
sys.path.append("../../Crawlers/Bing/WAPI/")
from crawlerbing import crawlerbing
sys.path.append("../../Crawlers/Google/")
from crawlergoogle import crawlergoogle
class fingergrampushttp:
def __init__(self,key,header):
self.key = key
self.header = header
self.__selectUrls()
def __searchUrls(self):
allUrls = {}
allUrls.update(crawlergoogle(self.key,"",1)._returnUrls())
allUrls.update(crawlerbing(self.key)._returnUrls())
endUrls = {}
for key in allUrls:
endUrls[allUrls[key]] = ""
return endUrls
def __getHeaders(self,url):
self.socketClient = socket.socket()
try:
#(socket.gethostbyname(self.__replacedUrl(url))
self.socketClient.connect((self.__replacedUrl(url),80))
self.socketClient.send("HEAD / HTTP/1.0\r\n\r\n")
data = self.socketClient.recv(1024)
return data
except:
return None
def __getOptions(self,url):
self.socketClient = socket.socket()
try:
self.socketClient.connect((self.__replacedUrl(url),80))
self.socketClient.send("OPTIONS / HTTP/1.0\r\n\r\n")
data = self.socketClient.recv(1024)
indexAllow = data.find("Allow")
   data = data[indexAllow:data.find("\r\n",indexAllow)]
return data
except:
return ""
 def __replacedUrl(self,url):
url = url.replace("http://","")
url = url[:url.find('/')]
return url
def __selectUrls(self):
self.allUrls = self.__searchUrls()
self.selectedUrls = {}
for key in self.allUrls:
try:
self.allUrls[key] = self.__getHeaders(key)
self.allUrls[key] += self.__getOptions(key)
if self.header in self.allUrls[key]:
self.selectedUrls[key] = self.allUrls[key]
except:
continue
def _returnSelected(self):
return self.selectedUrls
def _returnAll(self):
return self.allUrls
#print fingergrampushttp("futbol","IIS")._returnAll()
|
davidnmurray/iris
|
docs/iris/example_tests/test_polar_stereo.py
|
Python
|
gpl-3.0
| 1,351
| 0
|
# (C) British Crown Copyright 2013 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import Iris tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from . import extest_util
with extest_util.add_examples_to_path():
import polar_stereo
@tests.skip_grib
class TestPolarStereo(tests.GraphicsTest):
"""Test the polar_stereo example code."""
def test_polar_stereo(self):
with extest_util.show_replaced_by_check_graphic(self):
polar_stereo.main()
if __name__ == '__main__':
tests.main()
|
nguyenngochuy91/Ancestral-Blocks-Reconstruction
|
create_operon_tree.py
|
Python
|
gpl-3.0
| 9,054
| 0.01712
|
#!/usr/bin/env python
''' Author : Huy Nguyen
Program : Create operon tree
Start : 01/01/2018
End :
'''
from Bio import SeqIO
import argparse
import os
import homolog4
import shutil
from ete3 import Tree
## Traverses the genome information directory
def traverseAll(path):
res=[]
for root,dirs,files in os.walk(path):
for f in files:
res.append(root+"/"+f)
return res
### parsing the argument from user input
def parser_code():
parser = argparse.ArgumentParser()
parser.add_argument("--genomes_directory","-g", help="The directory that store all the genomes file (E_Coli/genomes)")
parser.add_argument("--gene_blocks","-b", help="The gene_block_names_and_genes.txt file, this file stores the operon name and its set of genes")
parser.add_argument("--reference","-r", help="The ncbi accession number for the reference genome (NC_000913 for E_Coli and NC_000964 for B_Sub)")
parser.add_argument("--filter","-f", help="The filter file for creating the tree (E_Coli/phylo_order.txt for E_Coli or B_Sub/phylo_order.txt for B-Sub)")
parser.add_argument("--output","-o", help="Output directory to store the result",default = "result")
return parser.parse_args()
### Given the optimized gene block data, and the genes in each species, provide a dictionary
### that has key as an operon, value is a dictionary where key is the species, and key is the gene sequence, start,stop and strand
def generate_operon(optimized_gene_block,db):
db_dict = {}
operon_dict ={}
db_list = [i for i in traverseAll(db) if i.split('/')[-1].split('.')[-1] == 'ffc']
for organism in db_list:
accession_num = organism.split('/')[-1].split('.')[0]
for record in SeqIO.parse(organism,"fasta"):
info = record.id.split('|')
name = '_'.join(info[1].split('_')[:2])
together = name+"_"+accession_num
start = int(info[4])
stop = int(info[5])
strand = int(info[6])
if together not in db_dict:
db_dict[together] = {}
db_dict[together][(start,stop,strand)] = record.seq.tostring()
operon_list = traverseAll(optimized_gene_block)
for operon in operon_list:
operon_name = operon.split("/")[-1].split(".")[0]
operon_dict[operon_name]= {}
handle = open(operon,"r")
for line in handle.readlines():
h = homolog4.Homolog.from_blast(line)
name = '_'.join(h.organism().split('_')[:2])
accession_num = h.accession().split(".")[0]
together = name+"_"+accession_num
start = h.start()
stop = h.stop()
strand = h.strand()
seq = db_dict[together][(start,stop,strand)]
if together not in operon_dict[operon_name]:
operon_dict[operon_name][together] = [(seq,start,stop,strand)]
else:
operon_dict[operon_name][together].append((seq,start,stop,strand))
return operon_dict
### given the gene position, determine whether there is a gene block, and then concatenate
### and assign split as a string of 500 letter "N"
def concatenate(potential):
d = {}
for gene in potential:
seq = gene[0]
start = gene[1]
stop = gene[2]
strand = gene[3]
if strand in d:
d[strand].append((start,stop,seq))
else:
d[strand] = [(start,stop,seq)]
has_block = False # flag to check whether there is a block
wholeString = ""
for strand in d:
d[strand].sort()
for strand in d:
substring = d[strand][0][2] # always store the first gene sequence
for i in range(len(d[strand])-1):
gene1 = d[strand][i]
gene2 = d[strand][i+1]
if abs(gene2[0]-gene1[1])>500:
substring+="N"*500
else:
has_block = True
substring+=gene2[2]
wholeString+= substring
if has_block:
return wholeString[:-1]+"\n"
else:
return ""
### given the operon dict above, generate fasta file to do alignment
def generate_fasta(operon_dict,fasta):
for operon in operon_dict:
outfile = open(fasta+operon,'w')
for species in operon_dict[operon]:
potential = operon_dict[operon][species]
combine = concatenate(potential)
if combine:
outfile.write(">"+species+"\n")
outfile.write(combine)
outfile.close()
return None
### given the fasta, generate a multiple alignment and tree
def generate_tree(fasta,tree):
files = traverseAll(fasta)
## using muscle
# make the alignment file
for file in files:
name = file.split('/')[-1]
tree_name = tree+name+"/"
try:
os.mkdir(tree_name)
except:
print (tree_name+" directory already created")
temp_align = tree_name +name+ '.aln'
cm1 ="muscle -in "+file+ " -out "+temp_align
os.system(cm1)
#make the tree using clustal
cm2 ="clustalw -infile="+temp_align+" -tree=1"
# have to wait for few second for the aln file actually comes out lol
os.system(cm2)
temp_tree = tree_name + name + '.ph' # that's what this file gets named by default, and i'm sick of looking for the cmd line arg to fix.
print(temp_tree)
print("modifying")
#modify for negative branch
modify_tree = tree_name + name+ '.new'
cm3 = "sed -e 's,:-[0-9\.]\+,:0.0,g' "+temp_tree+" > "+modify_tree
os.system(cm3)
os.remove(temp_tree)
# dealing with negative branch length
#print "marker_fasta",marker_fasta
#print "temp_tree", temp_tree
# move the created tree file to the location i say its going
if __name__ == '__main__':
args = parser_code()
reference = args.reference
genomes_directory = args.genomes_directory
reference = args.reference
filter_file = args.filter
gene_block_names_and_genes = args.gene_blocks
outdir = args.output
# check if we are going to output results in the current directory
dirs = genomes_directory.split('/')
outdir+='/'
try:
os.mkdir(outdir+'/')
except:
print ("output directory has already been created")
if len(dirs)>=3: # means that we have to go to subdirectory
parent_dir = outdir+dirs[0]+"/"
else:
parent_dir = outdir
##########################################################################
# finding gene blocks
db = parent_dir+ 'db'
gene_block_names_and_genes = dirs[0]+"/"+'gene_block_names_and_genes.txt'
gene_block_query = parent_dir +'gene_block_query.fa'
blast_result = parent_dir+'blast_result/'
blast_parse = parent_dir+'blast_parse/'
optimized_gene_block = parent_dir+'optimized_gene_block/'
tree = parent_dir +"operon_tree/"
try:
os.mkdir(tree)
except:
print ("directory tree has already been created")
# ### format a database for faster blasting. output in db
# cmd1 ='./format_db.py -i {} -o {}'.format(genomes_directory,db)
# os.system(cmd1)
# print ('cmd1:',cmd1)
#
# ### Given the gene_block_names_and_genes.txt, create a gene_block_query.fa using the reference gene bank file. output in file gene_block_query.fa
# cmd2 ='./make_operon_query.py -i {} -b {} -r {} -o {}'.format(genomes_directory,gene_block_names_and_genes,reference,gene_block_query)
# os.system(cmd2)
# print ('cmd2:',cmd2)
#
# ### blasting using db vs the gene_block_query.fa above. output in blast_result
# cmd3 ='./blast_script.py -u {} -d {} -o {}'.format(gene_block_query,db,blast_result)
# os.system(cmd3)
# print ('cmd3:',cmd3)
#
# ### parsing the blast result directory into files that group by operon names, output in blast_parse
# cmd4 ='./bl
|
VIVEKLUCKY1848/gedit-plugins-1
|
plugins/git/git/workerthread.py
|
Python
|
gpl-2.0
| 4,422
| 0.000226
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 - Garrett Regier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA.
from gi.repository import GLib
import abc
import collections
import queue
import threading
import traceback
from .debug import debug
class WorkerThread(threading.Thread):
__metaclass__ = abc.ABCMeta
__sentinel = object()
def __init__(self, callback, chunk_size=1, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__callback = callback
self.__chunk_size = chunk_size
self.__quit = threading.Event()
self.__has_idle = threading.Event()
self.__tasks = queue.Queue()
self.__results = collections.deque()
@abc.abstractmethod
def handle_task(self, *args, **kwargs):
raise NotImplementedError
# TODO: add, put, push?
def push(self, *args, **kwargs):
self.__tasks.put((args, kwargs))
def __close(self, process_results):
self.__quit.set()
# Prevent the queue.get() from blocking forever
self.__tasks.put(self.__sentinel)
super().join()
if not process_results:
self.__results.clear()
else:
while self.__in_idle() is GLib.SOURCE_CONTINUE:
pass
def terminate(self):
self.__close(False)
def join(self):
self.__close(True)
def clear(self):
old_tasks = self.__tasks
self.__tasks = queue.Queue(1)
# Prevent the queue.get() from blocking forever
old_tasks.put(self.__sentinel)
# Block until the old queue has finished, otherwise
# an old result could be added to the new results queue
self.__tasks.put(self.__sentinel)
self.__tasks.put(self.__sentinel)
old_tasks = self.__tasks
self.__tasks = queue.Queue()
# Switch to the new queue
old_tasks.put(self.__sentinel)
# Finally, we can now create a new deque without
# the possibility of any old results being added to it
self.__results.clear()
def run(self):
while not self.__quit.is_set():
task = self.__tasks.get()
if task is self.__sentinel:
continue
args, kwargs = task
try:
result = self.handle_task(*args, **kwargs)
except Exception:
traceback.print_exc()
continue
self.__results.append(result)
# Avoid having an idle for every result
if not self.__has_idle.is_set():
self.__has_idle.set()
debug('%s<%s>: result callback idle started' %
(type(self).__name__, self.name))
GLib.source_set_name_by_id(GLib.idle_add(self.__in_idle),
'[gedit] git %s result callback idle' %
(type(self).__name__,))
def __in_idle(self):
try:
for i in range(self.__chunk_size):
result = self.__results.popleft()
try:
self.__callback(result)
except Exception:
traceback.print_exc()
except IndexError:
# Must be cleared before we check the results length
self.__has_idle.clear()
# Only remove the idle when there are no more items,
# some could have been added after the IndexError was raised
if len(self.__results) == 0:
debug('%s<%s>: result callback idle finished' %
(type(self).__name__, self.name))
return GLib.SOURCE_REMOVE
return GLib.SOURCE_CONTINUE
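# A minimal usage sketch (not part of the original plugin; the subclass and
# callback are invented for illustration):
#
#     class EchoThread(WorkerThread):
#         def handle_task(self, text):
#             return text.upper()
#
#     thread = EchoThread(callback=print)
#     thread.start()
#     thread.push("hello")   # handle_task() runs on the worker thread; the
#                            # callback later receives "HELLO" from the idle
#     thread.join()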
# ex:ts=4:et:
|
levlaz/circleci.py
|
tests/circle/test_error.py
|
Python
|
mit
| 951
| 0.001052
|
# pylint: disable-all
import unittest
from circleci.error import CircleCIException, BadKeyError, BadVerbError, InvalidFilterError
class TestCircleCIError(unittest.TestCase):
def setUp(self):
self.base = CircleCIException('fake')
self.key = BadKeyError('fake')
self.verb = BadVerbError('fake')
self.filter = InvalidFilterError('fake', 'status')
self.afilter = InvalidFilterError('fake', 'artifacts')
def test_error_implements_str(self):
self.assertTrue(self.base.__str__ is not object.__str__)
string = self.base.__str__()
self.assertIn('invalid', string)
def test_verb_message(self):
self.assertIn('DELETE', self.verb.message)
def test_key_message(self):
self.assertIn('deploy-key', self.key.message)
def test_filter_message(self):
self.assertIn('running', self.filter.message)
self.assertIn('completed', self.afilter.message)
|
djaodjin/djaodjin-deployutils
|
deployutils/__init__.py
|
Python
|
bsd-2-clause
| 1,370
| 0
|
# Copyright (c) 2021, DjaoDjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = '0.6.4-dev'
|
yephper/django
|
django/contrib/postgres/fields/jsonb.py
|
Python
|
bsd-3-clause
| 3,093
| 0
|
import json
from psycopg2.extras import Json
from django.contrib.postgres import forms, lookups
from django.core import exceptions
from django.db.models import Field, Transform
from django.utils.translation import ugettext_lazy as _
__all__ = ['JSONField']
class JSONField(Field):
empty_strings_allowed = False
description = _('A JSON object')
default_error_messages = {
'invalid': _("Value must be valid JSON."),
}
def db_type(self, connection):
return 'jsonb'
def get_transform(self, name):
transform = super(JSONField, self).get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def get_prep_value(self, value):
if value is not None:
return Json(value)
return value
def get_prep_lookup(self, lookup_type, value):
if lookup_type in ('has_key', 'has_keys', 'has_any_keys'):
return value
if isinstance(value, (dict, list)):
return Json(value)
return super(JSONField, self).get_prep_lookup(lookup_type, value)
def validate(self, value, model_instance):
super(JSONField, self).validate(value, model_instance)
try:
json.dumps(value)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def value_to_string(self, obj):
value = self.value_from_object(obj)
return value
def formfield(self, **kwargs):
defaults = {'form_class': forms.JSONField}
defaults.update(kwargs)
return super(JSONField, self).formfield(**defaults)
JSONField.register_lookup(lookups.DataContains)
JSONField.register_lookup(lookups.ContainedBy)
JSONField.register_lookup(lookups.HasKey)
JSONField.register_lookup(lookups.HasKeys)
JSONField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
def __init__(self, key_name, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if len(key_transforms) > 1:
return "{} #> %s".format(lhs), [key_transforms] + params
try:
int(self.key_name)
except ValueError:
lookup = "'%s'" % self.key_name
else:
lookup = "%s" % self.key_name
return "%s -> %s" % (lhs, lookup), params
class KeyTransformFactory(object):
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
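# A usage sketch (illustrative; the model and queries are assumptions, not part
# of this module):
#
#     from django.contrib.postgres.fields import JSONField
#
#     class Event(models.Model):
#         payload = JSONField()
#
#     Event.objects.filter(payload__has_key='user')   # HasKey lookup registered above
#     Event.objects.filter(payload__user__id=42)      # chained KeyTransforms emit "#>"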
|
Rustem/toptal-blog-celery-toy-ex
|
celery_uncovered/toyex/models.py
|
Python
|
mit
| 353
| 0
|
class Repository(object):
def __init__(self, obj):
self._wrapped_obj = obj
self.language = obj[u'language'] or u'unknown'
self.name = obj[u'full_name']
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
else:
return getattr(self._wrapped_obj, attr)
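# Illustrative usage (the dict mimics a GitHub API repository payload; the
# values are invented):
#
#     repo = Repository({u'language': None, u'full_name': u'octocat/Hello-World'})
#     repo.language   # -> u'unknown', None falls back through the `or` above
#     repo.name       # -> u'octocat/Hello-World'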
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/services/webapp/doc/__init__.py
|
Python
|
agpl-3.0
| 186
| 0.005376
|
# Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
# Make this directory into a Python package.
|
crisisking/udbraaains
|
brains/mapping/views.py
|
Python
|
bsd-3-clause
| 3,478
| 0
|
import cPickle as pickle
import datetime
import json
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import redis
from mapping.tasks import process_data
CONN = redis.Redis(host=settings.BROKER_HOST, port=settings.BROKER_PORT, db=6)
def process_annotation_timestamp(annotation, name_in, age_name, time):
if name_in in annotation:
annotation[name_in] = pickle.loads(str(annotation[name_in]))
try:
annotation[age_name] = unicode(time - annotation[name_in])
except TypeError:
annotation[age_name] = None
del annotation[name_in]
else:
annotation[age_name] = None
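# Illustrative behaviour (values invented): for an annotation that contains a
# pickled 'report_date', the call
#   process_annotation_timestamp(annotation, 'report_date', 'report_age', now)
# unpickles the timestamp, stores the elapsed time as a unicode string under
# 'report_age', and removes 'report_date'; if the key is missing, 'report_age'
# is simply set to None.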
@csrf_exempt
def receive_data(request):
if request.method == 'POST' and 'data' in request.POST:
data = json.loads(request.POST['data'])
# Grab the player's position and IP, process data in background
origin_x = data['surroundings']['position']['coords']['x']
origin_y = data['surroundings']['position']['coords']['y']
if origin_x < 0 or origin_x > 99 or origin_y < 0 or origin_y > 99:
return HttpResponse('STOP IT', status=400)
if 'HTTP_X_REAL_IP' in request.META:
ip = request.META['HTTP_X_REAL_IP']
else:
ip = request.META['REMOTE_ADDR']
process_data.delay(data, ip)
payload = {}
payload['annotation'] = []
payload['trees'] = [json.loads(x) for x in CONN.smembers('trees')]
# Grab all locations in a 15x15 square,
# centered on the player's position
x_range = (range(origin_x + 1, origin_x + 8) +
range(origin_x, origin_x - 8, -1))
y_range = (range(origin_y + 1, origin_y + 8) +
range(origin_y, origin_y - 8, -1))
for x in x_range:
for y in y_range:
annotation = CONN.get('location:{0}:{1}'.format(x, y))
if annotation:
now = datetime.datetime.now()
annotation = json.loads(annotation)
process_annotation_timestamp(annotation, 'report_date',
'report_age', now)
process_annotation_timestamp(annotation,
'inside_report_date',
'inside_age', now)
process_annotation_timestamp(annotation,
'outside_report_date',
'outside_age', now)
payload['annotation'].append(annotation)
return HttpResponse(json.dumps(payload),
content_type='application/json',
status=200)
return HttpResponse(status=405)
def map_data(request):
data = []
for key in CONN.keys('location:*'):
annotation = json.loads(CONN[key])
try:
report_date = pickle.loads(annotation['report_date'])
now = datetime.datetime.now()
annotation['report_age'] = unicode(now - report_date)
except TypeError:
annotation['report_age'] = None
del annotation['report_date']
data.append(annotation)
return HttpResponse(json.dumps(data),
content_type='application/json',
status=200)
|
JoltLabs/django-web-sugar
|
web_sugar/templatetags/jslint.py
|
Python
|
bsd-3-clause
| 2,355
| 0.000849
|
import os.path
import subprocess
from django import template
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.templatetags.static import StaticNode
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import staticfiles_storage
from web_sugar.jslint import JSLintError
register = template.Library()
# Jslint path
_BASE_DIR = os.path.dirname(os.path.dirname(__file__))
JSLINT_PATH = os.path.join(_BASE_DIR, 'jslint', 'bin', 'jslint.js')
_file_times = {}
def _run_jslint(path, extra_args):
#
# We just send it to jslint, capture its return value, stdout and stderr.
#
global _file_times
old_mtime = _file_times.get(path, None)
new_mtime = os.path.getmtime(path)
if old_mtime and old_mtime >= new_mtime and getattr(settings, 'JSLINT_CACHE_ENABLED', True):
return
_file_times[path] = new_mtime
node_path = getattr(settings, 'JSLINT_NODE_PATH', 'node')
cmd = [node_path, JSLINT_PATH] + [path] + getattr(settings, 'JSLINT_ARGS', [])
if extra_args:
cmd.append(extra_args)
proc = subprocess.Popen(' '.join(cmd), shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
proc.wait()
if stderr:
raise ImproperlyConfigured(stderr + '\n' + stdout)
elif stdout and not stdout.rstrip().endswith('is OK.'):
raise JSLintError(stdout)
class JSLintNode(StaticNode):
def url(self, context):
path = self.path.resolve(context)
extra_args = context.get('jslint_extra_args')
if getattr(settings, 'JSLINT_ENABLED', False):
found = finders.find(path)
if not found:
raise JSLintError("File not found: " + path)
_run_jslint(found, extra_args)
return staticfiles_storage.url(path)
@register.tag('jslint')
def do_jslint(parser, token):
"""
A template tag that wraps the 'static' template
tag but also runs the path through jslint.
Usage::
{% jslint path [as varname] %}
Examples::
{% jslint "myapp/js/base.js" %}
{% jslint variable_with_path %}
{% static variable_with_path as varname %}
"""
return JSLintNode.handle_token(parser, token)
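# A settings sketch (illustrative values; the setting names are the ones read
# by _run_jslint() and JSLintNode above):
#
#     JSLINT_ENABLED = DEBUG        # only lint while developing
#     JSLINT_NODE_PATH = 'node'     # node executable used to run jslint.js
#     JSLINT_ARGS = []              # extra command-line arguments passed to jslint
#     JSLINT_CACHE_ENABLED = True   # skip files whose mtime has not changed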
|
agilemobiledev/mongo-orchestration
|
setup.py
|
Python
|
apache-2.0
| 2,438
| 0
|
#!/usr/bin/python
import os
import sys
extra_opts = {'test_suite': 'tests'}
extra_deps = []
extra_test_deps = []
if sys.version_info[:2] == (2, 6):
extra_deps.append('argparse')
extra_deps.append('simplejson')
extra_test_deps.append('unittest2')
extra_opts['test_suite'] = 'unittest2.collector'
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
try:
with open('README.rst', 'r') as fd:
extra_opts['long_description'] = fd.read()
except IOError:
pass # Install without README.rst
setup(
name='mongo-orchestration',
version='0.4.dev0',
author='MongoDB, Inc.',
author_email='mongodb-user@googlegroups.com',
description='Restful service for managing MongoDB servers',
keywords=['mongo-orchestration', 'mongodb', 'mongo', 'rest', 'testing'],
license="http://www.apache.org/licenses/LICENSE-2.0.html",
platforms=['any'],
url='https://github.com/10gen/mongo-orchestration',
install_requires=['pymongo>=3.0.2',
'bottle>=0.12.7',
'CherryPy>=3.5.0'] + extra_deps,
tests_require=['coverage>=3.5'] + extra_test_deps,
packages=find_packages(exclude=('tests',)),
package_data={
'mongo_orchestration': [
os.path.join('configurations', config_dir, '*.json')
for config_dir in ('servers', 'replica_sets', 'sharded_clusters')
] + [os.path.join('lib', 'client.pem')]
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython"
],
entry_points={
'console_scripts': [
'mongo-orchestration = mongo_orchestration.server:main'
]
},
**extra_opts
)
|
CanalTP/flask-restful
|
tests/test_api.py
|
Python
|
bsd-3-clause
| 27,561
| 0.00381
|
import unittest
from flask import Flask, views
from flask.signals import got_request_exception, signals_available
try:
from mock import Mock, patch
except:
# python3
from unittest.mock import Mock, patch
import flask
import werkzeug
from flask.ext.restful.utils import http_status_message, challenge, unauthorized, error_data, unpack
import flask_restful
import flask_restful.fields
from flask_restful import OrderedDict
from json import dumps, loads
#noinspection PyUnresolvedReferences
from nose.tools import assert_equals, assert_true # you need it for tests in form of continuations
import six
def check_unpack(expected, value):
assert_equals(expected, value)
def test_unpack():
yield check_unpack, ("hey", 200, {}), unpack("hey")
yield check_unpack, (("hey",), 200, {}), unpack(("hey",))
yield check_unpack, ("hey", 201, {}), unpack(("hey", 201))
yield check_unpack, ("hey", 201, "foo"), unpack(("hey", 201, "foo"))
yield check_unpack, (["hey", 201], 200, {}), unpack(["hey", 201])
# Add a dummy Resource to verify that the app is properly set.
class HelloWorld(flask_restful.Resource):
def get(self):
return {}
class APITestCase(unittest.TestCase):
def test_http_code(self):
self.assertEquals(http_status_message(200), 'OK')
self.assertEquals(http_status_message(404), 'Not Found')
def test_challenge(self):
self.assertEquals(challenge('Basic', 'Foo'), 'Basic realm="Foo"')
def test_unauthorized(self):
response = Mock()
response.headers = {}
unauthorized(response, "flask-restful")
self.assertEquals(response.headers['WWW-Authenticate'],
'Basic realm="flask-restful"')
def test_unauthorized_custom_realm(self):
response = Mock()
response.headers = {}
unauthorized(response, realm='Foo')
self.assertEquals(response.headers['WWW-Authenticate'], 'Basic realm="Foo"')
def test_handle_error_401_sends_challege_default_realm(self):
app = Flask(__name__)
api = flask_restful.Api(app)
exception = Mock()
exception.code = 401
exception.data = {'foo': 'bar'}
with app.test_request_context('/foo'):
resp = api.handle_error(exception)
self.assertEquals(resp.status_code, 401)
self.assertEquals(resp.headers['WWW-Authenticate'],
'Basic realm="flask-restful"')
def test_handle_error_401_sends_challege_configured_realm(self):
app = Flask(__name__)
app.config['HTTP_BASIC_AUTH_REALM'] = 'test-realm'
api = flask_restful.Api(app)
exception = Mock()
exception.code = 401
exception.data = {'foo': 'bar'}
with app.test_request_context('/foo'):
resp = api.handle_error(exception)
self.assertEquals(resp.status_code, 401)
self.assertEquals(resp.headers['WWW-Authenticate'],
'Basic realm="test-realm"')
def test_error_data(self):
self.assertEquals(error_data(400), {
'status': 400,
'message': 'Bad Request',
})
def test_marshal(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
marshal_dict = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
output = flask_restful.marshal(marshal_dict, fields)
self.assertEquals(output, {'foo': 'bar'})
def test_marshal_decorator(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
@flask_restful.marshal_with(fields)
def try_me():
return OrderedDict([('foo', 'bar'), ('bat', 'baz')])
self.assertEquals(try_me(), {'foo': 'bar'})
def test_marshal_decorator_tuple(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
@flask_restful.marshal_with(fields)
def try_me():
return OrderedDict([('foo', 'bar'), ('bat', 'baz')]), 200, {'X-test': 123}
self.assertEquals(try_me(), ({'foo': 'bar'}, 200, {'X-test': 123}))
def test_marshal_field(self):
fields = OrderedDict({'foo': flask_restful.fields.Raw()})
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
output = flask_restful.marshal(marshal_fields, fields)
self.assertEquals(output, {'foo': 'bar'})
def test_marshal_tuple(self):
fields = OrderedDict({'foo': flask_restful.fields.Raw})
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
output = flask_restful.marshal((marshal_fields,), fields)
self.assertEquals(output, [{'foo': 'bar'}])
def test_marshal_nested(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested({
'fye': flask_restful.fields.String,
}))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', {'fye': 'fum'})])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', 'fum')]))])
self.assertEquals(output, expected)
def test_marshal_nested_with_non_null(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.String)
]), allow_null=False))
])
marshal_fields = [OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])]
output = flask_restful.marshal(marshal_fields, fields)
expected = [OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', None), ('blah', None)]))])]
self.assertEquals(output, expected)
def test_marshal_nested_with_null(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.String)
]), allow_null=True))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'bar'), ('fee', None)])
self.assertEquals(output, expected)
def test_marshal_nested_with_non_null_and_no_display_empty(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.String)
]), allow_null=False, display_null=False))
])
marshal_fields = [OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])]
output = flask_restful.marshal(marshal_fields, fields, display_null=False)
expected = [OrderedDict([('foo', 'bar'), ('fee', OrderedDict([]))])]
self.assertEquals(output, expected)
def test_marshal_nested_with_null_and_no_display_empty(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.String)
]), allow_null=True, display_null=False))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])
output = flask_restful.marshal(marshal_fields, fields, display_null=False)
expected = OrderedDict([('foo', 'bar')])
self.assertEquals(output, expected)
def test_allow_null_presents_data(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.
|
tornadozou/tensorflow
|
tensorflow/python/keras/applications/__init__.py
|
Python
|
apache-2.0
| 1,675
| 0
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Applications are canned architectures with pre-trained weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications import inception_v3
from tensorflow.python.keras.applications import mobilenet
from tensorflow.python.keras.applications import resnet50
from tensorflow.python.keras.applications import vgg16
from tensorflow.python.keras.applications import vgg19
from tensorflow.python.keras.applications import xception
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.resnet50 import ResNet50
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.applications.vgg19 import VGG19
from tensorflow.python.keras.applications.xception import Xception
del absolute_import
del division
del print_function
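# Usage sketch (illustrative; the weights argument and its download behaviour
# are standard Keras application options, not defined in this __init__):
#
#     from tensorflow.python.keras.applications import ResNet50
#     model = ResNet50(weights='imagenet')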
|
DimensionDataCBUSydney/mist.io
|
scripts/get_rackspace_pricing.py
|
Python
|
agpl-3.0
| 3,267
| 0.010713
|
# -*- coding: utf-8 -*-
#get pricing for rackspace providers by asking rackspace.
#Outputs dicts with providers and pricing per size, per image type, suitable for mist.io's config.py
import urllib
import urllib2
import cookielib
import json
#username and password as you login in https://mycloud.rackspace.com
username = ''
password = ''
rack_auth_url = 'https://mycloud.rackspace.com/'
payload = {
'username': username,
'password': password,
'type': 'password'
}
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
data = urllib.urlencode(payload)
req = urllib2.Request(rack_auth_url, data)
urllib2.urlopen(req)
#authenticate to rackspace, to get a valid session id
rackspace_pricing_url = 'https://mycloud.rackspace.com/proxy/rax:offerings,offerings/offerings/22'
price_list = urllib2.urlopen(rackspace_pricing_url)
#then ask for the pricing list
data = price_list.read()
price_list = json.loads(data)
price_list = price_list['offering']['product']
image_mapping = {
"Windows + SQL Web": 'mswinSQLWeb',
"Windows + SQL Standard": 'mswinSQL',
"Windows -": 'mswin',
"RHEL": 'rhel',
"Linux": 'linux',
"Vyatta": 'vyatta',
}
#available datacenters/regions
#rackspace dict on pricing.json refers to first generation cloud servers and is manually
#created from http://www.rackspace.com/cloud/pricing/ First Gen...
rackspace = {
'rackspacenovalon': {},
'rackspacenovaiad': {},
'rackspacenovasyd': {},
'rackspacenovaord': {},
'rackspacenovadfw': {},
'rackspacenovahkg': {}
}
#FIXME: GBP mapping
currency_mapping = {
'USD': '$',
'GBP': 'GBP'
}
#populate our dict with prices
for prod in price_list:
description = prod['description'] # description, contains the image type as well
for image_type in image_mapping.keys():
if image_type in description:
image = image_mapping[image_type]
break
try:
for line in prod['productCharacteristics']['productCharacteristic']:
if line.get('characteristicCategory') == 'PROVISIONING':
size = line['value'] #the size value, values 2-8
for pricing in prod['priceList']['productOfferingPrice'][0]['prices']['price']:
currency = currency_mapping.get(pricing['currency'], '')
amount = currency + pricing['amount'] + "/hour"
region = 'rackspacenova' + pricing['region'].lower()
try:
rackspace[region][size][image] = amount
except:
rackspace[region][size] = {}
rackspace[region][size][image] = amount
except Exception as e:
pass
#dicts for Bandwidth/Storage that do not interest us
#formatting for easy copy/paste to mist.io/config.py
for rack_key in rackspace.keys():
print " \"%s\": {" % rack_key
for image in rackspace[rack_key].keys()[:-1]:
print " \"%s\": %s," % (image, json.dumps(rackspace[rack_key][image]))
image = rackspace[rack_key].keys()[-1]
print " \"%s\": %s" % (image, json.dumps(rackspace[rack_key][image]))
#don't use a comma for the last key, for valid JSON
print ' },\n'
|
GetBlimp/boards-backend
|
manage.py
|
Python
|
agpl-3.0
| 584
| 0
|
#!/usr/bin/env python
import os
import sys
import dotenv
dotenv.read_dotenv()
if __name__ == "__main__":
ENVIRONMENT = os.getenv('ENVIRONMENT')
if ENVIRONMENT == 'STAGING':
settings = 'staging'
elif ENVIRONMENT == 'PRODUCTION':
settings = 'production'
else:
settings = 'development'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blimp_boards.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', settings.title())
from configurations.management import execute_from_command_line
execute_from_command_line(sys.argv)
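# Illustrative .env sketch (read by dotenv.read_dotenv() above; the value
# selects the settings class through the branches in __main__):
#
#     ENVIRONMENT=STAGING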
|
dalanlan/calico-docker
|
calico_containers/calico_ctl/node.py
|
Python
|
apache-2.0
| 18,774
| 0.001438
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
calicoctl node [--ip=<IP>] [--ip6=<IP6>] [--node-image=<DOCKER_IMAGE_NAME>] [--as=<AS_NUM>] [--log-dir=<LOG_DIR>] [--detach=<DETACH>] [--kubernetes] [--libnetwork]
calicoctl node stop [--force]
calicoctl node bgp peer add <PEER_IP> as <AS_NUM>
calicoctl node bgp peer remove <PEER_IP>
calicoctl node bgp peer show [--ipv4 | --ipv6]
Description:
Configure the main calico/node container as well as default BGP information
for this node.
Options:
--force Stop the node process even if it has active endpoints.
--node-image=<DOCKER_IMAGE_NAME> Docker image to use for Calico's per-node
container. Default is calico/node:latest.
Default for Calico with libnetwork is
calico/node-libnetwork:latest.
--detach=<DETACH> Set "true" to run Calico service as detached,
"false" to run in the foreground. [default: true]
--log-dir=<LOG_DIR> The directory for logs [default: /var/log/calico]
--ip=<IP> The local management address to use.
--ip6=<IP6> The local IPv6 management address to use.
--as=<AS_NUM> The default AS number for this node.
--ipv4 Show IPv4 information only.
--ipv6 Show IPv6 information only.
--kubernetes Download and install the kubernetes plugin.
--libnetwork Use the libnetwork plugin.
"""
import sys
import os
import stat
import docker
import socket
import urllib
import signal
from pycalico.datastore_datatypes import IPPool
from pycalico.datastore_datatypes import BGPPeer
from pycalico.datastore import (ETCD_AUTHORITY_ENV,
ETCD_AUTHORITY_DEFAULT)
from pycalico.util import get_host_ips
from netaddr import IPAddress
from prettytable import PrettyTable
from connectors import client
from connectors import docker_client
from utils import DOCKER_ORCHESTRATOR_ID
from utils import hostname
from utils import print_paragraph
from utils import get_container_ipv_from_arguments
from utils import validate_ip
from checksystem import check_system
DEFAULT_IPV4_POOL = IPPool("192.168.0.0/16")
DEFAULT_IPV6_POOL = IPPool("fd80:24e2:f998:72d6::/64")
POLICY_ONLY_ENV = "POLICY_ONLY_CALICO"
KUBERNETES_PLUGIN_VERSION = 'v0.1.0'
KUBERNETES_BINARY_URL = 'https://github.com/projectcalico/calico-kubernetes/releases/download/%s/calico_kubernetes' % KUBERNETES_PLUGIN_VERSION
KUBERNETES_PLUGIN_DIR = '/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/'
KUBERNETES_PLUGIN_DIR_BACKUP = '/etc/kubelet-plugins/calico/'
CALICO_DEFAULT_IMAGE = "calico/node:latest"
LIBNETWORK_IMAGE = 'calico/node-libnetwork:latest'
def validate_arguments(arguments):
"""
Validate argument values:
<IP>
<IP6>
<PEER_IP>
<AS_NUM>
<DETACH>
Arguments not validated:
<DOCKER_IMAGE_NAME>
<LOG_DIR>
:param arguments: Docopt processed arguments
"""
# Validate IPs
ip_ok = arguments.get("--ip") is None or \
validate_ip(arguments.get("--ip"), 4)
ip6_ok = arguments.get("--ip6") is None or \
validate_ip(arguments.get("--ip6"), 6)
container_ip_ok = arguments.get("<IP>") is None or \
validate_ip(arguments["<IP>"], 4) or \
validate_ip(arguments["<IP>"], 6)
peer_ip_ok = arguments.get("<PEER_IP>") is None or \
validate_ip(arguments["<PEER_IP>"], 4) or \
validate_ip(arguments["<PEER_IP>"], 6)
asnum_ok = True
if arguments.get("<AS_NUM>") or arguments.get("--as"):
try:
asnum = int(arguments["<AS_NUM>"] or arguments["--as"])
asnum_ok = 0 <= asnum <= 4294967295
except ValueError:
asnum_ok = False
detach_ok = True
if arguments.get("<DETACH>") or arguments.get("--detach"):
detach_ok = arguments.get("--detach") in ["true", "false"]
# Print error message
if not ip_ok:
print "Invalid IPv4 address specified with --ip argument."
if not ip6_ok:
print "Invalid IPv6 address specified with --ip6 argument."
if not container_ip_ok or not peer_ip_ok:
print "Invalid IP address specified."
if not asnum_ok:
print "Invalid AS Number specified."
if not detach_ok:
print "Valid values for --detach are 'true' and 'false'"
# Exit if not valid argument
if not (ip_ok and ip6_ok and container_ip_ok and peer_ip_ok and asnum_ok
and detach_ok):
sys.exit(1)
def node(arguments):
"""
Main dispatcher for node commands. Calls the corresponding helper function.
:param arguments: A dictionary of arguments already processed through
this file's docstring with docopt
:return: None
"""
validate_arguments(arguments)
if arguments.get("bgp"):
if arguments.get("peer"):
ip_version = get_container_ipv_from_arguments(arguments)
if arguments.get("add"):
node_bgppeer_add(arguments.get("<PEER_IP>"), ip_version,
arguments.get("<AS_NUM>"))
elif arguments.get("remove"):
node_bgppeer_remove(arguments.get("<PEER_IP>"), ip_version)
elif arguments.get("show"):
if not ip_version:
node_bgppeer_show(4)
node_bgppeer_show(6)
else:
node_bgppeer_show(ip_version)
elif arguments.get("stop"):
node_stop(arguments.get("--force"))
else:
assert arguments.get("--detach") in ["true", "false"]
detach = arguments.get("--detach") == "true"
node_start(ip=arguments.get("--ip"),
node_image=arguments.get('--node-image'),
log_dir=arguments.get("--log-dir"),
ip6=arguments.get("--ip6"),
as_num=arguments.get("--as"),
detach=detach,
kubernetes=arguments.get("--kubernetes"),
libnetwork=arguments.get("--libnetwork"))
def node_start(node_image, log_dir, ip, ip6, as_num, detach, kubernetes,
libnetwork):
"""
Create the calico-node container and establish Calico networking on this
host.
:param ip: The IPv4 address of the host.
:param node_image: The calico-node image to use.
:param ip6: The IPv6 address of the host (or None if not configured)
:param as_num: The BGP AS Number to use for this node. If not specified
the global default value will be used.
:param detach: True to run in Docker's "detached" mode, False to run
attached.
:param kubernetes: True to install the kubernetes plugin, False otherwise.
:param libnetwork: True to use the calico/node-libnetwork image as the node
image, False otherwise.
:return: None.
"""
# Print warnings for any known system issues before continuing
check_system(fix=False, quit_if_error=False)
# Ensure log directory exists
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Get IP address of host, if none was specified
if not ip:
ips = get_host_ips(exclude=["^docker.*", "^cbr.*"])
try:
ip = ips.pop()
except IndexError:
print "Couldn't autodetect a management IP address. Please provide" \
" an IP by rerunning the command with the --ip=<IP_ADDRESS> flag."
sys.e
|
dsiroky/snakemq
|
tests/performance/packeter_connections.py
|
Python
|
mit
| 3,249
| 0.003693
|
#!/usr/bin/env python
"""
High amount of parallel connections.
@author: David Siroky (siroky@dasir.cz)
@license: MIT License (see LICENSE.txt or
U{http://www.opensource.org/licenses/mit-license.php})
"""
import time
import logging
import sys
import os
import threading
import multiprocessing
import random
sys.path.insert(0, "../..")
import snakemq
import snakemq.link
import snakemq.packeter
import snakemq.exceptions
###########################################################################
DATA_SIZE = 5
CLI_PROC_COUNT = 10
CLI_THR_COUNT = 20
PACKETS_COUNT = 1000
PORT = 4000
###########################################################################
barrier = multiprocessing.Value("i", 0)
###########################################################################
def check_barrier():
barrier.acquire()
barrier.value += 1
barrier.release()
while barrier.value < CLI_PROC_COUNT * CLI_THR_COUNT:
pass
###########################################################################
def srv():
s = snakemq.link.Link()
container = {"start_time": None, "cli_count": 0, "count": 0}
def on_connect(conn_id):
container["cli_count"] += 1
if container["cli_count"] == CLI_PROC_COUNT * CLI_THR_COUNT:
container["start_time"] = time.time()
print "all connected"
def on_packet_recv(conn_id, packet):
assert len(packet) == DATA_SIZE
container["count"] += 1
if container["count"] >= PACKETS_COUNT * CLI_PROC_COUNT * CLI_THR_COUNT:
s.stop()
s.add_listener(("", PORT))
tr = snakemq.packeter.Packeter(s)
tr.on_connect = on_connect
tr.on_packet_recv = on_packet_recv
s.loop()
s.cleanup()
diff = time.time() - container["start_time"]
count = container["count"]
print "flow: %.02f MBps, total %i pkts, %i pkts/s" % (
DATA_SIZE * count / diff / 1024**2, count, count / diff)
###########################################################################
def cli():
s = snakemq.link.Link()
def on_connect(conn_id):
check_barrier()
for i in xrange(PACKETS_COUNT):
tr.send_packet(conn_id, "x" * DATA_SIZE)
def on_disconnect(conn_id):
s.stop()
# listen queue on the server is short so the reconnect interval needs to be
# short because all clients are trying to connect almost at the same time
s.add_connector(("localhost", PORT), reconnect_interval=0.3)
# spread the connections
time.sleep(random.randint(0, 1000) / 1000.0)
tr = snakemq.packeter.Packeter(s)
tr.on_connect = on_connect
tr.on_disconnect = on_disconnect
s.loop()
s.cleanup()
def cli_proc():
thrs = []
for i in range(CLI_THR_COUNT):
thr = threading.Thread(target=cli)
thrs.append(thr)
thr.start()
for thr in thrs:
thr.join()
###########################################################################
# avoid logging overhead
logger = logging.getLogger("snakemq")
logger.setLevel(logging.ERROR)
procs = []
for i in range(CLI_PROC_COUNT):
proc = multiprocessing.Process(target=cli_proc)
procs.append(proc)
proc.start()
srv()
for proc in procs:
proc.join()
|
intelxed/xed
|
scripts/apply_legal_header.py
|
Python
|
apache-2.0
| 5,516
| 0.012509
|
#!/usr/bin/env python
# -*- python -*-
# BEGIN_LEGAL
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# END_LEGAL
from __future__ import print_function
import sys
import os
import re
from stat import *
def get_mode(fn):
"get the mode of the file named fn, suitable for os.chmod() or open() calls"
mode = os.stat(fn)[ST_MODE]
cmode = S_IMODE(mode)
return cmode
def replace_original_with_new_file(file,newfile):
"Replace file with newfile"
# os.system(" mv -f %s %s" % ( newfile, file))
os.unlink(file)
os.rename(newfile,file)
def remove_existing_header(contents):
"remove existing legal header, if any"
retval = []
skipping = False
start_pattern = re.compile(r"^(/[*][ ]*BEGIN_LEGAL)|(#[ ]*BEGIN_LEGAL)")
stop_pattern = re.compile(r"^[ ]*(END_LEGAL[ ]?[*]/)|(#[ ]*END_LEGAL)")
for line in contents:
if start_pattern.match(line):
skipping = True
if skipping == False:
retval.append(line)
if stop_pattern.match(line):
skipping = False
return retval
def prepend_script_comment(header):
"Apply script comment marker to each line"
retval = []
for line in header:
retval.append( "# " + line )
return retval
def apply_header_to_source_file(header, file):
"apply header to file using C++ comment style"
f = open(file,"r")
mode = get_mode(file)
contents = f.readlines()
f.close()
trimmed_contents = remove_existing_header(contents)
newfile = file + ".new"
o = open(newfile,"w")
o.write("/* BEGIN_LEGAL \n")
o.writelines(header)
o.write("END_LEGAL */\n")
o.writelines(trimmed_contents)
o.close()
os.chmod(newfile,mode)
replace_original_with_new_file(file,newfile)
# FIXME: this will flag files that have multiline C-style comments
# with -*- in them even though the splitter will not look for the
# comment properly
def shell_script(lines):
"""return true if the lines are the start of shell script or
something that needs a mode comment at the top"""
first = ""
second = ""
if len(lines) > 0:
first = lines[0];
if len(lines) > 1:
second = lines[1];
if re.match("#!",first):
#print "\t\t First script test true"
return True
if re.search("-\*-",first) or re.search("-\*-",second):
#print "\t\t Second script test true"
return True
return False
def split_script(lines):
"Return a tuple of (header, body) for shell scripts, based on an input line list"
header = []
body = []
f = lines.pop(0)
while re.match("#",f) or re.search("-\*-",f):
header.append(f)
f = lines.pop(0)
# tack on the first non matching line from the above loop
body.append(f);
body.extend(lines);
return (header,body)
def write_script_header(o,lines):
"Write the file header for a script"
o.write("# BEGIN_LEGAL\n")
o.writelines(lines)
o.write("# END_LEGAL\n")
def apply_header_to_data_file(header, file):
"apply header to file using script comment style"
f = open(file,"r")
mode = get_mode(file)
#print "file: " + file + " mode: " + "%o" % mode
contents = f.readlines()
f.close()
trimmed_contents = remove_existing_header(contents)
newfile = file + ".new"
o = open(newfile,"w")
augmented_header = prepend_script_comment(header)
if shell_script(trimmed_contents):
(script_header, script_body) = split_script(trimmed_contents)
o.writelines(script_header)
write_script_header(o, augmented_header)
o.writelines(script_body)
else:
write_script_header(o,augmented_header)
o.writelines(trimmed_contents)
o.close()
os.chmod(newfile,mode)
replace_original_with_new_file(file,newfile)
####################################################################
### MAIN
####################################################################
if __name__ == '__main__':
if len(sys.argv) < 4:
print("Usage " + sys.argv[0] + " [-s|-t] legal-header file-name [file-name...]\n")
sys.exit(1)
type = sys.argv[1]
header_file = sys.argv[2]
if not os.path.exists(header_file):
print("Could not find header file: [%s]\n" % (header_file))
sys.exit(1)
files_to_tag = sys.argv[3:]
f = open(header_file,"r")
header = f.readlines()
f.close()
sources = files_to_tag
if type in [ '-c', "-s"]:
for file in sources:
if re.search(".svn",file) == None and re.search(".new$",file) == None:
apply_header_to_source_file(header, file.strip())
elif type in ['-d', "-t"]:
for file in sources:
if re.search(".svn",file) == None and re.search(".new$",file) == None:
apply_header_to_data_file(header, file.strip())
else:
print("2nd argument must be -s or -t\n")
sys.exit(1)
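# Hypothetical invocations (file names are placeholders; the flags match the
# usage message printed above):
#   ./apply_legal_header.py -s legal_header.txt foo.c bar.h          # C-style /* */ header
#   ./apply_legal_header.py -t legal_header.txt build.py script.sh   # '#' comment header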
|
firemark/zephryos
|
zephryos/wtform/controller.py
|
Python
|
mit
| 3,101
| 0.002257
|
from wtforms.validators import input_required
from wtforms import fields as wtform_fields
from ..serializers import serialize_field
from ..abstract.controller import AbstractController
from wtforms_json import flatten_json
class DummyMultiDict(dict):
def getlist(self, key):
return [self[key]]
class WTFormController(AbstractController):
field_types = {}
def describe_fields(self, form=None):
form = form or self.cls_form()
fields = form._fields
return [
self.serialize_field(name, field)
for name, field in fields.items()
]
def describe_form(self, form=None):
form = form or self.cls_form()
return {
"template": getattr(form, "__template__", "default")
}
def create_and_set_form(self, document):
data = flatten_json(self.cls_form, document)
multi_dict = DummyMultiDict(
(key, "" if value is None else value) for key, value in data.items()
)
return self.cls_form(multi_dict)
def create_and_set_and_validate_form(self, document):
form = self.create_and_set_form(document)
form.validate()
return form
def get_field_type(self, field):
return self.field_types.get(field.__class__, ('unknown', None))
def serialize_field(self, name, field):
field_type, func = self.get_field_type(field)
if func is None:
attrs = {}
else:
args = func(self, field)
if isinstance(args, tuple) and len(args) == 2:
field_type, attrs = args
else:
attrs = args
return serialize_field(
type_field=field_type,
name=name,
fullname=field.label.text,
required=any(
isinstance(v, input_required) for v in field.validators),
default=field.default,
description=field.description,
attrs=attrs,
widget=field.widget
)
@classmethod
def add_new_type(cls, name, cls_type, func=None):
cls.field_types[cls_type] = (name, func)
def outer(decorated_func):
cls.field_types[cls_type] = (name, decorated_func)
return decorated_func
return outer
WTFormController.add_new_type("text", wtform_fields.StringField)
WTFormController.add_new_type("bool", wtform_fields.BooleanField)
WTFormController.add_new_type("integer", wtform_fields.IntegerField)
@WTFormController.add_new_type("listed", wtform_fields.FieldList)
def listed_type(ctrl, field):
nested_field = field._add_entry()
if isinstance(nested_field, wtform_fields.FormField):
return 'listed_subform', {'fields': ctrl.describe_fields(nested_field)}
else:
return {'field': ctrl.serialize_field(nested_field)}
@WTFormController.add_new_type("select", wtform_fields.SelectField)
def select_type(ctrl, field):
choices = field.choices
return {
'choices': [{'name': name, 'value': value} for value, name in choices]
}
#todo: add more types
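# A usage sketch (illustrative; the form class is invented, wtforms.Form is
# assumed, and cls_form is assumed to be the hook AbstractController expects):
#
#     class LoginForm(Form):
#         name = wtform_fields.StringField("Name")
#
#     class LoginController(WTFormController):
#         cls_form = LoginForm
#
#     LoginController().describe_fields()   # -> list of serialized field dicts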
|
klahnakoski/cloc
|
cloc/util/queries/es_query_aggop.py
|
Python
|
mpl-2.0
| 3,106
| 0.001932
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from ..collections.matrix import Matrix
from ..collections import AND
from ..structs.wraps import listwrap
from ..struct import unwrap
from ..queries import es_query_util
from ..queries.es_query_util import aggregates, fix_es_stats, buildESQuery
from ..queries.filters import simplify
from ..queries import MVEL
from ..queries.cube import Cube
def is_aggop(query):
if not query.edges:
return True
return False
def es_aggop(es, mvel, query):
select = listwrap(query.select)
esQuery = buildESQuery(query)
isSimple = AND(aggregates[s.aggregate] == "count" for s in select)
if isSimple:
return es_countop(es, query) # SIMPLE, USE TERMS FACET INSTEAD
value2facet = dict() # ONLY ONE FACET NEEDED PER
name2facet = dict() # MAP name TO FACET WITH STATS
for s in select:
if s.value not in value2facet:
if MVEL.isKeyword(s.value):
unwrap(esQuery.facets)[s.name] = {
"statistical": {
"field": s.value
},
"facet_filter": simplify(query.where)
}
else:
unwrap(esQuery.facets)[s.name] = {
"statistical": {
"script": mvel.compile_expression(s.value, query)
},
"facet_filter": simplify(query.where)
}
value2facet[s.value] = s.name
name2facet[s.name] = value2facet[s.value]
data = es_query_util.post(es, esQuery, query.limit)
matricies = {s.name: Matrix(value=fix_es_stats(unwrap(data.facets)[s.name])[aggregates[s.aggregate]]) for s in select}
cube = Cube(query.select, [], matricies)
cube.frum = query
return cube
def es_countop(es, mvel, query):
"""
RETURN SINGLE COUNT
"""
select = listwrap(query.select)
esQuery = buildESQuery(query)
for s in select:
if MVEL.isKeyword(s.value):
esQuery.facets[s.name] = {
"terms": {
"field": s.value,
"size": query.limit,
},
"facet_filter":{"exists":{"field":s.value}}
}
else:
# COMPLICATED value IS PROBABLY A SCRIPT, USE IT
esQuery.facets[s.name] = {
"terms": {
"script_field": mvel.compile_expression(s.value, query),
"size": 200000
}
}
data = es_query_util.post(es, esQuery, query.limit)
matricies = {}
for s in select:
matricies[s.name] = Matrix(value=data.hits.facets[s.name].total)
cube = Cube(query.select, query.edges, matricies)
cube.frum = query
return cube
|
woodshop/chainer
|
tests/optimizers_tests/test_optimizers_by_linear_model.py
|
Python
|
mit
| 4,152
| 0
|
import unittest
import numpy as np
import six
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
if cuda.available:
cuda.init()
class LinearModel(object):
UNIT_NUM = 10
BATCH_SIZE = 32
EPOCH = 100
def __init__(self, optimizer):
self.model = chainer.FunctionSet(
l=F.Linear(self.UNIT_NUM, 2)
)
self.optimizer = optimizer
# true parameters
self.w = np.random.uniform(-1, 1,
(self.UNIT_NUM, 1)).astype(np.float32)
self.b = np.random.uniform(-1, 1, (1, )).astype(np.float32)
def _train_linear_classifier(self, model, optimizer, gpu):
def _make_label(x):
a = (np.dot(x, self.w) + self.b).reshape((self.BATCH_SIZE, ))
t = np.empty_like(a).astype(np.int32)
t[a >= 0] = 0
t[a < 0] = 1
return t
def _make_dataset(batch_size, unit_num, gpu):
x_data = np.random.uniform(
-1, 1, (batch_size, unit_num)).astype(np.float32)
t_data = _make_label(x_data)
if gpu:
x_data = cuda.to_gpu(x_data)
t_data = cuda.to_gpu(t_data)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
return x, t
for epoch in six.moves.range(self.EPOCH):
x, t = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
optimizer.zero_grads()
y = model.l(x)
loss = F.softmax_cross_entropy(y, t)
loss.backward()
optimizer.update()
x_test, t_test = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
y_test = model.l(x_test)
return F.accuracy(y_test, t_test)
def _accuracy_cpu(self):
self.optimizer.setup(self.model)
return self._train_linear_classifier(self.model, self.optimizer, False)
def _accuracy_gpu(self):
model = self.model
optimizer = self.optimizer
model.to_gpu()
optimizer.setup(model)
return self._train_linear_classifier(model, optimizer, True)
def accuracy(self, gpu):
if gpu:
return cuda.to_cpu(self._accuracy_gpu().data)
else:
return self._accuracy_cpu().data
class OptimizerTestBase(object):
def create(self):
raise NotImplementedError()
def setUp(self):
self.model = LinearModel(self.create())
@condition.retry(10)
def test_linear_model_cpu(self):
self.assertGreater(self.model.accuracy(False), 0.9)
@attr.gpu
@condition.retry(10)
def test_linear_model_gpu(self):
self.assertGreater(self.model.accuracy(True), 0.9)
def test_initialize(self):
model = self.model.model
assert isinstance(model, chainer.FunctionSet)
optimizer = self.create()
optimizer.setup(model)
self.assertEqual(len(optimizer.tuples), len(model.parameters))
msg = "'params_grads' must have 'parameters' and 'gradients'"
with self.assertRaisesRegexp(ValueError, msg):
optimizer.setup('xxx')
class TestAdaDelta(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.AdaDelta(eps=1e-5)
class TestAdaGrad(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.AdaGrad(0.1)
class TestAdam(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.Adam(0.1)
class TestMomentumSGD(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.MomentumSGD(0.1)
class TestRMSprop(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.RMSprop(0.1)
class TestRMSpropGraves(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.RMSpropGraves(0.1)
class TestSGD(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.SGD(0.1)
testing.run_module(__name__, __file__)
|
woutersmet/Zeosummer
|
share/plugins/molecular/sketch.py
|
Python
|
gpl-3.0
| 21,326
| 0.003189
|
# Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2009 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from zeobuilder import context
from zeobuilder.actions.composed import Interactive
from zeobuilder.actions.collections.interactive import InteractiveInfo, InteractiveGroup
from zeobuilder.nodes.model_object import ModelObject
from zeobuilder.nodes.glcontainermixin import GLContainerMixin
from zeobuilder.nodes.glmixin import GLTransformationMixin
from zeobuilder.nodes.analysis import common_parent
from zeobuilder.nodes.reference import TargetError
from zeobuilder.expressions import Expression
from zeobuilder.gui.glade_wrapper import GladeWrapper
from zeobuilder.gui.fields_dialogs import FieldsDialogSimple
from zeobuilder.gui import fields
from zeobuilder.gui.fields_dialogs import DialogFieldInfo
import zeobuilder.gui.fields as fields
import zeobuilder.actions.primitive as primitive
import zeobuilder.authors as authors
from molmod.transformations import Translation, Rotation
from molmod.data.bonds import BOND_SINGLE, BOND_DOUBLE, BOND_TRIPLE, BOND_HYBRID, BOND_HYDROGEN
from molmod.data.periodic import periodic
from molmod.data.bonds import bonds
from molmod.io.cml import load_cml
from molmod.vectors import angle as compute_angle
from molmod.vectors import random_orthonormal
import gtk, numpy
from math import cos,sin,sqrt,acos,pi
import os.path
class SketchOptions(GladeWrapper):
edit_erase_filter = FieldsDialogSimple(
"Edit the Erase filter",
fields.faulty.Expression(
label_text="Erase filter expression",
attribute_name="erase_filter",
show_popup=True,
history_name="filter",
),
((gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL), (gtk.STOCK_OK, gtk.RESPONSE_OK))
)
def __init__(self):
GladeWrapper.__init__(self, "plugins/molecular/gui.glade", "wi_sketch", "window")
self.window.hide()
self.init_callbacks(self.__class__)
self.init_proxies([
"cb_object",
"cb_vector",
"cb_erase_filter",
"bu_edit_erase_filter",
"la_current",
"bu_set_atom",
"cb_bondtype",
"hbox_atoms",
"hbox_quickpicks",
"hbox_fragments",
"la_fragment",
"cb_fragment"
])
self.erase_filter = Expression("True")
#Initialize atom number - this can be changed anytime with the edit_atom_number dialog
self.atom_number = 6;
# Initialize the GUI
# 1) common parts of the comboboxes
def render_icon(column, cell, model, iter):
if model.get_value(iter, 0) == "Fragment":
cell.set_property(
"pixbuf",
context.application.plugins.get_node("Atom").icon
)
else:
cell.set_property(
"pixbuf",
context.application.plugins.get_node(model.get_value(iter, 0)).icon
)
# 2) fill the objects combo box
self.object_store = gtk.ListStore(str)
self.object_store.append(["Atom"])
self.object_store.append(["Fragment"])
self.object_store.append(["Point"])
self.object_store.append(["Sphere"])
self.object_store.append(["Box"])
self.cb_object.set_model(self.object_store)
renderer_pixbuf = gtk.CellRendererPixbuf()
self.cb_object.pack_start(renderer_pixbuf, expand=False)
self.cb_object.set_cell_data_func(renderer_pixbuf, render_icon)
renderer_text = gtk.CellRendererText()
self.cb_object.pack_start(renderer_text, expand=True)
self.cb_object.add_attribute(renderer_text, "text", 0)
self.cb_object.set_active(0)
# 3) fill the vector combo box
self.vector_store = gtk.ListStore(str)
self.vector_store.append(["Bond"])
self.vector_store.append(["Arrow"])
self.vector_store.append(["Spring"])
self.cb_vector.set_model(self.vector_store)
renderer_pixbuf = gtk.CellRendererPixbuf()
self.cb_vector.pack_start(renderer_pixbuf, expand=False)
self.cb_vector.set_cell_data_func(renderer_pixbuf, render_icon)
renderer_text = gtk.CellRendererText()
self.cb_vector.pack_start(renderer_text, expand=True)
self.cb_vector.add_attribute(renderer_text, "text", 0)
self.cb_vector.set_active(0)
# 4) fill the bond type combo box
self.bondtype_store = gtk.ListStore(str,int)
self.bondtype_store.append(["Single bond",BOND_SINGLE])
self.bondtype_store.append(["Double bond",BOND_DOUBLE])
self.bondtype_store.append(["Triple bond",BOND_TRIPLE])
self.bondtype_store.append(["Hybrid bond",BOND_HYBRID])
self.bondtype_store.append(["Hydrogen bond",BOND_HYDROGEN])
self.cb_bondtype.set_model(self.bondtype_store)
#no icons like the others, just text here
renderer_text = gtk.CellRendererText()
self.cb_bondtype.pack_start(renderer_text, expand=True)
self.cb_bondtype.add_attribute(renderer_text, "text", 0)
self.cb_bondtype.set_active(0)
# register quick pick config setting
config = context.application.configuration
config.register_setting(
"sketch_quickpicks",
[6,7,8,9,10,11],
DialogFieldInfo("Sketch tool", (0,2), fields.faulty.IntegerList(
label_text="Quick pick atoms (applies after restart)",
attribute_name="sketch_quickpicks",
)),
)
# 5)create the "quick pick" atom buttons
for index in xrange(len(config.sketch_quickpicks)):
atomnumber = config.sketch_quickpicks[index]
bu_element = gtk.Button("")
bu_element.set_label("%s" % periodic[atomnumber].symbol)
bu_element.connect("clicked", self.on_bu_element_clicked, index)
# add to hbox
self.hbox_quickpicks.pack_start(bu_element)
bu_element.show()
# 6)fill the fragment combo box with filenames from share/fragments
fragmentdir = context.get_share_filename('fragments')
self.fragment_store = gtk.ListStore(str)
for filename in os.listdir (fragmentdir):
# Ignore subfolders and files with extension other than cml
if os.path.isdir (os.path.join (fragmentdir, filename)) or filename[-3:] != 'cml':
continue
self.fragment_store.append([filename[:-4]])
self.cb_fragment.set_model(self.fragment_store)
renderer_text = gtk.CellRendererText()
self.cb_fragment.pack_start(renderer_text, expand=True)
self.cb_fragment.add_attribute(renderer_text, "text", 0)
self.cb_fragment.set_active(0)
|
haoqili/MozSecWorld
|
apps/examples/views.py
|
Python
|
bsd-3-clause
| 186
| 0
|
"""Example views. Feel free to delete this app."""
from django import http
import jingo
def home(request):
data = {}
    return jingo.render(request, 'examples/home.html', data)
|
karrtikr/ete
|
sdoc/tutorial/examples/measuring_evolution_trees.py
|
Python
|
gpl-3.0
| 7,121
| 0.01292
|
#!/usr/bin/python
# Author: Francois-Jose Serra
# Creation Date: 2010/04/26 17:17:06
from ete3 import CodemlTree
import sys, re
typ = None
while typ != 'L' and typ != 'S':
typ = raw_input (\
"choose kind of example [L]ong or [S]hort, hit [L] or [S]:\n")
TREE_PATH = "./measuring_%s_tree.nw" % (typ)
ALG_PATH = "./alignment_%s_measuring_evol.fasta" % (typ)
WORKING_PATH = "/tmp/ete3-codeml_example/"
#MY_PATH = '/home/francisco/toolbox/ete3-codeml/doc/tutorial/examples/'
MY_PATH = ''
TREE_PATH = MY_PATH + re.sub('\./', '', TREE_PATH)
ALG_PATH = MY_PATH + re.sub('\./', '', ALG_PATH )
###
# load tree
print '\n ----> we create a CodemlTree object, and give to him a topology, from'
print TREE_PATH
out = True
while out == True:
try:
T = CodemlTree(TREE_PATH)
print TREE_PATH
out = False
except:
sys.stderr.write('Bad path for working directory. Enter new path or quit ("Q"):\n')
PATH = raw_input('')
if PATH.startswith('q') or PATH.startswith('Q'):
sys.exit()
TREE_PATH = "./measuring_%s_tree.nw" % (typ)
ALG_PATH = "./alignment_%s_measuring_evol.fasta" % (typ)
TREE_PATH = PATH + re.sub('\./', '', TREE_PATH)
ALG_PATH = PATH + re.sub('\./', '', ALG_PATH )
print TREE_PATH
print T
print '\n ----> and an alignment from: \n'+ALG_PATH+'\n\n'
T.link_to_alignment(ALG_PATH)
raw_input(" ====> hit some key to see the Tree with alignment")
T.show()
###
# run free-branch model, and display result
print '\n\n\n ----> We define now our working directory, that will be created:', \
WORKING_PATH
T.workdir = (WORKING_PATH)
print '\n ----> and run the free-branch model with run_paml function:\n\n%s\n%s\n%s\n'\
% ('*'*10 + ' doc ' + '*'*10, T.run_paml.func_doc, '*'*30)
raw_input(" ====> Hit some key to start free-branch computation with codeml...\n")
T.run_paml('fb')
T.show()
###
# run site model, and display result
print '\n\n\n ----> We are now going to run sites model M1 and M2 with run_paml function:\n'
raw_input(" ====> hit some key to start")
for model in ['M1', 'M2']:
print 'running model ' + model
T.run_paml(model)
print '\n\n\n ----> and use the get_most_likely function to compute the LRT between those models:\n'
print 'get_most_likely function: \n\n'+ '*'*10 + ' doc ' + '*'*10
print '\n' + T.get_most_likely.func_doc
print '*'*30
raw_input("\n ====> Hit some key to launch LRT")
pv = T.get_most_likely('M2', 'M1')
if pv <= 0.05:
print ' ----> -> most likely model is model M2, there is positive selection, pval: ',pv
else:
print ' ----> -> most likely model is model M1, pval: ',pv
raw_input(" ====> Hit some key...")
###
# tengo que encontrar un ejemplo mas bonito pero bueno.... :P
print '\n\n\n ----> We now add histograms to our tree to represent site models with add_histface function: \n\n%s\n%s\n%s\n'\
% ('*'*10 + ' doc ' + '*'*10,T.add_histface.func_doc,'*'*30)
print 'Upper face is an histogram representing values of omega for each column in the alignment,'
print '\
Colors represent significantly conserved sites (cyan to blue), neutral sites (greens), or under \n\
positive selection (orange to red). \n\
Lower face also represents values of omega (red line) and bars represent the error of the estimation.\n\
Also significance of belonging to one class of site can be painted in background (here lightgrey for\n\
everything significant)\n\
Both representation are done according to BEB estimation of M2, M1 or M7 estimation can also be \n\
drawn but should not be used.\n'
raw_input(" ====> Hit some key to display, histograms of omegas BEB from M2 model...")
col = {'NS' : 'white',
'RX' : 'lightgrey',
'RX+': 'lightgrey',
'CN' : 'lightgrey',
'CN+': 'lightgrey',
'PS' : 'lightgrey',
'PS+': 'lightgrey'}
T.add_histface('M2', down=False)
T.add_histface('M2',typ='error', col=col, lines=[1.0,0.3],col_lines=['black','grey'])
T.show()
###
# re-run without reeeeeeeeee-run
print '\n\n\n ----> Now we have runned once those 3 models, we can load again our tree from'
print ' ----> our tree file and alignment file, and this time load directly oufiles from previous'
print ' with the function link_to_evol_model \n\n%s\n%s\n%s\n' % ('*'*10 + ' doc ' + '*'*10, \
T.link_to_evol_model.func_doc, \
'*'*30)
raw_input('runs\n ====> hit some key to see...')
T = CodemlTree (TREE_PATH)
T.link_to_alignment (ALG_PATH)
T.workdir = (WORKING_PATH)
T.link_to_evol_model(T.workdir + '/fb/out','fb')
T.link_to_evol_model(T.workdir + '/M1/out','M1')
T.link_to_evol_model(T.workdir + '/M2/out','M2')
T.add_histface('M2', down=False)
T.add_histface('M2',typ='error', col=col, lines=[1.0,0.3],col_lines=['black','grey'])
T.show()
###
# mark tree functionality
print T.write(format=10)
name = None
while name not in T.get_leaf_names():
name = raw_input(' ====> As you need to mark some branches to run branch\n\
models, type the name of one leaf: ')
idname = T.get_leaves_by_name(name)[0].idname
print ' ----> you want to mark:',name,'that has this idname: ', idname
T.mark_tree([idname]) # by default will mark with '#1'
print 'have a look to the mark: '
print re.sub('#','|',re.sub('[0-9a-zA-Z_(),;]',' ',T.write(format=10)))
print re.sub('#','v',re.sub('[0-9a-zA-Z_(),;]',' ',T.write(format=10)))
print T.write(format=10)
print '\n You have marked the tree with a command like: T.mark_tree([%d])\n' % (idname)
print '\n%s\n%s\n%s\n' % ('*'*10 + ' doc ' + '*'*10, T.mark_tree.func_doc, \
'*'*30)
print '\n\n\n ----> We are now going to run branch-site models bsA and bsA1:\n\n'
raw_input(" ====> hit some key to start computation with our marked tree")
for model in ['bsA','bsA1']:
print 'running model ' + model
T.run_paml(model)
print '\n\n\n ----> again we use the get_most_likely function to compute the LRT between those models:\n'
raw_input(" ====> Hit some key to launch LRT")
pv = T.get_most_likely('bsA', 'bsA1')
if pv <= 0.05:
print ' ----> -> most likely model is model bsA, there is positive selection, pval: ',pv
print ' ' + name + ' is under positive selection.'
else:
print ' ----> -> most likely model is model bsA1, pval of LRT: ',pv
print ' ' + name + ' is not under positive selection.'
print '\n\n ----> more or less, all we have done here is feasable from the GUI,'
print ' try to reload our runs through it....'
raw_input('hit something to start')
T = CodemlTree(TREE_PATH)
T.link_to_alignment(ALG_PATH)
T.workdir = (WORKING_PATH)
T.show()
sys.stderr.write('\n\nThe End.\n\n')
|
ManageIQ/integration_tests
|
scripts/install_vddk.py
|
Python
|
gpl-2.0
| 1,303
| 0.002302
|
#!/usr/bin/env python3
"""SSH into a running appliance and install VMware VDDK.
"""
import argparse
import sys
from urllib.parse import urlparse
from cfme.utils.appliance import get_or_create_current_appliance
from cfme.utils.appliance import IPAppliance
def log(message):
print(f"[VDDK-INSTALL] {message}")
def main():
parser = argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--address',
help='hostname or ip address of target appliance', default=None)
    parser.add_argument('--vddk_url', help='url to download vddk pkg')
parser.add_argument('--reboot', help='reboot after installation ' +
'(required for proper operation)', action="store_true")
parser.add_argument('--force',
help='force installation if version detected', action="store_true")
args = parser.parse_args()
    if not args.address:
appliance = get_or_create_current_appliance()
else:
appliance = IPAppliance(hostname=urlparse(args.address).netloc)
appliance.install_vddk(
reboot=args.reboot, force=args.force, vddk_url=args.vddk_url, log_callback=log)
if __name__ == '__main__':
sys.exit(main())
|
Proggie02/TestRepo
|
tests/regressiontests/indexes/models.py
|
Python
|
bsd-3-clause
| 238
| 0
|
from django.db import models
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
        index_together = [
            ["headline", "pub_date"],
]
|
othreecodes/MY-RIDE
|
app/models.py
|
Python
|
mit
| 11,305
| 0.002565
|
from __future__ import unicode_literals
import re
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin,
UserManager)
from django.core import validators
from django.core.mail import send_mail
from django.utils.translation import ugettext_lazy as _
from notifications.models import *
from broadcast.models import Broadcast
class CustomUser(AbstractBaseUser, PermissionsMixin):
"""
A custom user class that basically mirrors Django's `AbstractUser` class
    and doesn't force `first_name` or `last_name` with sensibilities for
international names.
http://www.w3.org/International/questions/qa-personal-names
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile(
'^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
full_name = models.CharField(_('full name'), max_length=254, blank=False)
short_name = models.CharField(_('short name'), max_length=30, blank=True)
choices = (('Male', 'Male'), ('Female', 'Female'))
sex = models.CharField(_('sex'), max_length=30, blank=False, choices=choices)
email = models.EmailField(_('email address'), max_length=254, unique=True)
phone_number = models.CharField(_('phone number'), max_length=20, validators=[
validators.RegexValidator(re.compile(
'^[0-9]+$'), _('Only numbers are allowed.'), 'invalid')
])
user_choices = (('Driver', 'Driver'), ('Passenger', 'Passenger'))
user_type = models.CharField(_('user type'), max_length=30, blank=False, choices=user_choices)
address = models.TextField(_('location'), max_length=400, blank=False)
    is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_verified = models.BooleanField(_('user verified'), default=False,
                                      help_text=_('Designates whether the user is a verified user'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def __unicode__(self):
return self.username
def get_absolute_url(self):
return "/profile/%s" % self.username
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = self.full_name
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.short_name.strip()
def get_sex(self):
return self.sex
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def get_no_messages(self):
number = Message.objects.filter(recipient=self, read=False)
if number.count() > 0:
return number.count()
else:
return None
def get_messages(self):
msg = Message.objects.filter(recipient=self, read=False).order_by('date').reverse()
return msg
def get_messages_all(self):
msg = Message.objects.filter(recipient=self).order_by('date').reverse()
return msg
def get_notifications(self):
return self.notifications.unread()
def get_no_notifs(self):
return self.notifications.unread().count()
def is_follows(self, user_1):
foll = Follow.objects.filter(follower=self, followee=user_1)
if foll.exists():
return True
else:
return False
def get_no_followers(self):
num = Follow.objects.filter(followee=self).count()
return num
def get_no_following(self):
num = Follow.objects.filter(follower=self).count()
return num
def get_following(self):
num = Follow.objects.filter(follower=self).values_list('followee')
result = []
for follower in num:
user = CustomUser.objects.get(pk=follower[0])
result.append(user)
return result
def get_profile(self):
profile = Profile.objects.get(user=self)
return profile
def no_of_rides_shared(self):
return self.vehiclesharing_set.filter(user=self, ended=True).count()
def no_of_request_completed(self):
return self.request_set.filter(status='approved', user=self).count()
def get_no_broadcast(self):
return Broadcast.objects.filter(user=self).count()
def get_broadcast(self):
all_broad = Broadcast.objects.filter(user=self)[0:10]
return all_broad
class Vehicle(models.Model):
year = models.IntegerField(_('year of purchase'), blank=False)
make = models.CharField(_('vehicle make'), max_length=254, blank=False)
plate = models.CharField(_('liscenced plate number'), max_length=10, blank=False)
model = models.CharField(_('vehicle model'), max_length=254, blank=False)
seats = models.IntegerField(_('no of seats'), blank=False)
user_choices = (('private', 'private'), ('hired', 'hired'))
type = models.CharField(_('vehicle type'), max_length=30, blank=False, choices=user_choices)
user_choices = (('Car', 'Car'), ('Bus', 'Bus'), ('Coaster', 'Coaster'), ('Truck', 'Truck'))
category = models.CharField(_('vehicle category'), max_length=30, blank=False, choices=user_choices)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
def get_absolute_url(self):
return "/app/ride/%d/view" % self.pk
def __str__(self):
return self.make + " " + self.model + " belonging to " + self.user.username
class VehicleSharing(models.Model):
start = models.CharField(_('starting point'), max_length=256, blank=False, )
dest = models.CharField(_('destination'), max_length=256, blank=False)
cost = models.IntegerField(_('cost'), blank=False)
date = models.DateField(_('date'), default=timezone.now)
start_time = models.TimeField(_('start time'), max_length=256, blank=False)
    arrival_time = models.TimeField(_('estimated arrival'), max_length=256, blank=False)
no_pass = models.IntegerField(_('no of passengers'), blank=False)
details = models.TextField(_('ride details'), blank=False)
choices = (('Male', 'Male'), ('Female', 'Female'), ('Both', 'Both'))
sex = models.CharField(_('gender preference'), max_length=30, blank=False, choices=choices)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
vehicle = models.ForeignKey(Vehicle, on_delete=models.CASCADE)
ended = models.BooleanField(_('sharing ended'), default=False)
def __str__(self):
return self.start + " to " + self.dest
def get_user(self):
return self.user
def get_absolute_url(self):
return "/app/sharing/%d/view" % self.pk
class Request(models.Model):
pick = models.CharField(_('pick up point'), max_length=256, blank=False, )
dest = models.CharField(_('destination'), max_length=256, blank=False)
reg_date = models.DateTimeField(_('registration date'), default=timezone.now)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
bearable = models.IntegerField(_('bearable cost'), blank=False)
status = models.CharField(_('status'), max_length=256, blank=False, default=
|
cpausmit/Kraken
|
filefi/043/mc-notrig.py
|
Python
|
mit
| 9,131
| 0.013033
|
import FWCore.ParameterSet.Config as cms
#---------------------------------------------------------------------------------------------------
# M A I N
#---------------------------------------------------------------------------------------------------
# create the process
process = cms.Process('FILEFI')
# say how many events to process (-1 means no limit)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
#>> input source
process.source = cms.Source(
"PoolSource",
fileNames = cms.untracked.vstring('root://xrootd.unl.edu//store/mc/RunIIFall15DR76/TT_TuneCUETP8M1_13TeV-amcatnlo-pythia8/AODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/30000/029641E2-37A2-E511-9AB4-A0369F7F8E80.root')
)
process.source.inputCommands = cms.untracked.vstring(
"keep *",
"drop *_MEtoEDMConverter_*_*",
"drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT"
)
#>> configurations
# determine the global tag to use
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.GlobalTag.globaltag = 'MCRUN2_74_V9'
# define meta data for this production
process.configurationMetadata = cms.untracked.PSet(
name = cms.untracked.string('BambuProd'),
version = cms.untracked.string('Mit_043'),
annotation = cms.untracked.string('AODSIM')
)
#>> standard sequences
# load some standard sequences we will need
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.EventContent.EventContent_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi')
process.load('TrackingTools.TransientTrack.TransientTrackBuilder_cfi')
# define sequence for ProductNotFound
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
wantSummary = cms.untracked.bool(False)
)
# Import/Load the filler so all is already available for config changes
from MitProd.TreeFiller.MitTreeFiller_cfi import MitTreeFiller
process.load('MitProd.TreeFiller.MitTreeFiller_cfi')
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# R E C O S E Q U E N C E
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
## Load stablePart producers
from MitEdm.Producers.conversionElectronsStable_cfi import electronsStable
process.load('MitEdm.Producers.conversionElectronsStable_cfi')
# Load Mit Mvf Conversion producer
# MultiVertexFitter is currently broken
#from MitProd.TreeFiller.conversionProducer_cff import conversionProducer, addConversionFiller
#process.load('MitProd.TreeFiller.conversionProducer_cff')
#addConversionFiller(MitTreeFiller)
# Electron likelihood-based id
from RecoEgamma.ElectronIdentification.ElectronMVAValueMapProducer_cfi import electronMVAValueMapProducer
process.load('RecoEgamma.ElectronIdentification.ElectronMVAValueMapProducer_cfi')
MitTreeFiller.Electrons.eIDLikelihoodName = 'electronMVAValueMapProducer:ElectronMVAEstimatorRun2Spring15Trig25nsV1Values'
# Load basic particle flow collections
# Used for rho calculation
from CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi import goodOfflinePrimaryVertices
from CommonTools.ParticleFlow.pfParticleSelection_cff import pfParticleSelectionSequence, pfPileUp, pfNoPileUp, pfPileUpIso, pfNoPileUpIso
from CommonTools.ParticleFlow.pfPhotons_cff import pfPhotonSequence
from CommonTools.ParticleFlow.pfElectrons_cff import pfElectronSequence
from CommonTools.ParticleFlow.pfMuons_cff import pfMuonSequence
from CommonTools.ParticleFlow.TopProjectors.pfNoMuon_cfi import pfNoMuon
from CommonTools.ParticleFlow.TopProjectors.pfNoElectron_cfi import pfNoElectron
process.load('CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi')
process.load('CommonTools.ParticleFlow.pfParticleSelection_cff')
process.load('CommonTools.ParticleFlow.pfPhotons_cff')
process.load('CommonTools.ParticleFlow.pfElectrons_cff')
process.load('CommonTools.ParticleFlow.pfMuons_cff')
process.load('CommonTools.ParticleFlow.TopProjectors.pfNoMuon_cfi')
process.load('CommonTools.ParticleFlow.TopProjectors.pfNoElectron_cfi')
# Loading PFProducer to get the ptrs
from RecoParticleFlow.PFProducer.pfLinker_cff import particleFlowPtrs
process.load('RecoParticleFlow.PFProducer.pfLinker_cff')
# Load PUPPI
from MitProd.TreeFiller.PuppiSetup_cff import puppiSequence
process.load('MitProd.TreeFiller.PuppiSetup_cff')
# recluster fat jets, btag subjets
from MitProd.TreeFiller.utils.makeFatJets import initFatJets,makeFatJets
pfbrecoSequence = initFatJets(process, isData = False)
ak8chsSequence = makeFatJets(process, isData = False, algoLabel = 'AK', jetRadius = 0.8)
ak8puppiSequence = makeFatJets(process, isData = False, algoLabel = 'AK', jetRadius = 0.8, pfCandidates = 'puppiNoLepPlusLep')
ca15chsSequence = makeFatJets(process, isData = False, algoLabel = 'CA', jetRadius = 1.5)
ca15puppiSequence = makeFatJets(process, isData = False, algoLabel = 'CA', jetRadius = 1.5, pfCandidates = 'puppiNoLepPlusLep')
# unload unwanted PAT stuff
delattr(process, 'pfNoTauPFBRECOPFlow')
delattr(process, 'loadRecoTauTagMVAsFromPrepDBPFlow')
pfPileUp.PFCandidates = 'particleFlowPtrs'
pfNoPileUp.bottomCollection = 'particleFlowPtrs'
pfPileUpIso.PFCandidates = 'particleFlowPtrs'
pfNoPileUpIso.bottomCollection='particleFlowPtrs'
pfPileUp.Enable = True
pfPileUp.Vertices = 'goodOfflinePrimaryVertices'
pfPileUp.checkClosestZVertex = cms.bool(False)
# PUPPI jets
from RecoJets.JetProducers.ak4PFJetsPuppi_cfi import ak4PFJetsPuppi
process.load('RecoJets.JetProducers.ak4PFJetsPuppi_cfi')
ak4PFJetsPuppi.src = cms.InputTag('puppiNoLepPlusLep')
ak4PFJetsPuppi.doAreaFastjet = True
# Load FastJet L1 corrections
from MitProd.TreeFiller.FastJetCorrection_cff import l1FastJetSequence
process.load('MitProd.TreeFiller.FastJetCorrection_cff')
# Setup jet corrections
process.load('JetMETCorrections.Configuration.JetCorrectionServices_cff')
# Load btagging
from MitProd.TreeFiller.utils.setupBTag import setupBTag
ak4PFBTagSequence = setupBTag(process, 'ak4PFJets', 'AKt4PF')
ak4PFCHSBTagSequence = setupBTag(process, 'ak4PFJetsCHS', 'AKt4PFCHS')
ak4PFPuppiBTagSequence = setupBTag(process, 'ak4PFJetsPuppi', 'AKt4PFPuppi')
# Load HPS tau reconstruction (tau in AOD is older than the latest reco in release)
from RecoTauTag.Configuration.RecoPFTauTag_cff import PFTau
process.load('RecoTauTag.Configuration.RecoPFTauTag_cff')
#> Setup the met filters
from MitProd.TreeFiller.metFilters_cff import metFilters
process.load('MitProd.TreeFiller.metFilters_cff')
#> The bambu reco sequence
recoSequence = cms.Sequence(
electronsStable *
electronMVAValueMapProducer *
# conversionProducer *
goodOfflinePrimaryVertices *
particleFlowPtrs *
pfParticleSelectionSequence *
pfPhotonSequence *
pfMuonSequence *
pfNoMuon *
pfElectronSequence *
pfNoElectron *
PFTau *
puppiSequence *
ak4PFJetsPuppi *
l1FastJetSequence *
ak4PFBTagSequence *
ak4PFCHSBTagSequence *
ak4PFPuppiBTagSequence *
pfbrecoSequence*
ak8chsSequence*
ak8puppiSequence*
ca15chsSequence*
ca15puppiSequence*
metFilters
)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# G E N S E Q U E N C E
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Import/Load genjets
from RecoJets.Configuration.GenJetParticles_cff import genJetParticles
process.load('RecoJets.Configuration.GenJetParticles_cff')
from RecoJets.Configuration.RecoGenJets_cff import ak4GenJets, ak8GenJets
process.load('RecoJets.Configuration.RecoGenJets_cff')
genSequence = cms.Sequence(
genJetParticles *
ak4GenJets *
ak8GenJets
)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
#
|
fredzannarbor/pagekicker-community
|
scripts_python_3/bin/sanitize.py
|
Python
|
apache-2.0
| 2,661
| 0.010147
|
from bs4 import BeautifulSoup
import re
def sanitize(html):
# allow these tags. Other tags are removed, but their child elements remain
whitelist = ['em', 'i', 'strong', 'u', 'a', 'b', 'p', 'br', 'code', 'pre', 'table', 'tr', 'td' ]
# allow only these attributes on these tags. No other tags are allowed any
# attributes.
attr_whitelist = { 'a':['href','title','hreflang']}
# remove these tags, complete with contents.
blacklist = [ 'script', 'style' ]
attributes_with_urls = [ 'href', 'src' ]
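    # Purely illustrative (hypothetical) example of the intended effect of the
    # policy above:
    #   sanitize('<script>alert(1)</script><b onclick="x()">hi</b>')
    # drops the <script> element entirely, strips the non-whitelisted onclick
    # attribute from <b>, and keeps the "hi" text.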
# BeautifulSoup is catching out-of-order and unclosed tags, so markup
    # can't leak out of comments and break the rest of the page.
soup = BeautifulSoup(html)
# now strip HTML we don't like.
for tag in soup.findAll():
|
if tag.name.lower() in blacklist:
# blacklisted tags are removed in their entirety
tag.extract()
elif tag.name.lower() in whitelist:
# tag is allowed. Make sure all the attributes are allowed.
for attr in tag.attrs:
# allowed attributes are whitelisted per-tag
if tag.name.lower() in attr_whitelist and \
attr[0].lower() in attr_whitelist[ tag.name.lower() ]:
# some attributes contain urls..
if attr[0].lower() in attributes_with_urls:
# ..make sure they're nice urls
if not re.match(r'(https?|ftp)://', attr[1].lower()):
tag.attrs.remove( attr )
# ok, then
pass
else:
# not a whitelisted attribute. Remove it.
del tag[attr]
else:
# not a whitelisted tag. I'd like to remove it from the tree
# and replace it with its children. But that's hard. It's much
# easier to just replace it with an empty span tag.
tag.name = "span"
tag.attrs = []
# stringify back again
safe_html = str(soup)
# HTML comments can contain executable scripts, depending on the browser,
# so we'll
# be paranoid and just get rid of all of them
# e.g. <!--[if lt IE 7]><script type="text/javascript">h4x0r();</script><!
# [endif]-->
# TODO - I rather suspect that this is the weakest part of the operation..
safe_html = re.sub(r'<!--[.\n]*?-->','',safe_html)
return safe_html
if __name__ == "__main__":
import sys
input_file = open(sys.argv[1])
output_file = open(sys.argv[2], "w")
output_file.write(sanitize(input_file.read()).encode("utf8"))
output_file.close()
|
ProfessionalIT/professionalit-webiste
|
sdk/google_appengine/google/appengine/tools/devappserver2/static_files_handler.py
|
Python
|
lgpl-3.0
| 13,838
| 0.005853
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves static content for "static_dir" and "static_files" handlers."""
import base64
import errno
import httplib
import mimetypes
import os
import os.path
import re
import zlib
from google.appengine.api import appinfo
from google.appengine.tools import augment_mimetypes
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import url_handler
_FILE_MISSING_ERRNO_CONSTANTS = frozenset([errno.ENOENT, errno.ENOTDIR])
# Run at import time so we only do this once.
augment_mimetypes.init()
class StaticContentHandler(url_handler.UserConfiguredURLHandler):
"""Abstract base class for subclasses serving static content."""
# Associate the full path of a static file with a 2-tuple containing the:
# - mtime at which the file was last read from disk
# - a etag constructed from a hash of the file's contents
# Statting a small file to retrieve its mtime is approximately 20x faster than
# reading it to generate a hash of its contents.
_filename_to_mtime_and_etag = {}
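  # A purely illustrative (made-up) entry in this cache could look like:
  #   {'/app/static/logo.png': (1514764800.0, 'MTYxMzkzNTA2')}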
def __init__(self, root_path, url_map, url_pattern):
"""Initializer for StaticContentHandler.
Args:
root_path: A string containing the full path of the directory containing
the application's app.yaml file.
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
url_pattern: A re.RegexObject that matches URLs that should be handled by
this handler. It may also optionally bind groups.
"""
super(StaticContentHandler, self).__init__(url_map, url_pattern)
self._root_path = root_path
def _get_mime_type(self, path):
"""Returns the mime type for the file at the given path."""
if self._url_map.mime_type is not None:
return self._url_map.mime_type
_, extension = os.path.splitext(path)
return mimetypes.types_map.get(extension, 'application/octet-stream')
def _handle_io_exception(self, start_response, e):
"""Serves the response to an OSError or IOError.
Args:
start_response: A function with semantics defined in PEP-333. This
function will be called with a status appropriate to the given
exception.
e: An instance of OSError or IOError used to generate an HTTP status.
Returns:
      An empty iterable.
"""
if e.errno in _FILE_MISSING_ERRNO_CONSTANTS:
start_response('404 Not Found', [])
else:
start_response('403 Forbidden', [])
return []
@staticmethod
def _calculate_etag(data):
return base64.b64encode(str(zlib.crc32(data)))
def _handle_path(self, full_path, environ, start_response):
"""Serves the response to a request for a particular file.
Note that production App Engine treats all methods as "GET" except "HEAD".
Unless set explicitly, the "Expires" and "Cache-Control" headers are
deliberately different from their production values to make testing easier.
If set explicitly then the values are preserved because the user may
reasonably want to test for them.
Args:
full_path: A string containing the absolute path to the file to serve.
environ: An environ dict for the current request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
data = None
if full_path in self._filename_to_mtime_and_etag:
last_mtime, etag = self._filename_to_mtime_and_etag[full_path]
else:
last_mtime = etag = None
user_headers = self._url_map.http_headers or appinfo.HttpHeadersDict()
if_match = environ.get('HTTP_IF_MATCH')
if_none_match = environ.get('HTTP_IF_NONE_MATCH')
try:
mtime = os.path.getmtime(full_path)
except (OSError, IOError) as e:
# RFC-2616 section 14.24 says:
# If none of the entity tags match, or if "*" is given and no current
# entity exists, the server MUST NOT perform the requested method, and
# MUST return a 412 (Precondition Failed) response.
if if_match:
start_response('412 Precondition Failed', [])
return []
elif self._url_map.require_matching_file:
return None
else:
return self._handle_io_exception(start_response, e)
if mtime != last_mtime:
try:
data = self._read_file(full_path)
except (OSError, IOError) as e:
return self._handle_io_exception(start_response, e)
etag = self._calculate_etag(data)
self._filename_to_mtime_and_etag[full_path] = mtime, etag
    if if_match and not self._check_etag_match(if_match,
                                               etag,
                                               allow_weak_match=False):
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
start_response('412 Precondition Failed',
[('ETag', '"%s"' % etag)])
return []
elif if_none_match and self._check_etag_match(if_none_match,
etag,
allow_weak_match=True):
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
start_response('304 Not Modified',
[('ETag', '"%s"' % etag)])
return []
else:
if data is None:
try:
data = self._read_file(full_path)
except (OSError, IOError) as e:
return self._handle_io_exception(start_response, e)
etag = self._calculate_etag(data)
self._filename_to_mtime_and_etag[full_path] = mtime, etag
headers = [('Content-length', str(len(data)))]
if user_headers.Get('Content-type') is None:
headers.append(('Content-type', self._get_mime_type(full_path)))
if user_headers.Get('ETag') is None:
headers.append(('ETag', '"%s"' % etag))
if user_headers.Get('Expires') is None:
headers.append(('Expires', 'Fri, 01 Jan 1990 00:00:00 GMT'))
if user_headers.Get('Cache-Control') is None:
headers.append(('Cache-Control', 'no-cache'))
for name, value in user_headers.iteritems():
# "name" will always be unicode due to the way that ValidatedDict works.
headers.append((str(name), value))
start_response('200 OK', headers)
if environ['REQUEST_METHOD'] == 'HEAD':
return []
else:
return [data]
@staticmethod
def _read_file(full_path):
with open(full_path, 'rb') as f:
return f.read()
@staticmethod
def _check_etag_match(etag_headers, etag, allow_weak_match):
"""Checks if an etag header matches a given etag.
Args:
etag_headers: A string representing an e-tag header value e.g.
'"xyzzy", "r2d2xxxx", W/"c3piozzzz"' or '*'.
etag: The etag to match the header to. If None then only the '*' header
with match.
allow_weak_match: If True then weak etags are allowed to match.
Returns:
True if there is a match, False otherwise.
"""
# From RFC-2616:
# entity-tag = [ weak ] opaque-tag
# weak = "W/"
# opaque-tag = quoted-string
# quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
# qdtext = <any TEXT except <">>
# quoted-pair = "\" CHAR
# TEXT = <any OCTET except CTLs, but including LWS>
# CHAR = <any US-ASCII character (octets 0 - 127)>
# This parsing is not actually correct since it assumes that commas cannot
# appear in etags. But the generated etags do not contain commas
|
ivmech/iviny-scope
|
lib/xlsxwriter/test/comparison/test_cond_format03.py
|
Python
|
gpl-3.0
| 2,548
| 0.000392
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'cond_format03.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({'font_strikeout': 1, 'dxf_index': 1})
        format2 = workbook.add_format({'underline': 1, 'dxf_index': 0})
worksheet.write('A1', 10)
worksheet.write('A2', 20)
worksheet.write('A3', 30)
worksheet.write('A4', 40)
worksheet.conditional_format('A1',
{'type': 'cell',
'format': format1,
'criteria': 'between',
'minimum': 2,
'maximum': 6,
})
worksheet.conditional_format('A1',
{'type': 'cell',
'format': format2,
'criteria': 'greater than',
'value': 1,
})
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
|
m4tto/pyrsyncgui
|
pyrsyncgui.py
|
Python
|
gpl-2.0
| 8,640
| 0.035069
|
#!/usr/bin/python
'''
*
* Copyright (C) 2013 Simone Denei <simone.denei@gmail.com>
*
* This file is part of pyrsyncgui.
*
* pyrsyncgui is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* pyrsyncgui is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with pyrsyncgui. If not, see <http://www.gnu.org/licenses/>.
*
'''
import PyQt4.QtCore as core
import PyQt4.QtGui as gui
import PyQt4.uic as uic
from PyQt4.QtCore import pyqtSlot
from PyQt4.QtCore import pyqtSignal
import sys
import os
import inspect
from os import path, environ
import configuration as conf
import dispatcher as disp
from addsyncwizard import addsyncwizard
from managesyncwizard import managesyncwizard
from configurationwizard import configurationwizard
from backupengine import backupengine
import platform
#######################################
# Configuration format #
#######################################
#
# GUI Configuration
#
# PollingTime Number - seconds between each server check
# RsyncCmd String - path to the rsync command
#
# Server Configuration
#
# servername - Dictionary key identifying the server entry
# Address String
# Username (String,None)
# Password (String,None)
# Transportation int - 0 Daemon, 1 ssh
#
# Sync Configuration
#
# syncname - Dictionary key identifying the sync entry
# Server (String,None) if is None it means that the destination is an harddisk
# DiskID (String,None)
# Source String
# Enabled bool
# Destination String
# NeedsUpdate bool
# Username (String,None)
# Password (String,None)
#
# ServerStatus 0 Disconnected, 1 Partial connected, 2 Connected
# BackupStatus 0 Updated, 1 Updating, 2 Paused
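#
# A purely illustrative (made-up) example of the three dictionaries described
# above; none of these names or values come from a real setup:
#
#   config       = {'PollingTime': 1, 'RsyncCmd': 'rsync', 'SshCmd': 'ssh'}
#   serverconfig = {'homeserver': {'Address': '192.168.1.10', 'Username': 'backup',
#                                  'Password': None, 'Transportation': 1}}
#   schedconfig  = {'documents':  {'Server': 'homeserver', 'DiskID': None,
#                                  'Source': '/home/user/Documents',
#                                  'Destination': '/backups/documents',
#                                  'Enabled': True, 'NeedsUpdate': False,
#                                  'Username': None, 'Password': None}}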
class pyrsyncgui(gui.QWidget):
updateGraphics = pyqtSignal()
updateInfo = pyqtSignal('QString')
def __init__(self,app):
gui.QWidget.__init__(self)
self.icons={}
self.icons['Disconnected'] = gui.QIcon('img/disconnected.svg')
self.icons['ConnectedUpdated'] = gui.QIcon('img/connectedupdated.svg')
self.icons['ConnectedUpdating'] = (gui.QIcon('img/connectedupdating1.svg'),gui.QIcon('img/connectedupdating2.svg'))
self.icons['PartialConnectedUpdating'] = (gui.QIcon('img/partialconnectedupdating1.svg'),gui.QIcon('img/partialconnectedupdating2.svg'))
self.icons['PartialConnectedUpdated'] = gui.QIcon('img/partialconnectedupdated.svg')
self.progress = gui.QMovie('img/progress.gif')
self.tray = gui.QSystemTrayIcon(self.icons['Disconnected'])
self.app = app
#Loading configuration files
cfgfile = os.path.join(self.configDir(),'.pyrsyncgui')
servcfgfile = os.path.join(self.configDir(),'.pyrsyncgui.server')
schedcfgfile = os.path.join(self.configDir(),'.pyrsyncgui.sync')
self.config = conf.configuration(cfgfile)
if len(self.config) == 0:
self.defaultconfig()
self.serverconfig = conf.configuration(servcfgfile)
if len(self.serverconfig) == 0:
self.defaultserverconfig()
self.schedconfig = conf.configuration(schedcfgfile)
if len(self.schedconfig) == 0:
self.defaultschedconfig()
self.window = uic.loadUi('gui.ui')
self.connect(self.window.AddSyncButton, core.SIGNAL('clicked()'), self, core.SLOT('addSync()'))
self.connect(self.window.ManageButton, core.SIGNAL('clicked()'), self, core.SLOT('manageSync()'))
self.connect(self.window.ConfigButton, core.SIGNAL('clicked()'), self, core.SLOT('config()'))
disp.register('SyncProgress',self.syncprogressupdate)
disp.register('InfoMsg',self.infomsg)
self.serverstatus = 0
self.backupstatus = 0
disp.register('ServerStatus',self.__serverStatusUpdate)
disp.register('BackupStatus',self.__backupStatusUpdate)
self.bckeng = backupengine(self.config,self.serverconfig,self.schedconfig)
self.app.aboutToQuit.connect(self.cleanup)
self.menu = gui.QMenu()
self.menu.addAction('Pause Backups',self.__pauseBackups)
self.menu.addSeparator()
self.menu.addAction('Exit', self.quit)
self.tray.activated.connect(self.__trayActivated)
self.tray.setContextMenu(self.menu)
self.tray.show()
self.window.closeEvent = self.closing
self.window.ServerVerifyButton.raise_()
self.window.ServerVerifyInProgress.setMovie(self.progress)
self.window.ServerProceedButton.raise_()
self.window.ServerProceedInProgress.setMovie(self.progress)
self.progress.start()
self.currentwizard=None
self.updateGraphics.connect(self.__updateGraphics)
self.updateInfo.connect(self.__updateInfo)
self.bckeng.start()
self.window.InfoLabel.setText(' Welcome to pyrsyncgui!')
def configDir(self):
if platform.system() == 'Windows':
appdata = path.join(environ['APPDATA'], 'pyrsyncgui')
else:
appdata = path.expanduser(path.join('~', '.config', 'pyrsyncgui'))
if not os.path.exists(appdata):
os.makedirs(appdata)
return appdata
def quit(self):
self.tray.hide()
self.window.hide()
self.close()
self.app.quit()
def closing(self, event):
event.ignore()
self.config.save()
self.schedconfig.save()
self.serverconfig.save()
self.window.hide()
if self.currentwizard != None:
self.currentwizard.stop()
def cleanup(self):
print('Closing')
self.bckeng.stop()
self.config.save()
self.schedconfig.save()
self.serverconfig.save()
@pyqtSlot()
def addSync(self):
self.currentwizard = addsyncwizard(self.window, self.config, self.serverconfig,self.schedconfig)
        self.currentwizard.start()
@pyqtSlot()
def manageSync(self):
self.currentwizard = managesyncwizard(self.window, self.config, self.serverconfig,self.schedconfig)
self.currentwizard.start()
@pyqtSlot()
def config(self):
self.currentwizard = configurationwizard(self.window, self.config)
self.currentwizard.start()
def infomsg(self,msg):
self.updateInfo.emit(msg)
def syncprogressupdate(self,msg):
filename = msg['CurrentFile'][msg['CurrentFile'].rfind('/')+1:]
if len(filename) > 20:
filename = filename[0:17]+'...'
self.window.InfoLabel.setText(' Sync: '+filename+' '+str(msg['FilePercentage'])+'%, '+ str(msg['OverallPercentage'])+'%')
self.tray.setToolTip(' Sync: '+filename+'\nProgress: '+str(msg['FilePercentage'])+'%, '+ str(msg['OverallPercentage'])+'%')
def defaultconfig(self):
self.config['PollingTime'] = 1
if platform.system() == 'Windows':
self.config['RsyncCmd'] = '.\\bin\\rsync'
elif platform.system() == 'Linux':
self.config['RsyncCmd'] = 'rsync'
self.config['SshCmd'] = 'ssh'
def defaultschedconfig(self):
pass
def defaultserverconfig(self):
pass
def changeState(self,state):
pass
@pyqtSlot('QString')
def __updateInfo(self,text):
self.window.InfoLabel.setText(' '+text)
@pyqtSlot()
def __updateGraphics(self):
if self.serverstatus == 0:
self.tray.setIcon(self.icons['Disconnected'])
elif self.serverstatus == 1:
if self.backupstatus == 0:
self.tray.setIcon(self.icons['PartialConnectedUpdated'])
else:
self.tray.setIcon(self.icons['PartialConnectedUpdating'][0])
else:
if self.backupstatus == 0:
self.tray.setIcon(self.icons['ConnectedUpdated'])
else:
self.tray.setIcon(self.icons['ConnectedUpdating'][0])
@pyqtSlot('QSystemTrayIcon::ActivationReason')
def __trayActivated(self,event):
if event == gui.QSystemTrayIcon.Trigger:
if len(self.schedconfig) == 0:
self.window.ManageButton.setEnabled(False)
else:
self.window.ManageButton.setEnabled(True)
self.window.setVisible(not self.window.isVisible())
def __serverStatusUpdate(self,msg):
self.serverstatus = msg
s
|
ElementalAlchemist/txircd
|
txircd/modules/rfc/cmd_connect.py
|
Python
|
bsd-3-clause
| 1,451
| 0.035837
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class ConnectCommand(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "ConnectCommand"
core = True
def actions(self):
return [ ("commandpermission-CONNECT", 1, self.canConnect) ]
def userCommands(self):
return [ ("CONNECT", 1, self) ]
def canConnect(self, user, data):
		if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-connect", users=[user]):
			user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
return False
return None
def parseParams(self, user, params, prefix, tags):
if not params:
user.sendSingleError("ConnectParams", irc.ERR_NEEDMOREPARAMS, "CONNECT", "Not enough parameters")
return None
return {
"server": params[0]
}
def execute(self, user, data):
serverName = data["server"]
if serverName in self.ircd.serverNames:
user.sendMessage("NOTICE", "*** Server {} is already on the network".format(serverName))
elif self.ircd.connectServer(serverName):
user.sendMessage("NOTICE", "*** Connecting to {}".format(serverName))
else:
user.sendMessage("NOTICE", "*** Failed to connect to {}; it's likely not configured.".format(serverName))
return True
connectCmd = ConnectCommand()
|
sungyism/sungyism
|
gmond/python_modules/memcached/memcached.py
|
Python
|
bsd-3-clause
| 11,706
| 0.009568
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import traceback
import os
import threading
import time
import socket
import select
descriptors = list()
Desc_Skel = {}
_Worker_Thread = None
_Lock = threading.Lock() # synchronization lock
Debug = False
def dprint(f, *v):
if Debug:
print >>sys.stderr, "DEBUG: "+f % v
def floatable(str):
try:
float(str)
return True
except:
return False
class UpdateMetricThread(threading.Thread):
def __init__(self, params):
threading.Thread.__init__(self)
self.running = False
self.shuttingdown = False
self.refresh_rate = 15
if "refresh_rate" in params:
self.refresh_rate = int(params["refresh_rate"])
self.metric = {}
self.last_metric = {}
self.timeout = 2
self.host = "localhost"
self.port = 11211
if "host" in params:
self.host = params["host"]
if "port" in params:
self.port = int(params["port"])
self.type = params["type"]
self.mp = params["metrix_prefix"]
def shutdown(self):
self.shuttingdown = True
if not self.running:
return
self.join()
def run(self):
self.running = True
while not self.shuttingdown:
_Lock.acquire()
self.update_metric()
_Lock.release()
time.sleep(self.refresh_rate)
self.running = False
def update_metric(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
msg = ""
self.last_metric = self.metric.copy()
try:
dprint("connect %s:%d", self.host, self.port)
sock.connect((self.host, self.port))
sock.send("stats\r\n")
while True:
rfd, wfd, xfd = select.select([sock], [], [], self.timeout)
if not rfd:
print >>sys.stderr, "ERROR: select timeout"
break
for fd in rfd:
if fd == sock:
data = fd.recv(8192)
msg += data
if msg.find("END"):
break
sock.close()
except socket.error, e:
print >>sys.stderr, "ERROR: %s" % e
for m in msg.split("\r\n"):
d = m.split(" ")
if len(d) == 3 and d[0] == "STAT" and floatable(d[2]):
self.metric[self.mp+"_"+d[1]] = float(d[2])
def metric_of(self, name):
val = 0
mp = name.split("_")[0]
if name.rsplit("_",1)[1] == "rate" and name.rsplit("_",1)[0] in self.metric:
_Lock.acquire()
name = name.rsplit("_",1)[0]
if name in self.last_metric:
num = self.metric[name]-self.last_metric[name]
period = self.metric[mp+"_time"]-self.last_metric[mp+"_time"]
try:
val = num/period
except ZeroDivisionError:
val = 0
_Lock.release()
elif name in self.metric:
_Lock.acquire()
val = self.metric[name]
_Lock.release()
return val
def metric_init(params):
global descriptors, Desc_Skel, _Worker_Thread, Debug
print '[memcached] memcached protocol "stats"'
    if "type" not in params:
params["type"] = "memcached"
if "metrix_prefix" not in params:
if params["type"] == "memcached":
params["metrix_prefix"] = "mc"
elif params["type"] == "Tokyo Tyrant":
params["metrix_prefix"] = "tt"
print params
# initialize skeleton of descriptors
Desc_Skel = {
'name' : 'XXX',
'call_back' : metric_of,
'time_max' : 60,
'value_type' : 'float',
'format' : '%.0f',
'units' : 'XXX',
'slope' : 'XXX', # zero|positive|negative|both
'description' : 'XXX',
'groups' : params["type"],
}
if "refresh_rate" not in params:
params["refresh_rate"] = 15
if "debug" in params:
Debug = params["debug"]
dprint("%s", "Debug mode on")
_Worker_Thread = UpdateMetricThread(params)
_Worker_Thread.start()
# IP:HOSTNAME
if "spoof_host" in params:
Desc_Skel["spoof_host"] = params["spoof_host"]
mp = params["metrix_prefix"]
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_curr_items",
"units" : "items",
"slope" : "both",
"description": "Current number of items stored",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_cmd_get",
"units" : "commands",
"slope" : "positive",
"description": "Cumulative number of retrieval reqs",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_cmd_set",
"units" : "commands",
"slope" : "positive",
"description": "Cumulative number of storage reqs",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_bytes_read",
"units" : "bytes",
"slope" : "positive",
"description": "Total number of bytes read by this server from network",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_bytes_written",
"units" : "bytes",
"slope" : "positive",
"description": "Total number of bytes sent by this server to network",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_bytes",
"units" : "bytes",
"slope" : "both",
"description": "Current number of bytes used to store items",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_limit_maxbytes",
"units" : "bytes",
"slope" : "both",
"description": "Number of bytes this server is allowed to use for storage",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_curr_connections",
"units" : "connections",
"slope" : "both",
"description": "Number of open connections",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_evictions",
"units" : "items",
"slope" : "both",
"description": "Number of valid items removed from cache to free memory for new items",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_get_hits",
"units" : "items",
"slope" : "positive",
"description": "Number of keys that have been requested and found present ",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_get_misses",
"units" : "items",
"slope" : "positive",
"description": "Number of items that have been requested and not found",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_get_hits_rate",
"units" : "items",
"slope" : "both",
"description": "Hits per second",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_get_misses_rate",
"units" : "items",
"slope" : "both",
"description": "Misses per second",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_cmd_get_ra
|
garrettcap/Bulletproof-Backup
|
wx/tools/Editra/plugins/Launch/launch/launchxml.py
|
Python
|
gpl-2.0
| 4,699
| 0.003831
|
# -*- coding: utf-8 -*-
###############################################################################
# Name: launchxml.py #
# Purpose: Launch Xml Interface #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2009 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""Launch Xml Interface
Interface to add new filetype support to launch or to override existing support.
"""
xml_spec = """
<launch version="1">
<handler name="Python" id="ID_LANG_PYTHON">
<commandlist default="python">
<command name="python" execute="python2.5 -u"/>
<command name="pylint" execute="/usr/local/bin/pylint"/>
</commandlist>
<error pattern="File "(.+)", line ([0-9]+)"/>
</handler>
</launch>
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: launchxml.py 67713 2011-05-06 18:43:32Z CJP $"
__revision__ = "$Revision: 67713 $"
#-----------------------------------------------------------------------------#
# Imports
import re
import sys
#sys.path.insert(0, '../../../src/')
# Editra Imports
import ed_xml
#-----------------------------------------------------------------------------#
# Globals
#-----------------------------------------------------------------------------#
class ErrorPattern(ed_xml.EdXml):
class meta:
tagname = "error"
pattern = ed_xml.String(required=True)
class HotspotPattern(ed_xml.EdXml):
class meta:
tagname = "hotspot"
pattern = ed_xml.String(required=True)
class Command(ed_xml.EdXml):
class meta:
tagname = "command"
name = ed_xml.String(required=True)
execute = ed_xml.String(required=True)
class CommandList(ed_xml.EdXml):
class meta:
tagname = "commandlist"
default = ed_xml.String(required=True)
commands = ed_xml.List(Command)
class Handler(ed_xml.EdXml):
class meta:
tagname = "handler"
name = ed_xml.String(required=True)
id = ed_xml.String(required=True)
# Sub elements
commandlist = ed_xml.Model(CommandList, required=False)
error = ed_xml.Model(ErrorPattern, required=False)
hotspot = ed_xml.Model(HotspotPattern, required=False)
def GetDefaultCommand(self):
"""Get the default command"""
default = u""
if self.commandlist:
default = self.commandlist.default
return default
def GetCommands(self):
"""Get the list of commands"""
clist = dict()
if self.commandlist:
for cmd in self.commandlist.commands:
clist[cmd.name] = cmd.execute
return clist
def GetErrorPattern(self):
"""Get the handlers error pattern"""
if self.error and self.error.pattern:
return re.compile(self.error.pattern)
return None
def GetHotspotPattern(self):
"""Get the handlers hotspot pattern"""
if self.hotspot and self.hotspot.pattern:
return re.compile(self.hotspot.pattern)
return None
class LaunchXml(ed_xml.EdXml):
class meta:
tagname = "launch"
handlers = ed_xml.List(Handler, required=False)
def GetHandler(self, name):
"""Get a handler by name
@return: Handler instance or None
"""
rval = None
for handler in self.handlers:
if handler.name == name:
rval = handler
break
        return rval
def GetHandlers(self):
"""Get the whole dictionary of handlers
@return: dict(name=Handler)
"""
return self.handlers
def HasHandler(self, name):
"""Is there a handler for the given file type
@return: bool
"""
for handler in self.handlers:
if handler.name == name:
return True
return False
#-----------------------------------------------------------------------------#
# Test
#if __name__ == '__main__':
# h = LaunchXml.Load("launch.xml")
# print "CHECK Python Handler"
# hndlr = h.GetHandler('Python')
# print hndlr.GetCommands()
# print hndlr.error.pattern
# print hndlr.hotspot.pattern
# print hndlr.commandlist.default
# print h.GetHandlers()
# print "Check C Handler"
# hndlr = h.GetHandler('C')
# print hndlr.GetCommands()
# print hndlr.GetHotspotPattern()
# print hndlr.GetErrorPattern()
# print hndlr.GetDefaultCommand()
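A hedged usage sketch of the classes above, mirroring the commented-out test at the end of the file. It assumes a launch.xml file shaped like xml_spec is present in the working directory; everything else is taken from this module itself.

# Sketch only: assumes a launch.xml matching xml_spec exists on disk.
import launchxml

lxml = launchxml.LaunchXml.Load("launch.xml")   # parse the definition file
if lxml.HasHandler("Python"):
    hndlr = lxml.GetHandler("Python")
    print(hndlr.GetDefaultCommand())            # e.g. "python"
    print(hndlr.GetCommands())                  # {"python": "python2.5 -u", ...}
    print(hndlr.GetErrorPattern())              # compiled regex or None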

SalesforceFoundation/CumulusCI | cumulusci/tasks/salesforce/tests/test_UninstallLocal.py | Python | bsd-3-clause | 625 | 0.0032

from unittest import mock
import unittest
from cumulusci.tasks.salesforce import UninstallLocal
from cumulusci.utils import temporary_dir
from .util import create_task
class TestUninstallLocal(unittest.TestCase):
@mock.patch("cumulusci.tasks.metadata.package.PackageXmlGenerator.__call__")
    def test_get_destructive_changes(self, PackageXmlGenerator):
with temporary_dir() as path:
task = create_task(UninstallLocal, {"path": path})
PackageXmlGenerator.return_value = mock.sentinel.package_xml
self.assertEqual(mock.sentinel.package_xml, task._get_destructive_changes())

mapycz/python-mapnik | test/python_tests/shapefile_test.py | Python | lgpl-2.1 | 5,190 | 0.000385

import os
from nose.tools import assert_almost_equal, eq_, raises
import mapnik
from .utilities import execution_path, run_all
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
if 'shape' in mapnik.DatasourceCache.plugin_names():
# Shapefile initialization
def test_shapefile_init():
        s = mapnik.Shapefile(file='../data/shp/boundaries')
e = s.envelope()
assert_almost_equal(e.minx, -11121.6896651, places=7)
assert_almost_equal(e.miny, -724724.216526, places=6)
assert_almost_equal(e.maxx, 2463000.67866, places=5)
assert_almost_equal(e.maxy, 1649661.267, places=3)
# Shapefile properties
def test_shapefile_properties():
s = mapnik.Shapefile(file='../data/shp/boundaries', encoding='latin1')
f = list(s.features_at_point(s.envelope().center()))[0]
eq_(f['CGNS_FID'], u'6f733341ba2011d892e2080020a0f4c9')
eq_(f['COUNTRY'], u'CAN')
eq_(f['F_CODE'], u'FA001')
eq_(f['NAME_EN'], u'Quebec')
# this seems to break if icu data linking is not working
eq_(f['NOM_FR'], u'Qu\xe9bec')
eq_(f['NOM_FR'], u'Québec')
eq_(f['Shape_Area'], 1512185733150.0)
eq_(f['Shape_Leng'], 19218883.724300001)
@raises(RuntimeError)
def test_that_nonexistant_query_field_throws(**kwargs):
ds = mapnik.Shapefile(file='../data/shp/world_merc')
eq_(len(ds.fields()), 11)
eq_(ds.fields(), ['FIPS', 'ISO2', 'ISO3', 'UN', 'NAME',
'AREA', 'POP2005', 'REGION', 'SUBREGION', 'LON', 'LAT'])
eq_(ds.field_types(),
['str',
'str',
'str',
'int',
'str',
'int',
'int',
'int',
'int',
'float',
'float'])
query = mapnik.Query(ds.envelope())
for fld in ds.fields():
query.add_property_name(fld)
# also add an invalid one, triggering throw
query.add_property_name('bogus')
ds.features(query)
def test_dbf_logical_field_is_boolean():
ds = mapnik.Shapefile(file='../data/shp/long_lat')
eq_(len(ds.fields()), 7)
eq_(ds.fields(), ['LONG', 'LAT', 'LOGICAL_TR',
'LOGICAL_FA', 'CHARACTER', 'NUMERIC', 'DATE'])
eq_(ds.field_types(), ['str', 'str',
'bool', 'bool', 'str', 'float', 'str'])
query = mapnik.Query(ds.envelope())
for fld in ds.fields():
query.add_property_name(fld)
feat = list(ds.all_features())[0]
eq_(feat.id(), 1)
eq_(feat['LONG'], '0')
eq_(feat['LAT'], '0')
eq_(feat['LOGICAL_TR'], True)
eq_(feat['LOGICAL_FA'], False)
eq_(feat['CHARACTER'], '254')
eq_(feat['NUMERIC'], 32)
eq_(feat['DATE'], '20121202')
# created by hand in qgis 1.8.0
def test_shapefile_point2d_from_qgis():
ds = mapnik.Shapefile(file='../data/shp/points/qgis.shp')
eq_(len(ds.fields()), 2)
eq_(ds.fields(), ['id', 'name'])
eq_(ds.field_types(), ['int', 'str'])
eq_(len(list(ds.all_features())), 3)
# ogr2ogr tests/data/shp/3dpoint/ogr_zfield.shp
# tests/data/shp/3dpoint/qgis.shp -zfield id
def test_shapefile_point_z_from_qgis():
ds = mapnik.Shapefile(file='../data/shp/points/ogr_zfield.shp')
eq_(len(ds.fields()), 2)
eq_(ds.fields(), ['id', 'name'])
eq_(ds.field_types(), ['int', 'str'])
eq_(len(list(ds.all_features())), 3)
def test_shapefile_multipoint_from_qgis():
ds = mapnik.Shapefile(file='../data/shp/points/qgis_multi.shp')
eq_(len(ds.fields()), 2)
eq_(ds.fields(), ['id', 'name'])
eq_(ds.field_types(), ['int', 'str'])
eq_(len(list(ds.all_features())), 1)
# pointzm from arcinfo
def test_shapefile_point_zm_from_arcgis():
ds = mapnik.Shapefile(file='../data/shp/points/poi.shp')
eq_(len(ds.fields()), 7)
eq_(ds.fields(),
['interst_id',
'state_d',
'cnty_name',
'latitude',
'longitude',
'Name',
'Website'])
eq_(ds.field_types(), ['str', 'str',
'str', 'float', 'float', 'str', 'str'])
eq_(len(list(ds.all_features())), 17)
# copy of the above with ogr2ogr that makes m record 14 instead of 18
def test_shapefile_point_zm_from_ogr():
ds = mapnik.Shapefile(file='../data/shp/points/poi_ogr.shp')
eq_(len(ds.fields()), 7)
eq_(ds.fields(),
['interst_id',
'state_d',
'cnty_name',
'latitude',
'longitude',
'Name',
'Website'])
eq_(ds.field_types(), ['str', 'str',
'str', 'float', 'float', 'str', 'str'])
eq_(len(list(ds.all_features())), 17)
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))

unlessbamboo/grocery-shop | language/python/src/package/abs_import/unable/unable_module.py | Python | gpl-3.0 | 104 | 0.012821

# coding:utf8
"""
Unable to import from the parent directory.
"""
class UnableTest(object):
pass
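A hedged illustration of the failure the docstring refers to; the parent-level module name used below is hypothetical and not part of this repository.

# Sketch only: shows the error raised when a relative import tries to climb
# above the top-level package (or runs outside any package at all).
try:
    from .. import helper  # hypothetical module one level above this package
except (ImportError, ValueError) as exc:
    print("relative import beyond the package failed: %s" % exc)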

sxjscience/tvm | python/tvm/auto_scheduler/search_policy.py | Python | apache-2.0 | 7,529 | 0.001992

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The search policies for TVM Auto-scheduler.
This contains the strategies to generate a schedule automatically. We provide an EmptyPolicy
which always returns an unchanged initial state, and a more advanced SketchPolicy which can
deal with various ops/subgraphs on different target devices.
Reference:
L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating High-Performance Tensor
Programs for Deep Learning." arXiv preprint arXiv:2006.06762 (2020).
"""
import random
import tvm._ffi
from tvm.runtime import Object
from .cost_model import RandomModel
from . import _ffi_api
@tvm._ffi.register_object("auto_scheduler.SearchCallback")
class SearchCallback(Object):
"""Callback function before or after search process"""
@tvm._ffi.register_object("auto_scheduler.PreloadMeasuredStates")
class PreloadMeasuredStates(SearchCallback):
"""A SearchCallback to load measured states from the log file for a search policy.
This can resume the state of the search policy:
- Making sure an already measured state in former searches will never be measured again.
- The history states can be used to speed up the search process(e.g. SketchPolicy uses
history states as starting point to perform Evolutionary Search).
Parameters
----------
filename : str
The name of the record file.
"""
def __init__(self, filename="auto_scheduler_tuning.json"):
self.__init_handle_by_constructor__(_ffi_api.PreloadMeasuredStates, filename)
@tvm._ffi.register_object("auto_scheduler.SearchPolicy")
class SearchPolicy(Object):
""" The base class of search policies. """
@tvm._ffi.register_object("auto_scheduler.EmptyPolicy")
class EmptyPolicy(SearchPolicy):
"""This is an example empty search policy which will always generate
the init state of ComputeDAG.
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
init_search_callbacks : Optional[List[SearchCallback]]
Callback functions called before the search process.
"""
def __init__(self, task, init_search_callbacks=None):
self.__init_handle_by_constructor__(_ffi_api.EmptyPolicy, task, init_search_callbacks)
@tvm._ffi.register_object("auto_scheduler.SketchPolicy")
class SketchPolicy(SearchPolicy):
"""The search policy that searches in a hierarchical search space defined by sketches.
The policy randomly samples programs from the space defined by sketches and use evolutionary
search to fine-tune them.
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
program_cost_model : CostModel = RandomModel()
The cost model to estimate the complete schedules.
params : Optional[Dict[str, Any]]
Parameters of the search policy.
See `src/auto_scheduler/search_policy/sketch_search_policy.h` for the definitions.
See `DEFAULT_PARAMS` below to find the default values.
seed : Optional[int]
Random seed.
verbose : int = 1
Verbosity level. 0 for silent, 1 to output information during schedule search.
init_search_callbacks : Optional[List[SearchCallback]]
Callback functions called before the search process, usually used to do extra
initializations.
Possible callbacks:
- auto_scheduler.PreloadMeasuredStates
- auto_scheduler.PreloadCustomSketchRule
TODO(jcf94): Add these search callback implementations.
"""
DEFAULT_PARAMS = {
"eps_greedy": 0.05,
"retry_search_one_round_on_empty": 10,
"evolutionary_search_population": 2048,
"evolutionary_search_num_iters": 10,
"evolutionary_search_mutation_prob": 0.85,
"evolutionary_search_use_measured_ratio": 0.2,
"cpu_multi_level_tiling_structure": "SSRSRS",
"gpu_multi_level_tiling_structure": "SSSRRSRS",
# Notice: the default thread bind policy of GPU assumes the tiling structure to have at
# least 3 spatial tiling levels in outermost
"max_innermost_split_factor": 64,
"max_vectorize_size": 16,
"disable_change_compute_location": 0,
}
def __init__(
self,
task,
program_cost_model=RandomModel(),
params=None,
seed=None,
verbose=1,
init_search_callbacks=None,
):
if params is None:
params = SketchPolicy.DEFAULT_PARAMS
else:
for key, value in SketchPolicy.DEFAULT_PARAMS.items():
if key not in params:
params[key] = value
self.__init_handle_by_constructor__(
_ffi_api.SketchPolicy,
task,
program_cost_model,
params,
seed or random.randint(1, 1 << 30),
verbose,
init_search_callbacks,
)
def generate_sketches(self, print_for_debug=False):
"""Generate the sketches.
This python interface is mainly used for debugging and testing.
The actual search is all done in c++.
Parameters
----------
print_for_debug : bool = False
            Whether to print out the sketches for debugging.
Returns
-------
sketches : List[State]
The generated sketches of this search task.
"""
sketches = _ffi_api.SketchPolicyGenerateSketches(self)
if print_for_debug:
for i, s in enumerate(sketches):
print("=" * 20 + " %d " % i + "=" * 20)
print(s)
return sketches
def sample_initial_population(self, pop_size):
"""Sample initial population.
This python interface is mainly used for debugging and testing.
The actual search is all done in c++.
Parameters
----------
pop_size : int
            The size of sampled population
Returns
-------
states: List[State]
The sampled states
"""
states = _ffi_api.SketchPolicySampleInitialPopulation(self, pop_size)
return states
def evolutionary_search(self, init_populations, out_size):
"""Evolutionary search.
        This python interface is mainly used for debugging and testing.
The actual search is all done in c++.
Parameters
----------
init_populations: List[State]
The initial population states
out_size : int
The size of generated states
Returns
-------
states: List[State]
The generated states
"""
states = _ffi_api.SketchPolicyEvolutionarySearch(self, init_populations, out_size)
return states
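A hedged end-to-end sketch of how these policy classes are typically driven. Everything not defined in this file (register_workload, the te-based matmul workload, the SearchTask keyword form) is an assumption about the surrounding tvm.auto_scheduler package at a comparable TVM version, not something this module guarantees.

# Sketch only: SearchTask/register_workload come from the wider auto_scheduler
# package and their exact signatures may differ between TVM versions.
import tvm
from tvm import te, auto_scheduler

@auto_scheduler.register_workload
def matmul(n):
    A = te.placeholder((n, n), name="A")
    B = te.placeholder((n, n), name="B")
    k = te.reduce_axis((0, n), name="k")
    C = te.compute((n, n), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
    return [A, B, C]

task = auto_scheduler.SearchTask(func=matmul, args=(512,), target="llvm")
policy = auto_scheduler.SketchPolicy(
    task,
    params={"evolutionary_search_population": 1024},  # merged with DEFAULT_PARAMS
    seed=0,
    init_search_callbacks=[auto_scheduler.PreloadMeasuredStates("records.json")],
)
sketches = policy.generate_sketches(print_for_debug=True)   # inspect the sketch space
initial = policy.sample_initial_population(64)              # debugging interface
best = policy.evolutionary_search(initial, out_size=8)      # debugging interface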

namhyung/uftrace | tests/t198_lib_arg_float.py | Python | gpl-2.0 | 799 | 0.002503

#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'float-libcall', result="""
# DURATION TID FUNCTION
[18276] | main() {
   0.371 ms [18276] |   expf(1.000000) = 2.718282;
0.118 ms [18276] | log(2.718282) = 1.000000;
3.281 ms [18276] | } /* main */
""")
def build(self, name, cflags='', ldflags=''):
# cygprof doesn't support arguments now
        if cflags.find('-finstrument-functions') >= 0:
return TestBase.TEST_SKIP
ldflags += " -lm"
return TestBase.build(self, name, cflags, ldflags)
def setup(self):
self.option = '-A "expf@fparg1/32" -R "expf@retval/f32" '
self.option += '-A "log@fparg1/64" -R "log@retval/f64" '

ldjebran/robottelo | tests/foreman/cli/test_logging.py | Python | gpl-3.0 | 11,539 | 0.001907

"""CLI tests for logging.
:Requirement: Logging
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: logging
:TestType: Functional
:CaseImportance: Medium
:Upstream: No
"""
import re
from fauxfactory import gen_string
from robottelo import (
manifests,
ssh,
)
from robottelo.cli.factory import (
make_org,
make_product_wait,
make_repository,
)
from robottelo.cli.subscription import Subscription
from robottelo.ssh import upload_file
from robottelo.test import CLITestCase
from robottelo.decorators import tier4
def line_count(file, connection=None):
"""Get number of lines in a file."""
connection = connection or ssh.get_connection()
result = connection.run('wc -l < {0}'.format(file),
output_format='plain')
count = result.stdout.strip('\n')
return count
def cut_lines(start_line, end_line, source_file, out_file, connection=None):
"""Given start and end line numbers, cut lines from source file
and put them in out file."""
connection = connection or ssh.get_connection()
result = connection.run('sed -n "{0},{1} p" {2} < {2} > {3}'.format(
start_line,
end_line,
source_file,
out_file))
return result
class SimpleLoggingTestCase(CLITestCase):
"""Test class for default logging to files."""
org = None
product = None
@classmethod
def setUpClass(cls):
"""Tests for logging to files"""
super(SimpleLoggingTestCase, cls).setUpClass()
# need own org for the manifest refresh test
cls.org = make_org(cached=True)
cls.product = make_product_wait(
{u'organization-id': cls.org['id']},
)
def _make_repository(self, options=None):
"""Makes a new repository and asserts its success"""
if options is None:
options = {}
if not options.get('product-id'):
options[u'product-id'] = self.product['id']
return make_repository(options)
@tier4
def test_positive_logging_from_foreman_core(self):
"""Check that GET command to Hosts API is logged and has request ID.
:id: 0785260d-cb81-4351-a7cb-d7841335e2de
:expectedresults: line of log with GET has request ID
:CaseImportance: Medium
"""
GET_line_found = False
source_log = '/var/log/foreman/production.log'
test_logfile = '/var/tmp/logfile_from_foreman_core'
with ssh.get_connection() as connection:
# get the number of lines in the source log before the test
line_count_start = line_count(source_log, connection)
# hammer command for this test
result = connection.run('hammer host list')
self.assertEqual(result.return_code, 0, "BASH command error?")
# get the number of lines in the source log after the test
line_count_end = line_count(source_log, connection)
# get the log lines of interest, put them in test_logfile
cut_lines(line_count_start, line_count_end, source_log, test_logfile, connection)
# use same location on remote and local for log file extract
ssh.download_file(test_logfile)
# search the log file extract for the line with GET to host API
with open(test_logfile, "r") as logfile:
for line in logfile:
if re.search(r'Started GET \"\/api/hosts\?page=1', line):
self.logger.info('Found the line with GET to hosts API')
GET_line_found = True
# Confirm the request ID was logged in the line with GET
match = re.search(r'\[I\|app\|\w{8}\]', line)
assert match, "Request ID not found"
self.logger.info("Request ID found for logging from foreman core")
break
assert GET_line_found, "The GET command to list hosts was not found in logs."
@tier4
def test_positive_logging_from_foreman_proxy(self):
"""Check PUT to Smart Proxy API to refresh the features is logged and has request ID.
:id: 0ecd8406-6cf1-4520-b8b6-8a164a1e60c2
:expectedresults: line of log with PUT has request ID
:CaseImportance: Medium
"""
PUT_line_found = False
request_id = None
source_log_1 = '/var/log/foreman/production.log'
test_logfile_1 = '/var/tmp/logfile_1_from_proxy'
source_log_2 = '/var/log/foreman-proxy/proxy.log'
test_logfile_2 = '/var/tmp/logfile_2_from_proxy'
with ssh.get_connection() as connection:
# get the number of lines in the source logs before the test
line_count_start_1 = line_count(source_log_1, connection)
line_count_start_2 = line_count(source_log_2, connection)
# hammer command for this test
result = connection.run('hammer proxy refresh-features --id 1')
self.assertEqual(result.return_code, 0, "BASH command error?")
# get the number of lines in the source logs after the test
line_count_end_1 = line_count(source_log_1, connection)
            line_count_end_2 = line_count(source_log_2, connection)
# get the log lines of interest, put them in test_logfile_1
cut_lines(line_count_start_1, line_count_end_1, source_log_1, test_logfile_1,
connection)
# get the log lines of interest, put them in test_logfile_2
            cut_lines(line_count_start_2, line_count_end_2, source_log_2, test_logfile_2,
connection)
# use same location on remote and local for log file extract
ssh.download_file(test_logfile_1)
# use same location on remote and local for log file extract
ssh.download_file(test_logfile_2)
# search the log file extract for the line with PUT to host API
with open(test_logfile_1, "r") as logfile:
for line in logfile:
if re.search(r'Started PUT \"\/api\/smart_proxies\/1\/refresh', line):
self.logger.info('Found the line with PUT to foreman proxy API')
PUT_line_found = True
# Confirm the request ID was logged in the line with PUT
match = re.search(r'\[I\|app\|\w{8}\]', line)
assert match, "Request ID not found"
self.logger.info("Request ID found for logging from foreman proxy")
p = re.compile(r"\w{8}")
result = p.search(line)
request_id = result.group(0)
break
assert PUT_line_found, "The PUT command to refresh proxies was not found in logs."
# search the local copy of proxy.log file for the same request ID
with open(test_logfile_2, "r") as logfile:
for line in logfile:
# Confirm request ID was logged in proxy.log
                    assert request_id in line, "Request ID not found in proxy.log"
self.logger.info("Request ID also found in proxy.log")
break
@tier4
def test_positive_logging_from_candlepin(self):
"""Check logging after manifest upload.
:id: 8c06e501-52d7-4baf-903e-7de9caffb066
:expectedresults: line of logs with POST has request ID
:CaseImportance: Medium
"""
POST_line_found = False
source_log = '/var/log/candlepin/candlepin.log'
test_logfile = '/var/tmp/logfile_from_candlepin'
# regex for a version 4 UUID (8-4-4-12 format)
regex = r"\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b"
with ssh.get_connection() as connection:
# get the number of lines in the source log before the test
line_count_start = line_count(source_log, connection)
# command for this test
with manifests.clone() as manifest:
upload_file(manifest.content, manifest.filename)
Subscription.upload({

JcDelay/pycr | libpycr/builtin/accounts/ls-diff-prefs.py | Python | apache-2.0 | 3,136 | 0

"""List diff preferences associated with one's account"""
# pylint: disable=invalid-name
import argparse
import logging
from libpycr.exceptions import PyCRError
from libpycr.gerrit.client import Gerrit
from libpycr.meta import GerritAccountBuiltin
from libpycr.pager import Pager
from libpycr.utils.commandline import expect_account_as_positional
from libpycr.utils.output import checkmark
from libpycr.utils.system import fail
from prettytable import PrettyTable
class LsDiffPrefs(GerritAccountBuiltin):
"""Implement the LS-DIFF-PREFS command"""
# Logger for this command
log = logging.getLogger(__name__)
@property
def name(self):
return 'ls-diff-prefs'
@property
def description(self):
return 'list diff preferences'
@staticmethod
def parse_command_line(arguments):
"""Parse the LS-DIFF-PREFS command command-line arguments
Returns the account id that is provided on the command line. If no
account is provided, returns None.
:param arguments: a list of command-line arguments to parse
:type arguments: list[str]
:rtype: str
"""
parser = argparse.ArgumentParser(
description='List account diff preferences')
expect_account_as_positional(parser)
cmdline = parser.parse_args(arguments)
# fetch changes details
return cmdline.account
def run(self, arguments, *args, **kwargs):
account_id = self.parse_command_line(arguments)
try:
account = Gerrit.get_account(account_id or 'self')
prefs = Gerrit.get_diff_prefs(account_id or 'self')
except PyCRError as why:
fail('cannot list account diff preferences', why)
table = PrettyTable(['Preference', 'Value'])
table.align['Preference'] = 'l'
table.align['Value'] = 'c'
table.add_row(['Context', prefs.context])
table.add_row(['Expand all comments',
checkmark(prefs.expand_all_comments)])
table.add_row(['Ignore whitespace', prefs.ignore_whitespace])
table.add_row(['Intraline difference',
checkmark(prefs.intraline_difference)])
table.add_row(['Line length', prefs.line_length])
table.add_row(['Manual review', checkmark(prefs.manual_review)])
table.add_row(['Retain header', checkmark(prefs.retain_header)])
table.add_row(['Show line endings',
checkmark(prefs.show_line_endings)])
table.add_row(['Show tabs', checkmark(prefs.show_tabs)])
table.add_row(['Show whitespace errors',
checkmark(prefs.show_whitespace_errors)])
table.add_row(['Skip deleted', checkmark(prefs.skip_deleted)])
table.add_row(['Skip uncommented', checkmark(prefs.skip_uncommented)])
table.add_row(['Syntax highlighting',
                       checkmark(prefs.syntax_highlighting)])
table.add_row(['Tab size', prefs.tab_size])
with Pager(command=self.name):
print 'Account: {}'.format(account.username)
print table

animekita/selvbetjening | selvbetjening/core/events/migrations/0006_auto__chg_field_attend_user__chg_field_payment_signee__chg_field_payme.py | Python | mit | 17,577 | 0.008136

# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Attend.user'
db.alter_column(u'events_attend', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['user.SUser']))
# Changing field 'Payment.signee'
db.alter_column(u'events_payment', 'signee_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['user.SUser']))
# Changing field 'Payment.user'
db.alter_column(u'events_payment', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['user.SUser']))
def backwards(self, orm):
# Changing field 'Attend.user'
db.alter_column(u'events_attend', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))
# Changing field 'Payment.signee'
db.alter_column(u'events_payment', 'signee_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['auth.User']))
# Changing field 'Payment.user'
db.alter_column(u'events_payment', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'countries.country': {
'Meta': {'ordering': "('name',)", 'object_name': 'Country', 'db_table': "'country'"},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'numcode': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'events.attend': {
'Meta': {'unique_together': "(('event', 'user'),)", 'object_name': 'Attend'},
'change_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'changed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'paid': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'registration_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'waiting'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['user.SUser']"})
},
'events.attendeecomment': {
'Meta': {'object_name': 'AttendeeComment'},
'attendee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comment_set'", 'to': "orm['events.Attend']"}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'comment': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'events.attendstatechange': {
'Meta': {'object_name': 'AttendStateChange'},
'attendee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'state_history'", 'to': "orm['events.Attend']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'custom_change_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'custom_signup_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'custom_status_page': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enddate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'location_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'maximum_attendees': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'move_to_accepted_policy': ('django.db.models.fields.CharField', [], {'default': "'always'", 'max_length': '32'}),
'notify_on_payment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notify_event_on_payment_registration'", 'null': 'True', 'to': u"orm['mailcenter.EmailSpecification']"}),
'notify_on_registration': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notify_event_on_registration'", 'null': 'True', 'to': u"orm['mailcenter.EmailSpecification']"}),
'notify_on_registration_update': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notify_event_on_registration_update'", 'null': 'True', 'to': u"orm['mailcenter.EmailSpecification']"}),
'registration_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_custom_change_message': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_custom_signup_message': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_custom_status_page': ('django.db

caderache2014/django-rest-tutorial | tutorial/snippets/models.py | Python | mit | 819 | 0.001221

from django.db import models
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())
class Snippet(models.Model):
created = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=100, blank=True, default='')
code = models.TextField()
linenos = models.BooleanField(default=False)
    language = models.CharField(
choices=LANGUAGE_CHOICES,
default='python',
max_length=100)
style = models.CharField(
choices=STYLE_CHOICES,
default='friendly',
max_length=100)
class Meta:
ordering = ('created',)
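For context, a hedged sketch of how the pygments pieces referenced by this model are usually combined to render a snippet; the render_snippet helper below is illustrative only and is not part of the tutorial's model.

# Sketch only: plain pygments rendering that mirrors the model's defaults.
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_lexer_by_name

def render_snippet(code, language='python', style='friendly', linenos=False):
    """Return a full HTML document highlighting `code`."""
    lexer = get_lexer_by_name(language)
    formatter = HtmlFormatter(style=style, linenos='table' if linenos else False, full=True)
    return highlight(code, lexer, formatter)

html = render_snippet("print('hello, world')")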

lcrees/twoq | twoq/tests/auto/ordering.py | Python | bsd-3-clause | 2,838 | 0.0074

# -*- coding: utf-8 -*-
'''auto ordering call chain test mixins'''
from inspect import ismodule
from twoq.support import port
class ARandomQMixin(object):
def test_choice(self):
self.assertEqual(len(list(self.qclass(1, 2, 3, 4, 5, 6).choice())), 1)
def test_sample(self):
self.assertEqual(len(self.qclass(1, 2, 3, 4, 5, 6).sample(3).end()), 3)
def test_shuffle(self):
self.assertEqual(
len(self.qclass(1, 2, 3, 4, 5, 6).shuffle()),
len([5, 4, 6, 3, 1, 2]),
)
class ACombineQMixin(object):
# def test_combinations(self):
# foo = self.qclass('ABCD').combinations(2).value(),
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'C'), ('B', 'D'),
# ('C', 'D')],
# foo,
# )
#
# def test_permutations(self):
# foo = self.qclass('ABCD').permutations(2).value()
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'C'),
# ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'D'), ('D', 'A'),
# ('D', 'B'), ('D', 'C')],
# foo,
# )
def test_product(self):
foo = self.qclass('ABCD', 'xy').product().value()
self.assertEqual(
foo,
[('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y'), ('C', 'x'),
('C', 'y'), ('D', 'x'), ('D', 'y')],
foo,
)
class AOrderQMixin(ARandomQMixin, ACombineQMixin):
'''combination mixin'''
def test_group(self,):
from math import floor
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).tap(lambda x: floor(x)).group().end(),
[[1.0, [1.3]], [2.0, [2.1, 2.4]]]
)
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).group().end(),
[[1.3, [1.3]], [2.1, [2.1]], [2.4, [2.4]]],
)
def test_grouper(self):
self.assertEqual(
self.qclass(
'moe', 'larry', 'curly', 30, 40, 50, True
).grouper(2, 'x').end(),
            [('moe', 'larry'), ('curly', 30), (40, 50), (True, 'x')]
)
def test_reversed(self):
self.assertEqual(
self.qclass(5, 4, 3, 2, 1).reverse().end(), [1, 2, 3, 4, 5],
)
def test_sort(self):
from math import sin
self.assertEqual(
            self.qclass(1, 2, 3, 4, 5, 6).tap(
lambda x: sin(x)
).sort().end(),
[5, 4, 6, 3, 1, 2],
)
self.assertEqual(
self.qclass(4, 6, 65, 3, 63, 2, 4).sort().end(),
[2, 3, 4, 4, 6, 63, 65],
)
__all__ = sorted(name for name, obj in port.items(locals()) if not any([
name.startswith('_'), ismodule(obj), name in ['ismodule', 'port']
]))
del ismodule

epfl-lts2/pygsp | examples/fourier_transform.py | Python | bsd-3-clause | 1,371 | 0

r"""
Fourier transform
=================
The graph Fourier transform :meth:`pygsp.graphs.Graph.gft` transforms a
signal from the vertex domain to the spectral domain. The smoother the signal
(see :meth:`pygsp.graphs.Graph.dirichlet_energy`), the lower in the frequencies
its energy is concentrated.
"""
import numpy as np
from matplotlib import pyplot as plt
import pygsp as pg
G = pg.graphs.Sensor(seed=42)
G.compute_fourier_basis()
scales = [10, 3, 0]
limit = 0.44
fig, axes = plt.subplots(2, len(scales), figsize=(12, 4))
fig.subplots_adjust(hspace=0.5)
x0 = np.random.default_rng(1).normal(size=G.N)
for i, scale in enumerate(scales):
g = pg.filters.Heat(G, scale)
x = g.filter(x0).squeeze()
x /= np.linalg.norm(x)
x_hat = G.gft(x).squeeze()
assert np.all((-limit < x) & (x < limit))
G.plot(x, limits=[-limit, limit], ax=axes[0, i])
axes[0, i].set_axis_off()
axes[0, i].set_title('$x^T L x = {:.2f}$'.format(G.dirichlet_energy(x)))
axes[1, i].plot(G.e, np.abs(x_hat), '.-')
axes[1, i].set_xticks(range(0, 16, 4))
axes[1, i].set_xlabel(r'graph frequency $\lambda$')
axes[1, i].set_ylim(-0.05, 0.95)
axes[1, 0].set_ylabel(r'frequency content $\hat{x}(\lambda)$')
# axes[0, 0].set_title(r'$x$: signal in the vertex domain')
# axes[1, 0].set_title(r'$\hat{x}$: signal in the spectral domain')
fig.tight_layout()

appleseedhq/cortex | test/IECoreScene/MeshAlgoNormalsTest.py | Python | bsd-3-clause | 4,040 | 0.036139

##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
import IECore
import IECoreScene
import imath
class MeshAlgoNormalsTest( unittest.TestCase ) :
def testPlane( self ) :
p = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) )
del p["N"]
normals = IECoreScene.MeshAlgo.calculateNormals( p )
self.assertEqual( normals.interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertTrue( normals.data.isInstanceOf( IECore.V3fVectorData.staticTypeId() ) )
self.assertEqual( normals.data.size(), p.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( normals.data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
for n in normals.data :
self.assertEqual( n, imath.V3f( 0, 0, 1 ) )
def testSphere( self ) :
s = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob" ).read()
		del s["N"]
normals = IECoreScene.MeshAlgo.calculateNormals( s )
self.assertEqual( normals.interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assert_( normals.data.isInstanceOf( IECore.V3fVectorData.staticTypeId() ) )
self.assertEqual( normals.data.size(), s.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
		self.assertEqual( normals.data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
points = s["P"].data
for i in range( 0, normals.data.size() ) :
self.assertTrue( math.fabs( normals.data[i].length() - 1 ) < 0.001 )
p = points[i].normalize()
self.assertTrue( normals.data[i].dot( p ) > 0.99 )
self.assertTrue( normals.data[i].dot( p ) < 1.01 )
def testUniformInterpolation( self ) :
m = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ), imath.V2i( 10 ) )
del m["N"]
normals = IECoreScene.MeshAlgo.calculateNormals( m, interpolation = IECoreScene.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( normals.interpolation, IECoreScene.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( len( normals.data ), m.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
for n in normals.data :
self.assertEqual( n, imath.V3f( 0, 0, 1 ) )
if __name__ == "__main__":
unittest.main()